code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M)
---|---|---|---|---|---|
class ReaderWriter[T](private var content: T) {
def read: T = content
def write(x: T) { content = x }
}
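// Illustrative usage sketch; the object and values below are hypothetical, not part of the original example.
object ReaderWriterUsage {
  def main(args: Array[String]): Unit = {
    val cell = new ReaderWriter[Int](1) // a mutable, type-parameterized cell holding 1
    cell.write(2)                       // replace the content
    println(cell.read)                  // prints 2
  }
}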
| grzegorzbalcerek/scala-book-examples | examples/ReaderWriter.scala | Scala | mit | 108 |
package grammar
/**
* Created by Mikael on 13.08.2015.
* Will save the current grammar to the given field
*/
object GrammarSave {
case object Reference extends GrammarSave
case object Initial extends GrammarSave
case object None extends GrammarSave
}
sealed trait GrammarSave
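// Illustrative usage sketch; the helper below is hypothetical, not part of the original project.
object GrammarSaveUsage {
  def describe(save: GrammarSave): String = save match {
    case GrammarSave.Reference => "save as the reference grammar"
    case GrammarSave.Initial   => "save as the initial grammar"
    case GrammarSave.None      => "do not save the grammar"
  }
}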
| epfl-lara/grammar-web | js/src/main/scala/grammar/GrammarSave.scala | Scala | mit | 286 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.join
import org.apache.flink.api.common.state._
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.co.CoProcessFunction
import org.apache.flink.table.api.StreamQueryConfig
import org.apache.flink.table.runtime.types.CRow
import org.apache.flink.types.Row
import org.apache.flink.util.Collector
/**
* Connects data from the left and right streams. Only used for inner joins.
*
* @param leftType the input type of left stream
* @param rightType the input type of right stream
* @param genJoinFuncName the function name of the other (non-equi) join condition
* @param genJoinFuncCode the function code of the other (non-equi) join condition
* @param queryConfig the configuration for the query to generate
*/
class NonWindowInnerJoin(
leftType: TypeInformation[Row],
rightType: TypeInformation[Row],
genJoinFuncName: String,
genJoinFuncCode: String,
queryConfig: StreamQueryConfig)
extends NonWindowJoin(
leftType,
rightType,
genJoinFuncName,
genJoinFuncCode,
queryConfig) {
override def open(parameters: Configuration): Unit = {
super.open(parameters)
LOG.debug("Instantiating NonWindowInnerJoin.")
}
/**
* Puts or retracts an element from the input stream into state and searches the other side's state to
* output records that meet the join condition. Records in state will expire if a state retention time
* has been specified.
*/
override def processElement(
value: CRow,
ctx: CoProcessFunction[CRow, CRow, CRow]#Context,
out: Collector[CRow],
currentSideState: MapState[Row, JTuple2[Long, Long]],
otherSideState: MapState[Row, JTuple2[Long, Long]],
isLeft: Boolean): Unit = {
val inputRow = value.row
updateCurrentSide(value, ctx, currentSideState)
cRowWrapper.setCollector(out)
cRowWrapper.setChange(value.change)
val otherSideIterator = otherSideState.iterator()
// join other side data
while (otherSideIterator.hasNext) {
val otherSideEntry = otherSideIterator.next()
val otherSideRow = otherSideEntry.getKey
val otherSideCntAndExpiredTime = otherSideEntry.getValue
// join
cRowWrapper.setTimes(otherSideCntAndExpiredTime.f0)
callJoinFunction(inputRow, isLeft, otherSideRow, cRowWrapper)
// clear expired data. Note: clear after join to keep closer to the original semantics
if (stateCleaningEnabled && curProcessTime >= otherSideCntAndExpiredTime.f1) {
otherSideIterator.remove()
}
}
}
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/runtime/join/NonWindowInnerJoin.scala | Scala | apache-2.0 | 3,520 |
package uk.gov.dvla.auditing
object Status extends Enumeration {
val RecordFound,
NotFound = Value
}
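// Illustrative usage sketch; the helper below is hypothetical, not part of the original file.
object StatusUsage {
  def isFound(status: Status.Value): Boolean = status == Status.RecordFound
}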
| dvla/sdl-opensource | audit-core/src/main/scala/uk/gov/dvla/auditing/Status.scala | Scala | mit | 106 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package genc
import purescala.Common.{ Identifier }
import purescala.Definitions.{ Definition, FunDef, ValDef, Program }
import purescala.Types.{ TypeTree }
/*
* Some type aliases for readability
*/
package object phases {
case class VarInfo(id: Identifier, typ: TypeTree, isVar: Boolean)
type FunCtxDB = Map[FunDef, Seq[VarInfo]]
case class Dependencies(prog: Program, deps: Set[Definition])
}
| regb/leon | src/main/scala/leon/genc/phases/package.scala | Scala | gpl-3.0 | 465 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.analytic
import com.typesafe.scalalogging.LazyLogging
import org.geotools.data.Query
import org.geotools.data.collection.ListFeatureCollection
import org.geotools.data.simple.{SimpleFeatureCollection, SimpleFeatureSource}
import org.geotools.factory.CommonFactoryFinder
import org.geotools.feature.simple.{SimpleFeatureBuilder, SimpleFeatureTypeBuilder}
import org.geotools.feature.visitor.{AbstractCalcResult, CalcResult}
import org.geotools.process.factory.{DescribeParameter, DescribeProcess, DescribeResult}
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.index.utils.KryoLazyStatsUtils
import org.locationtech.geomesa.process.{GeoMesaProcess, GeoMesaProcessVisitor}
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.RichAttributeDescriptors.RichAttributeDescriptor
import org.locationtech.geomesa.utils.stats.{EnumerationStat, Stat}
import org.opengis.feature.Feature
import org.opengis.feature.`type`.AttributeDescriptor
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
import org.opengis.util.ProgressListener
import scala.collection.JavaConverters._
import scala.collection.mutable
@DescribeProcess(title = "Geomesa Unique",
description = "Finds unique attributes values, optimized for GeoMesa")
class UniqueProcess extends GeoMesaProcess with LazyLogging {
@DescribeResult(name = "result",
description = "Feature collection with an attribute containing the unique values")
def execute(
@DescribeParameter(name = "features", description = "Input feature collection")
features: SimpleFeatureCollection,
@DescribeParameter(name = "attribute", description = "Attribute whose unique values are extracted")
attribute: String,
@DescribeParameter(name = "filter", min = 0, description = "The filter to apply to the feature collection")
filter: Filter,
@DescribeParameter(name = "histogram", min = 0, description = "Create a histogram of attribute values")
histogram: java.lang.Boolean,
@DescribeParameter(name = "sort", min = 0, description = "Sort results - allowed to be ASC or DESC")
sort: String,
@DescribeParameter(name = "sortByCount", min = 0, description = "Sort by histogram counts instead of attribute values")
sortByCount: java.lang.Boolean,
progressListener: ProgressListener): SimpleFeatureCollection = {
val attributeDescriptor = features
.getSchema
.getAttributeDescriptors
.asScala
.find(_.getLocalName == attribute)
.getOrElse(throw new IllegalArgumentException(s"Attribute $attribute does not exist in feature schema."))
val hist = Option(histogram).exists(_.booleanValue)
val sortBy = Option(sortByCount).exists(_.booleanValue)
val visitor = new AttributeVisitor(features, attributeDescriptor, Option(filter).filter(_ != Filter.INCLUDE), hist)
features.accepts(visitor, progressListener)
val uniqueValues = visitor.getResult.attributes
val binding = attributeDescriptor.getType.getBinding
UniqueProcess.createReturnCollection(uniqueValues, binding, hist, Option(sort), sortBy)
}
}
object UniqueProcess {
val SftName = "UniqueValue"
val AttributeValue = "value"
val AttributeCount = "count"
/**
* Duplicates output format from geotools UniqueProcess
*
* @param uniqueValues values
* @param binding value binding
* @param histogram include counts or just values
* @param sort sort
* @param sortByCount sort by count or by value
* @return
*/
def createReturnCollection(uniqueValues: Map[Any, Long],
binding: Class[_],
histogram: Boolean,
sort: Option[String],
sortByCount: Boolean): SimpleFeatureCollection = {
val ft = createUniqueSft(binding, histogram)
val sfb = new SimpleFeatureBuilder(ft)
val result = new ListFeatureCollection(ft)
// if sorting was requested do it here, otherwise return results in iterator order
val sorted = sort.map { s =>
if (sortByCount) {
val ordering = if (s.equalsIgnoreCase("desc")) Ordering[Long].reverse else Ordering[Long]
uniqueValues.iterator.toList.sortBy(_._2)(ordering)
} else {
val ordering = if (s.equalsIgnoreCase("desc")) Ordering[String].reverse else Ordering[String]
uniqueValues.iterator.toList.sortBy(_._1.toString)(ordering)
}
}.getOrElse(uniqueValues.iterator)
// histogram includes extra 'count' attribute
val addFn = if (histogram) (key: Any, value: Long) => {
sfb.add(key)
sfb.add(value)
result.add(sfb.buildFeature(null))
} else (key: Any, _: Long) => {
sfb.add(key)
result.add(sfb.buildFeature(null))
}
sorted.foreach { case (key, value) => addFn(key, value) }
result
}
/**
* Based on geotools UniqueProcess simple feature type
*
* @param binding class of attribute
* @param histogram return counts or not
* @return
*/
def createUniqueSft(binding: Class[_], histogram: Boolean): SimpleFeatureType = {
val sftb = new SimpleFeatureTypeBuilder
sftb.add(AttributeValue, binding)
if (histogram) {
// histogram includes extra 'count' attribute
sftb.add(AttributeCount, classOf[java.lang.Long])
}
sftb.setName(SftName)
sftb.buildFeatureType
}
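  // Illustrative call (arguments are hypothetical): createUniqueSft(classOf[String], histogram = true)
  // would build a "UniqueValue" feature type with a String "value" attribute plus a java.lang.Long "count" attribute.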
}
/**
* Visitor that tracks unique attribute values and counts
*
* @param features features to evaluate
* @param attributeDescriptor attribute to evaluate
* @param filter optional filter to apply to features before evaluating
* @param histogram return counts or not
*/
class AttributeVisitor(val features: SimpleFeatureCollection,
val attributeDescriptor: AttributeDescriptor,
val filter: Option[Filter],
histogram: Boolean) extends GeoMesaProcessVisitor with LazyLogging {
import org.locationtech.geomesa.utils.geotools.Conversions._
import scala.collection.JavaConversions._
private val attribute = attributeDescriptor.getLocalName
private val uniqueValues = mutable.Map.empty[Any, Long].withDefaultValue(0)
private var attributeIdx: Int = -1
private def getAttribute[T](f: SimpleFeature) = {
if (attributeIdx == -1) {
attributeIdx = f.getType.indexOf(attribute)
}
f.get[T](attributeIdx)
}
private def addSingularValue(f: SimpleFeature): Unit = {
val value = getAttribute[AnyRef](f)
if (value != null) {
uniqueValues(value) += 1
}
}
private def addMultiValue(f: SimpleFeature): Unit = {
val values = getAttribute[java.util.Collection[_]](f)
if (values != null) {
values.foreach(uniqueValues(_) += 1)
}
}
private val addValue: (SimpleFeature) => Unit =
if (attributeDescriptor.isList) addMultiValue else addSingularValue
// non-optimized visit
override def visit(feature: Feature): Unit = {
val f = feature.asInstanceOf[SimpleFeature]
if (filter.forall(_.evaluate(f))) {
addValue(f)
}
}
override def getResult: AttributeResult = new AttributeResult(uniqueValues.toMap)
override def execute(source: SimpleFeatureSource, query: Query): Unit = {
import org.locationtech.geomesa.filter.mergeFilters
logger.debug(s"Running Geomesa histogram process on source type ${source.getClass.getName}")
// combine filters from this process and any input collection
filter.foreach(f => query.setFilter(mergeFilters(query.getFilter, f)))
val sft = source.getSchema
val enumerated = if (attributeDescriptor.isMultiValued) {
// stats don't support list types
uniqueV5(source, query)
} else {
// TODO if !histogram, we could write a new unique skipping iterator
query.getHints.put(QueryHints.STATS_STRING, Stat.Enumeration(attribute))
query.getHints.put(QueryHints.ENCODE_STATS, java.lang.Boolean.TRUE)
// execute the query
val reader = source.getFeatures(query).features()
val enumeration = try {
// stats should always return exactly one result, even if there are no features in the table
val encoded = reader.next.getAttribute(0).asInstanceOf[String]
KryoLazyStatsUtils.decodeStat(sft)(encoded).asInstanceOf[EnumerationStat[Any]]
} finally {
reader.close()
}
enumeration.frequencies
}
uniqueValues.clear()
enumerated.foreach { case (k, v) => uniqueValues.put(k, v) }
}
private def uniqueV5(source: SimpleFeatureSource, query: Query): Iterable[(Any, Long)] = {
// only return the attribute we are interested in to reduce bandwidth
query.setPropertyNames(Seq(attribute).asJava)
// if there is no filter, try to force an attribute scan - should be fastest query
if (query.getFilter == Filter.INCLUDE && features.getSchema.getDescriptor(attribute).isIndexed) {
query.setFilter(AttributeVisitor.getIncludeAttributeFilter(attribute))
}
// execute the query
SelfClosingIterator(source.getFeatures(query).features()).foreach(addValue)
uniqueValues.toMap
}
}
object AttributeVisitor {
lazy val ff = CommonFactoryFinder.getFilterFactory2
/**
* Returns a filter that is equivalent to Filter.INCLUDE, but against the attribute index.
*
* @param attribute attribute to query
* @return
*/
def getIncludeAttributeFilter(attribute: String): Filter =
ff.greaterOrEqual(ff.property(attribute), ff.literal(""))
}
/**
* Result class to hold the attribute histogram
*
* @param attributes result
*/
class AttributeResult(val attributes: Map[Any, Long]) extends AbstractCalcResult {
override def getValue: java.util.Map[Any, Long] = attributes.asJava
override def isCompatible(targetResults: CalcResult): Boolean =
targetResults.isInstanceOf[AttributeResult] || targetResults == CalcResult.NULL_RESULT
override def merge(resultsToAdd: CalcResult): CalcResult = {
if (!isCompatible(resultsToAdd)) {
throw new IllegalArgumentException("Parameter is not a compatible type")
} else if (resultsToAdd == CalcResult.NULL_RESULT) {
this
} else if (resultsToAdd.isInstanceOf[AttributeResult]) {
val toAdd = resultsToAdd.getValue.asInstanceOf[Map[Any, Long]]
// note: ++ on maps keeps the second map's value for keys present in both, otherwise the first map's value
val merged = attributes ++ toAdd.map {
case (attr, count) => attr -> (count + attributes.getOrElse(attr, 0L))
}
new AttributeResult(merged)
} else {
throw new IllegalArgumentException(
"The CalcResults claim to be compatible, but the appropriate merge method has not been implemented.")
}
}
} | ronq/geomesa | geomesa-process/geomesa-process-vector/src/main/scala/org/locationtech/geomesa/process/analytic/UniqueProcess.scala | Scala | apache-2.0 | 11,312 |
// Copyright (C) 2019 MapRoulette contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package org.maproulette.models.dal
import java.sql.Connection
import anorm._
import javax.inject.{Inject, Singleton}
import org.maproulette.Config
import org.maproulette.data.ProjectType
import org.maproulette.exception.{InvalidException, NotFoundException}
import org.maproulette.models._
import org.maproulette.permissions.Permission
import org.maproulette.session.User
import org.maproulette.session.dal.UserGroupDAL
import org.postgresql.util.PSQLException
import play.api.db.Database
/**
* Specific functions for virtual projects
*
* @author krotstan
*/
@Singleton
class VirtualProjectDAL @Inject() (
override val db: Database,
childDAL: ChallengeDAL,
surveyDAL: SurveyDAL,
userGroupDAL: UserGroupDAL,
override val permission: Permission,
config: Config
) extends ProjectDAL(db, childDAL, surveyDAL, userGroupDAL, permission, config) {
/**
* Adds a challenge to a virtual project. You are required to have write access
* to the project you are adding the challenge to
*
* @param projectId The id of the virtual parent project
* @param challengeId The id of the challenge that you are moving
* @param c an implicit connection
*/
def addChallenge(projectId: Long, challengeId: Long, user: User)(
implicit c: Option[Connection] = None
): Option[Project] = {
this.permission.hasWriteAccess(ProjectType(), user)(projectId)
this.retrieveById(projectId) match {
case Some(p) =>
if (!p.isVirtual.getOrElse(false)) {
throw new InvalidException(s"Project must be a virtual project to add a challenge.")
}
case None => throw new NotFoundException(s"No project with id $projectId found.")
}
this.withMRTransaction { implicit c =>
try {
val query =
s"""INSERT INTO virtual_project_challenges (project_id, challenge_id)
VALUES ($projectId, $challengeId)"""
SQL(query).execute()
} catch {
case e: PSQLException if (e.getSQLState == "23505") => //ignore
case other: Throwable =>
throw new InvalidException(
s"Unable to add challenge ${challengeId} to Virtual Project ${projectId}. " +
other.getMessage
)
}
None
}
}
/**
* Removes a challenge from a virtual project. You are required to have write access
* to the project you are removing the challenge from.
*
* @param projectId The id of the virtual parent project
* @param challengeId The id of the challenge that you are moving
* @param c an implicit connection
*/
def removeChallenge(projectId: Long, challengeId: Long, user: User)(
implicit c: Option[Connection] = None
): Option[Project] = {
this.permission.hasWriteAccess(ProjectType(), user)(projectId)
this.retrieveById(projectId) match {
case Some(p) =>
if (!p.isVirtual.getOrElse(false)) {
throw new InvalidException(s"Project must be a virtual project to remove a challenge.")
}
case None => throw new NotFoundException(s"No project with id $projectId found.")
}
this.withMRTransaction { implicit c =>
val query =
s"""DELETE FROM virtual_project_challenges
WHERE project_id=$projectId AND challenge_id=$challengeId"""
SQL(query).execute()
None
}
}
}
| Crashfreak/maproulette2 | app/org/maproulette/models/dal/VirtualProjectDAL.scala | Scala | apache-2.0 | 3,541 |
package TAPL
import TAPL.Util._
object EquiRec {
trait Alg[E, T] extends Typed.Alg[E, T] with RecType.Alg[T] with TypeVar.Alg[T]
trait Print extends Alg[String, String] with Typed.Print with RecType.Print with TypeVar.Print
trait Parser[E, T, L <: {val pE : Util.PackratParser[E]; val pT : Util.PackratParser[T]}]
extends Typed.Parser[E, T, L] with RecType.Parser[T, L] with TypeVar.Parser[T, L] {
val pEquiRecE = pTypedE
val pEquiRecT = pTypedT | pRecTypeT | pTypeVarT
}
}
object TestEquiRec {
class List[E, T](pe: PackratParser[E], pt: PackratParser[T]) {
val pE = pe
val pT = pt
}
def parse[E, T](inp: String)(alg: EquiRec.Alg[E, T]) = {
def parser(l: => List[E, T]): List[E, T] = {
val lang = new EquiRec.Parser[E, T, List[E, T]] {}
new List[E, T](lang.pEquiRecE(alg)(l), lang.pEquiRecT(alg)(l))
}
runParser(fix(parser).pE)(inp)
}
def parseAndPrint(inp: String) = parse(inp)(new EquiRec.Print {})
} | hy-zhang/parser | Scala/Old/TAPL/EquiRec.scala | Scala | bsd-3-clause | 975 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.yggdrasil
package jdbm3
import java.time._
import quasar.precog._
import quasar.precog.common._
import quasar.time.{DateTimeInterval, OffsetDate}
import scala.reflect.ClassTag
/**
* Defines a base set of codecs that are often used in `RowFormat`s.
*/
trait StdCodecs {
implicit def LongCodec: Codec[Long]
implicit def DoubleCodec: Codec[Double]
implicit def BigDecimalCodec: Codec[BigDecimal]
implicit def StringCodec: Codec[String]
implicit def BooleanCodec: Codec[Boolean]
implicit def OffsetDateTimeCodec: Codec[OffsetDateTime]
implicit def OffsetTimeCodec: Codec[OffsetTime]
implicit def OffsetDateCodec: Codec[OffsetDate]
implicit def LocalDateTimeCodec: Codec[LocalDateTime]
implicit def LocalTimeCodec: Codec[LocalTime]
implicit def LocalDateCodec: Codec[LocalDate]
implicit def IntervalCodec: Codec[DateTimeInterval]
implicit def BitSetCodec: Codec[BitSet]
implicit def RawBitSetCodec: Codec[Array[Int]]
implicit def IndexedSeqCodec[A](implicit elemCodec: Codec[A]): Codec[IndexedSeq[A]]
implicit def ArrayCodec[A](implicit elemCodec: Codec[A], m: ClassTag[A]): Codec[Array[A]]
def codecForCValueType[A](cType: CValueType[A]): Codec[A] = cType match {
case CBoolean => BooleanCodec
case CString => StringCodec
case CLong => LongCodec
case CDouble => DoubleCodec
case CNum => BigDecimalCodec
case COffsetDateTime => OffsetDateTimeCodec
case COffsetTime => OffsetTimeCodec
case COffsetDate => OffsetDateCodec
case CLocalDateTime => LocalDateTimeCodec
case CLocalTime => LocalTimeCodec
case CLocalDate => LocalDateCodec
case CInterval => IntervalCodec
case CArrayType(elemType) => ArrayCodec(codecForCValueType(elemType), elemType.classTag)
}
}
trait RowFormatCodecs extends StdCodecs { self: RowFormat =>
implicit def LongCodec: Codec[Long] = Codec.PackedLongCodec
implicit def DoubleCodec: Codec[Double] = Codec.DoubleCodec
implicit def BigDecimalCodec: Codec[BigDecimal] = Codec.BigDecimalCodec
implicit def StringCodec: Codec[String] = Codec.Utf8Codec
implicit def BooleanCodec: Codec[Boolean] = Codec.BooleanCodec
implicit def OffsetDateTimeCodec: Codec[OffsetDateTime] = Codec.OffsetDateTimeCodec
implicit def OffsetTimeCodec: Codec[OffsetTime] = Codec.OffsetTimeCodec
implicit def OffsetDateCodec: Codec[OffsetDate] = Codec.OffsetDateCodec
implicit def LocalDateTimeCodec: Codec[LocalDateTime] = Codec.LocalDateTimeCodec
implicit def LocalTimeCodec: Codec[LocalTime] = Codec.LocalTimeCodec
implicit def LocalDateCodec: Codec[LocalDate] = Codec.LocalDateCodec
implicit def IntervalCodec: Codec[DateTimeInterval] = Codec.IntervalCodec
// implicit def BitSetCodec: Codec[BitSet] = Codec.BitSetCodec
//@transient implicit lazy val BitSetCodec: Codec[BitSet] = Codec.SparseBitSetCodec(columnRefs.size)
@transient implicit lazy val BitSetCodec: Codec[BitSet] = Codec.SparseBitSetCodec(columnRefs.size)
@transient implicit lazy val RawBitSetCodec: Codec[Array[Int]] = Codec.SparseRawBitSetCodec(columnRefs.size)
implicit def IndexedSeqCodec[A](implicit elemCodec: Codec[A]): Codec[IndexedSeq[A]] = Codec.IndexedSeqCodec(elemCodec)
implicit def ArrayCodec[A](implicit elemCodec: Codec[A], m: ClassTag[A]): Codec[Array[A]] = Codec.ArrayCodec(elemCodec)(m)
}
| jedesah/Quasar | yggdrasil/src/main/scala/quasar/yggdrasil/jdbm3/StdCodecs.scala | Scala | apache-2.0 | 4,128 |
package filodb.memory.format
import java.nio.ByteBuffer
import java.sql.Timestamp
import scala.reflect.ClassTag
import org.agrona.DirectBuffer
import org.agrona.concurrent.UnsafeBuffer
import org.joda.time.DateTime
import spire.syntax.cfor._
import filodb.memory.format.vectors.Histogram
/**
* A generic trait for reading typed values out of a row of data.
* Used for both reading out of Filo vectors as well as for RowToVectorBuilder,
* which means it can be used to compose heterogeneous Filo vectors together.
*/
// scalastyle:off
trait RowReader {
def notNull(columnNo: Int): Boolean
def getBoolean(columnNo: Int): Boolean
def getInt(columnNo: Int): Int
def getLong(columnNo: Int): Long
def getDouble(columnNo: Int): Double
def getFloat(columnNo: Int): Float
def getString(columnNo: Int): String
def getAny(columnNo: Int): Any
def getBlobBase(columnNo: Int): Any
def getBlobOffset(columnNo: Int): Long
def getBlobNumBytes(columnNo: Int): Int // Total number of bytes for the blob
// By default this is not implemented as histograms can be parsed from multiple serialized forms or actual objects
def getHistogram(columnNo: Int): Histogram = ???
/**
* Retrieves a view into the blob at column columnNo without duplicating contents.
* Smart implementations could reuse the same UnsafeBuffer to avoid allocations.
* This default implementation simply allocates a new one.
*/
def blobAsBuffer(columnNo: Int): DirectBuffer = {
val buf = new UnsafeBuffer(Array.empty[Byte])
UnsafeUtils.wrapDirectBuf(getBlobBase(columnNo), getBlobOffset(columnNo), getBlobNumBytes(columnNo), buf)
buf
}
final def getBuffer(columnNo: Int): ByteBuffer = {
val length = getBlobNumBytes(columnNo)
getBlobBase(columnNo) match {
case UnsafeUtils.ZeroPointer => // offheap
UnsafeUtils.asDirectBuffer(getBlobOffset(columnNo), length)
case array: Array[Byte] =>
ByteBuffer.wrap(array, (getBlobOffset(columnNo) - UnsafeUtils.arayOffset).toInt, length)
}
}
// def getUtf8MediumOffset(columnNo: Int): Long
// Please override final def if your RowReader has a faster implementation
def filoUTF8String(columnNo: Int): ZeroCopyUTF8String = getAny(columnNo) match {
case s: String =>
Option(s).map(ZeroCopyUTF8String.apply).getOrElse(ZeroCopyUTF8String.empty)
case z: ZeroCopyUTF8String => z
case null => ZeroCopyUTF8String.NA
}
/**
* This method serves two purposes.
* For RowReaders that need to parse from some input source, such as CSV,
* the ClassTag gives a way for per-type parsing for non-primitive types.
* For RowReaders for fast reading paths, such as Spark, the default
* implementation serves as a fast way to read from objects.
*/
def as[T: ClassTag](columnNo: Int): T = getAny(columnNo).asInstanceOf[T]
}
import filodb.memory.format.RowReader._
// A RowReader that knows how to hashcode and compare its individual elements. Extractors must
// correspond to the schema. This could allow partition keys to be wrapped directly around raw ingest
// elements without converting to BinaryRecord first
trait SchemaRowReader extends RowReader {
def extractors: Array[TypedFieldExtractor[_]]
// NOTE: This is an EXTREMELY HOT code path, needs to be super optimized. No standard Scala collection
// or slow functional code here.
override def hashCode: Int = {
var hash = 0
cforRange { 0 until extractors.size } { i =>
hash ^= extractors(i).getField(this, i).hashCode
}
hash
}
override def equals(other: Any): Boolean = other match {
case reader: RowReader =>
cforRange { 0 until extractors.size } { i =>
if (extractors(i).compare(this, reader, i) != 0) return false
}
true
case other: Any =>
false
}
}
/**
* An example of a RowReader that can read from Scala tuples containing Option[_]
*/
final case class TupleRowReader(tuple: Product) extends RowReader {
def notNull(columnNo: Int): Boolean =
tuple.productElement(columnNo).asInstanceOf[Option[Any]].nonEmpty
def getBoolean(columnNo: Int): Boolean = tuple.productElement(columnNo) match {
case Some(x: Boolean) => x
case None => false
}
def getInt(columnNo: Int): Int = tuple.productElement(columnNo) match {
case Some(x: Int) => x
case None => 0
}
def getLong(columnNo: Int): Long = tuple.productElement(columnNo) match {
case Some(x: Long) => x
case Some(x: Timestamp) => x.getTime
case None => 0L
}
def getDouble(columnNo: Int): Double = tuple.productElement(columnNo) match {
case Some(x: Double) => x
case None => 0.0
}
def getFloat(columnNo: Int): Float = tuple.productElement(columnNo) match {
case Some(x: Float) => x
case None => 0.0F
}
def getString(columnNo: Int): String = tuple.productElement(columnNo) match {
case Some(x: String) => x
case None => null
}
def getAny(columnNo: Int): Any =
tuple.productElement(columnNo).asInstanceOf[Option[Any]].getOrElse(null)
override def getBlobBase(columnNo: Int): Any = ???
override def getBlobOffset(columnNo: Int): Long = ???
override def getBlobNumBytes(columnNo: Int): Int = ???
}
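// Illustrative usage sketch; the object and values below are hypothetical, not part of the original file.
object TupleRowReaderUsage {
  def demo(): Unit = {
    val reader = TupleRowReader((Some(1), Some("a"), None))
    assert(reader.getInt(0) == 1)       // Some(1) unwraps to 1
    assert(reader.getString(1) == "a")
    assert(!reader.notNull(2))          // None is treated as an absent value
  }
}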
/**
* A RowReader for working with OpenCSV or anything else that emits string[]
*/
final case class ArrayStringRowReader(strings: Array[String]) extends RowReader {
//scalastyle:off
def notNull(columnNo: Int): Boolean = strings(columnNo) != null && strings(columnNo) != ""
//scalastyle:on
def getBoolean(columnNo: Int): Boolean = strings(columnNo).toBoolean
def getInt(columnNo: Int): Int = strings(columnNo).toInt
def getLong(columnNo: Int): Long = try {
strings(columnNo).toLong
} catch {
case ex: NumberFormatException => DateTime.parse(strings(columnNo)).getMillis
}
def getDouble(columnNo: Int): Double = strings(columnNo).toDouble
def getFloat(columnNo: Int): Float = strings(columnNo).toFloat
def getString(columnNo: Int): String = strings(columnNo)
def getAny(columnNo: Int): Any = strings(columnNo)
override def as[T: ClassTag](columnNo: Int): T = {
implicitly[ClassTag[T]].runtimeClass.asInstanceOf[T]
}
override def toString: String = s"ArrayStringRR(${strings.mkString(", ")})"
override def getBlobBase(columnNo: Int): Any = ???
override def getBlobOffset(columnNo: Int): Long = ???
override def getBlobNumBytes(columnNo: Int): Int = ???
// override def getUtf8MediumOffset(columnNo: Int): Long = ???
}
// scalastyle:off
/**
* A RowReader that changes the column numbers around of an original RowReader. It could be used to
* present a subset of the original columns, for example.
* @param columnRoutes an array of original column numbers, one entry per routed column. For example:
* Array(0, 2, 5) means a getInt(1) would map to a getInt(2) on the original RowReader
*/
//noinspection ScalaStyle
trait RoutingReader extends RowReader {
def origReader: RowReader
def columnRoutes: Array[Int]
final def notNull(columnNo: Int): Boolean = origReader.notNull(columnRoutes(columnNo))
final def getBoolean(columnNo: Int): Boolean = origReader.getBoolean(columnRoutes(columnNo))
final def getInt(columnNo: Int): Int = origReader.getInt(columnRoutes(columnNo))
final def getLong(columnNo: Int): Long = origReader.getLong(columnRoutes(columnNo))
final def getDouble(columnNo: Int): Double = origReader.getDouble(columnRoutes(columnNo))
final def getFloat(columnNo: Int): Float = origReader.getFloat(columnRoutes(columnNo))
final def getString(columnNo: Int): String = origReader.getString(columnRoutes(columnNo))
final def getAny(columnNo: Int): Any = origReader.getAny(columnRoutes(columnNo))
final def getBlobBase(columnNo: Int): Any = ???
final def getBlobOffset(columnNo: Int): Long = ???
final def getBlobNumBytes(columnNo: Int): Int = ???
override def equals(other: Any): Boolean = other match {
case RoutingRowReader(orig, _) => orig.equals(origReader)
case r: RowReader => r.equals(origReader)
case other: Any => false
}
}
final case class RoutingRowReader(origReader: RowReader, columnRoutes: Array[Int]) extends RoutingReader
// A RoutingRowReader which is also a SchemaRowReader
final case class SchemaRoutingRowReader(origReader: RowReader,
columnRoutes: Array[Int],
extractors: Array[TypedFieldExtractor[_]])
extends RoutingReader with SchemaRowReader {
override def toString: String = s"SchemaRoutingRR($origReader, ${columnRoutes.toList})"
}
final case class SingleValueRowReader(value: Any) extends RowReader {
def notNull(columnNo: Int): Boolean = Option(value).isDefined
def getBoolean(columnNo: Int): Boolean = value.asInstanceOf[Boolean]
def getInt(columnNo: Int): Int = value.asInstanceOf[Int]
def getLong(columnNo: Int): Long = value.asInstanceOf[Long]
def getDouble(columnNo: Int): Double = value.asInstanceOf[Double]
def getFloat(columnNo: Int): Float = value.asInstanceOf[Float]
def getString(columnNo: Int): String = value.asInstanceOf[String]
override def getHistogram(columnNo: Int): Histogram = value.asInstanceOf[Histogram]
def getAny(columnNo: Int): Any = value
def getBlobBase(columnNo: Int): Any = value
def getBlobOffset(columnNo: Int): Long = 0
def getBlobNumBytes(columnNo: Int): Int = value.asInstanceOf[Array[Byte]].length
}
final case class SeqRowReader(sequence: Seq[Any]) extends RowReader {
def notNull(columnNo: Int): Boolean = true
def getBoolean(columnNo: Int): Boolean = sequence(columnNo).asInstanceOf[Boolean]
def getInt(columnNo: Int): Int = sequence(columnNo).asInstanceOf[Int]
def getLong(columnNo: Int): Long = sequence(columnNo).asInstanceOf[Long]
def getDouble(columnNo: Int): Double = sequence(columnNo).asInstanceOf[Double]
def getFloat(columnNo: Int): Float = sequence(columnNo).asInstanceOf[Float]
def getString(columnNo: Int): String = sequence(columnNo).asInstanceOf[String]
override def getHistogram(columnNo: Int): Histogram = sequence(columnNo).asInstanceOf[Histogram]
def getAny(columnNo: Int): Any = sequence(columnNo)
def getBlobBase(columnNo: Int): Any = ???
def getBlobOffset(columnNo: Int): Long = ???
def getBlobNumBytes(columnNo: Int): Int = ???
}
final case class UTF8StringRowReader(records: Iterator[ZeroCopyUTF8String]) extends Iterator[RowReader] {
var currVal: ZeroCopyUTF8String = _
private val rowReader = new RowReader {
def notNull(columnNo: Int): Boolean = true
def getBoolean(columnNo: Int): Boolean = ???
def getInt(columnNo: Int): Int = ???
def getLong(columnNo: Int): Long = ???
def getDouble(columnNo: Int): Double = ???
def getFloat(columnNo: Int): Float = ???
def getString(columnNo: Int): String = currVal.toString
def getAny(columnNo: Int): Any = currVal
def getBlobBase(columnNo: Int): Any = currVal.base
def getBlobOffset(columnNo: Int): Long = currVal.offset
def getBlobNumBytes(columnNo: Int): Int = currVal.numBytes
}
override def hasNext: Boolean = records.hasNext
override def next(): RowReader = {
currVal = records.next()
rowReader
}
}
final case class UTF8MapIteratorRowReader(records: Iterator[Map[ZeroCopyUTF8String, ZeroCopyUTF8String]]) extends Iterator[RowReader] {
var currVal: Map[ZeroCopyUTF8String, ZeroCopyUTF8String] = _
private val rowReader = new RowReader {
def notNull(columnNo: Int): Boolean = true
def getBoolean(columnNo: Int): Boolean = ???
def getInt(columnNo: Int): Int = ???
def getLong(columnNo: Int): Long = ???
def getDouble(columnNo: Int): Double = ???
def getFloat(columnNo: Int): Float = ???
def getString(columnNo: Int): String = currVal.toString
def getAny(columnNo: Int): Any = currVal
def getBlobBase(columnNo: Int): Any = ???
def getBlobOffset(columnNo: Int): Long = ???
def getBlobNumBytes(columnNo: Int): Int = ???
}
override def hasNext: Boolean = records.hasNext
override def next(): RowReader = {
currVal = records.next()
rowReader
}
}
final case class StringArrayRowReader(records: Seq[String]) extends Iterator[RowReader] {
var currVal: String = _
val iter = records.iterator
private val rowReader = new RowReader {
def notNull(columnNo: Int): Boolean = true
def getBoolean(columnNo: Int): Boolean = ???
def getInt(columnNo: Int): Int = ???
def getLong(columnNo: Int): Long = ???
def getDouble(columnNo: Int): Double = ???
def getFloat(columnNo: Int): Float = ???
def getString(columnNo: Int): String = currVal
def getAny(columnNo: Int): Any = currVal
def getBlobBase(columnNo: Int): Any = ???
def getBlobOffset(columnNo: Int): Long = ???
def getBlobNumBytes(columnNo: Int): Int = ???
}
override def hasNext: Boolean = iter.hasNext
override def next(): RowReader = {
currVal = iter.next()
rowReader
}
}
final case class SchemaSeqRowReader(sequence: Seq[Any],
extractors: Array[TypedFieldExtractor[_]]) extends SchemaRowReader {
def notNull(columnNo: Int): Boolean = true
def getBoolean(columnNo: Int): Boolean = sequence(columnNo).asInstanceOf[Boolean]
def getInt(columnNo: Int): Int = sequence(columnNo).asInstanceOf[Int]
def getLong(columnNo: Int): Long = sequence(columnNo).asInstanceOf[Long]
def getDouble(columnNo: Int): Double = sequence(columnNo).asInstanceOf[Double]
def getFloat(columnNo: Int): Float = sequence(columnNo).asInstanceOf[Float]
def getString(columnNo: Int): String = sequence(columnNo).asInstanceOf[String]
override def getHistogram(columnNo: Int): Histogram = sequence(columnNo).asInstanceOf[Histogram]
def getAny(columnNo: Int): Any = sequence(columnNo)
def getBlobBase(columnNo: Int): Any = sequence(columnNo).asInstanceOf[Array[Byte]]
def getBlobOffset(columnNo: Int): Long = 0
def getBlobNumBytes(columnNo: Int): Int = sequence(columnNo).asInstanceOf[Array[Byte]].length
}
object RowReader {
import DefaultValues._
// Type class for extracting a field of a specific type .. and comparing a field from two RowReaders
trait TypedFieldExtractor[@specialized F] {
def getField(reader: RowReader, columnNo: Int): F
def getFieldOrDefault(reader: RowReader, columnNo: Int): F = getField(reader, columnNo)
def compare(reader: RowReader, other: RowReader, columnNo: Int): Int
}
// A generic FieldExtractor for objects
case class ObjectFieldExtractor[T: ClassTag](default: T) extends TypedFieldExtractor[T] {
final def getField(reader: RowReader, columnNo: Int): T = reader.as[T](columnNo)
final override def getFieldOrDefault(reader: RowReader, columnNo: Int): T =
if (reader.notNull(columnNo)) getField(reader, columnNo) else default
final def compare(reader: RowReader, other: RowReader, columnNo: Int): Int =
if (getFieldOrDefault(reader, columnNo) == getFieldOrDefault(other, columnNo)) 0 else 1
}
class WrappedExtractor[@specialized T, F: TypedFieldExtractor](func: F => T)
extends TypedFieldExtractor[T] {
val orig = implicitly[TypedFieldExtractor[F]]
def getField(reader: RowReader, columnNo: Int): T = func(orig.getField(reader, columnNo))
def compare(reader: RowReader, other: RowReader, col: Int): Int = orig.compare(reader, other, col)
}
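  // Illustrative sketch (hypothetical value, not part of the original API): wrap the Long extractor
  // defined below to expose a Long column as a String:
  //   val longAsString = new WrappedExtractor[String, Long](_.toString)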
implicit object BooleanFieldExtractor extends TypedFieldExtractor[Boolean] {
final def getField(reader: RowReader, columnNo: Int): Boolean = reader.getBoolean(columnNo)
final def compare(reader: RowReader, other: RowReader, columnNo: Int): Int =
java.lang.Boolean.compare(getFieldOrDefault(reader, columnNo), getFieldOrDefault(other, columnNo))
}
implicit object LongFieldExtractor extends TypedFieldExtractor[Long] {
final def getField(reader: RowReader, columnNo: Int): Long = reader.getLong(columnNo)
final def compare(reader: RowReader, other: RowReader, columnNo: Int): Int =
java.lang.Long.compare(getFieldOrDefault(reader, columnNo), getFieldOrDefault(other, columnNo))
}
implicit object IntFieldExtractor extends TypedFieldExtractor[Int] {
final def getField(reader: RowReader, columnNo: Int): Int = reader.getInt(columnNo)
final def compare(reader: RowReader, other: RowReader, columnNo: Int): Int =
java.lang.Integer.compare(getFieldOrDefault(reader, columnNo), getFieldOrDefault(other, columnNo))
}
implicit object DoubleFieldExtractor extends TypedFieldExtractor[Double] {
final def getField(reader: RowReader, columnNo: Int): Double = reader.getDouble(columnNo)
final def compare(reader: RowReader, other: RowReader, columnNo: Int): Int =
java.lang.Double.compare(getFieldOrDefault(reader, columnNo), getFieldOrDefault(other, columnNo))
}
implicit object FloatFieldExtractor extends TypedFieldExtractor[Float] {
final def getField(reader: RowReader, columnNo: Int): Float = reader.getFloat(columnNo)
final def compare(reader: RowReader, other: RowReader, columnNo: Int): Int =
java.lang.Float.compare(getFieldOrDefault(reader, columnNo), getFieldOrDefault(other, columnNo))
}
implicit object StringFieldExtractor extends TypedFieldExtractor[String] {
final def getField(reader: RowReader, columnNo: Int): String = reader.getString(columnNo)
override final def getFieldOrDefault(reader: RowReader, columnNo: Int): String = {
val str = reader.getString(columnNo)
if (str == null) DefaultString else str
}
final def compare(reader: RowReader, other: RowReader, columnNo: Int): Int =
getFieldOrDefault(reader, columnNo).compareTo(getFieldOrDefault(other, columnNo))
}
implicit object UTF8StringFieldExtractor extends TypedFieldExtractor[ZeroCopyUTF8String] {
final def getField(reader: RowReader, columnNo: Int): ZeroCopyUTF8String =
reader.filoUTF8String(columnNo)
// TODO: do UTF8 comparison so we can avoid having to deserialize
final def compare(reader: RowReader, other: RowReader, columnNo: Int): Int =
getFieldOrDefault(reader, columnNo).compareTo(getFieldOrDefault(other, columnNo))
}
implicit object DateTimeFieldExtractor extends TypedFieldExtractor[DateTime] {
final def getField(reader: RowReader, columnNo: Int): DateTime = reader.as[DateTime](columnNo)
override final def getFieldOrDefault(reader: RowReader, columnNo: Int): DateTime = {
val dt = reader.as[DateTime](columnNo)
if (dt == null) DefaultDateTime else dt
}
final def compare(reader: RowReader, other: RowReader, columnNo: Int): Int =
getFieldOrDefault(reader, columnNo).compareTo(getFieldOrDefault(other, columnNo))
}
implicit object TimestampFieldExtractor extends TypedFieldExtractor[Timestamp] {
final def getField(reader: RowReader, columnNo: Int): Timestamp = reader.as[Timestamp](columnNo)
override final def getFieldOrDefault(reader: RowReader, columnNo: Int): Timestamp = {
val ts = reader.as[Timestamp](columnNo)
if (ts == null) DefaultTimestamp else ts
}
// TODO: compare the Long, instead of deserializing and comparing Timestamp object
final def compare(reader: RowReader, other: RowReader, columnNo: Int): Int =
getFieldOrDefault(reader, columnNo).compareTo(getFieldOrDefault(other, columnNo))
}
implicit object HistogramExtractor extends TypedFieldExtractor[Histogram] {
final def getField(reader: RowReader, columnNo: Int): Histogram = reader.getHistogram(columnNo)
final def compare(reader: RowReader, other: RowReader, columnNo: Int): Int =
getFieldOrDefault(reader, columnNo).compare(getFieldOrDefault(other, columnNo))
}
}
| filodb/FiloDB | memory/src/main/scala/filodb.memory/format/RowReader.scala | Scala | apache-2.0 | 19,682 |
package org.mbs3.elasticsearch.irc
import org.pircbotx._
import org.pircbotx.hooks._
import org.pircbotx.hooks.events._
import org.mbs3.elasticsearch.irc.commands.CommandListener
object ElasticsearchBot extends App {
val config = new Configuration.Builder()
.setName("martin_esbot")
.setLogin("elasticsearch@bot")
.setCapEnabled(true)
.setShutdownHookEnabled(false)
.setServerHostname("irc.freenode.net")
.addAutoJoinChannel("#martin-elasticsearch-test")
.addAutoJoinChannel("#gatorlug")
.addAutoJoinChannel("#archlinux")
.addListener(new LogEverythingListener)
.addListener(new CommandListener)
.addListener(new ElasticsearchListener("elasticsearch", "localhost", 9300))
.buildConfiguration
val bot = new PircBotX(config);
bot.startBot();
} | martinb3/capek | src/main/scala/org/mbs3/elasticsearch/irc/ElasticsearchBot.scala | Scala | apache-2.0 | 809 |
package ch.epfl.scala.index
package server
package routes
package api
package impl
import data._
import data.cleanup.GithubRepoExtractor
import data.download.PlayWsDownloader
import data.elastic._
import data.github._
import data.maven.{MavenModel, PomsReader, DownloadParentPoms}
import data.project.ProjectConvert
import model.misc.GithubRepo
import model.{Project, Release}
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.StatusCode
import com.sksamuel.elastic4s._
import ElasticDsl._
import scala.concurrent.Future
import scala.util.{Try, Success, Failure}
import java.io.{StringWriter, PrintWriter}
private[api] class PublishProcess(paths: DataPaths, dataRepository: DataRepository)(
implicit val system: ActorSystem,
implicit val materializer: ActorMaterializer
) extends PlayWsDownloader {
import system.dispatcher
/**
* Writes the pom file to disk if it is a pom file (SBT will also send *.pom.sha1 and *.pom.md5)
* - will check if there is an SCM tag for GitHub
* - will check if the publishing user has write access to the provided repository
*
* Response codes:
* Created - 201 - Data accepted and stored (the default for all files that are not *.pom)
* NoContent - 204 - No GitHub SCM tag provided
* Forbidden - 403 - No write access to the GitHub repository
*
* @param data the Publish data class holding all the data
* @return
*/
def writeFiles(data: PublishData): Future[(StatusCode, String)] = Future {
if (data.isPom) {
data.writeTemp()
getTmpPom(data) match {
case List(Success((pom, _, _))) =>
getGithubRepo(pom) match {
case None => {
data.deleteTemp()
(NoContent, "No Github Repo")
}
case Some(repo) => {
if (data.userState.hasPublishingAuthority || data.userState.repos.contains(repo)) {
data.writePom(paths)
data.deleteTemp()
updateIndex(repo, pom, data)
(Created, "Published release")
} else {
data.deleteTemp()
(Forbidden, s"${data.userState.user.login} cannot publish to ${repo.toString}")
}
}
}
case List(Failure(e)) => {
val sw = new StringWriter()
val pw = new PrintWriter(sw)
e.printStackTrace(pw)
(BadRequest, "Invalid pom: " + sw.toString())
}
case _ => (BadRequest, "Impossible ?")
}
} else {
if(data.userState.isSonatype) ((BadRequest, "Not a POM"))
else ((Created, "ignoring")) // for sbt, ignore SHA1, etc
}
}
/**
* Convert the POM XML data to a Maven Model
*
* @param data the XML String data
* @return
*/
private def getTmpPom(data: PublishData): List[Try[(MavenModel, LocalRepository, String)]] = {
val path = data.tempPath.getParent
val downloadParentPomsStep =
new DownloadParentPoms(LocalRepository.MavenCentral, paths, Some(path))
downloadParentPomsStep.run()
PomsReader.tmp(paths, path).load()
}
/**
* try to extract a github repository from scm tag in Maven Model
*
* @param pom the Maven model
* @return
*/
private def getGithubRepo(pom: MavenModel): Option[GithubRepo] =
(new GithubRepoExtractor(paths)).apply(pom)
/**
* Main task to update the scaladex index.
* - download GitHub info if allowed
* - download GitHub contributors if allowed
* - download GitHub readme if allowed
* - search for the project and
*   1. if the project exists: update it, search for the release, then update or create the release
*   2. if the project does not exist: create a new project
*
* @param repo the Github repo reference model
* @param pom the Maven Model
* @param data the main publish data
* @return
*/
private def updateIndex(repo: GithubRepo, pom: MavenModel, data: PublishData): Future[Unit] = {
println("updating " + pom.artifactId)
new GithubDownload(paths, Some(data.credentials))
.run(repo, data.downloadInfo, data.downloadReadme, data.downloadContributors)
val githubRepoExtractor = new GithubRepoExtractor(paths)
val Some(GithubRepo(organization, repository)) = githubRepoExtractor(pom)
val projectReference = Project.Reference(organization, repository)
def updateProjectReleases(project: Option[Project], releases: List[Release]): Future[Unit] = {
val repository =
if (data.userState.hasPublishingAuthority) LocalRepository.MavenCentral
else LocalRepository.UserProvided
Meta.append(paths, Meta(data.hash, data.path, data.created), repository)
val converter = new ProjectConvert(paths)
val (newProject, newReleases) = converter(
pomsRepoSha = List((pom, repository, data.hash)),
cachedReleases = cachedReleases
).head
cachedReleases = upserts(cachedReleases, projectReference, newReleases)
val updatedProject = newProject.copy(
keywords = data.keywords,
liveData = true
)
val projectUpdate =
project match {
case Some(project) => {
esClient
.execute(
update(project.id.get).in(indexName / projectsCollection).doc(updatedProject)
)
.map(_ => println("updating project " + pom.artifactId))
}
case None =>
esClient
.execute(
index.into(indexName / projectsCollection).source(updatedProject)
)
.map(_ => println("inserting project " + pom.artifactId))
}
val releaseUpdate =
if (!releases.exists(r => r.reference == newReleases.head.reference)) {
// create new release
esClient
.execute(
index
.into(indexName / releasesCollection)
.source(
newReleases.head.copy(test = data.test, liveData = true)
))
.map(_ => ())
} else { Future.successful(()) }
for {
_ <- projectUpdate
_ <- releaseUpdate
} yield ()
}
for {
project <- dataRepository.project(projectReference)
releases <- dataRepository.releases(projectReference, None)
_ <- updateProjectReleases(project, releases)
} yield ()
}
private var cachedReleases = Map.empty[Project.Reference, Set[Release]]
}
| adamwy/scaladex | server/src/main/scala/ch.epfl.scala.index.server/routes/api/impl/PublishProcess.scala | Scala | bsd-3-clause | 6,556 |
package com.navercorp.graph
import scala.collection.mutable.ArrayBuffer
object GraphOps {
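  // setupAlias builds the alias (J) and probability (q) tables of the alias method for sampling from a
  // discrete distribution: each weight is normalised to K * w / sum, under-full buckets are paired with
  // over-full ones, and drawAlias can then sample an index in O(1) proportionally to its weight.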
def setupAlias(nodeWeights: Array[(Long, Double)]): (Array[Int], Array[Double]) = {
val K = nodeWeights.length
val J = Array.fill(K)(0)
val q = Array.fill(K)(0.0)
val smaller = new ArrayBuffer[Int]()
val larger = new ArrayBuffer[Int]()
val sum = nodeWeights.map(_._2).sum
nodeWeights.zipWithIndex.foreach { case ((nodeId, weight), i) =>
q(i) = K * weight / sum
if (q(i) < 1.0) {
smaller.append(i)
} else {
larger.append(i)
}
}
while (smaller.nonEmpty && larger.nonEmpty) {
val small = smaller.remove(smaller.length - 1)
val large = larger.remove(larger.length - 1)
J(small) = large
q(large) = q(large) + q(small) - 1.0
if (q(large) < 1.0) smaller.append(large)
else larger.append(large)
}
(J, q)
}
def setupEdgeAlias(p: Double = 1.0, q: Double = 1.0)(srcId: Long, srcNeighbors: Array[(Long, Double)], dstNeighbors: Array[(Long, Double)]): (Array[Int], Array[Double]) = {
val neighbors_ = dstNeighbors.map { case (dstNeighborId, weight) =>
var unnormProb = weight / q
if (srcId == dstNeighborId) unnormProb = weight / p
else if (srcNeighbors.exists(_._1 == dstNeighborId)) unnormProb = weight
(dstNeighborId, unnormProb)
}
setupAlias(neighbors_)
}
def drawAlias(J: Array[Int], q: Array[Double]): Int = {
val K = J.length
val kk = math.floor(math.random * K).toInt
if (math.random < q(kk)) kk
else J(kk)
}
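  // Illustrative sampling sketch (weights are hypothetical):
  //   val (j, q) = setupAlias(Array((101L, 0.5), (102L, 1.5), (103L, 2.0)))
  //   val idx = drawAlias(j, q) // index into the weight array, drawn proportionally to its weight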
lazy val createUndirectedEdge = (srcId: Long, dstId: Long, weight: Double) => {
Array(
(srcId, Array((dstId, weight))),
(dstId, Array((srcId, weight)))
)
}
lazy val createDirectedEdge = (srcId: Long, dstId: Long, weight: Double) => {
Array(
(srcId, Array((dstId, weight)))
)
}
}
| aditya-grover/node2vec | node2vec_spark/src/main/scala/com/navercorp/graph/GraphOps.scala | Scala | mit | 1,915 |
package chapter.three
import ExerciseOne.randomNaturalNumbers
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
@RunWith(classOf[JUnitRunner])
class ExerciseOneSpec extends FlatSpec with Matchers {
// todo: make unit test more robust
"randomNaturalNumbers" should "return an array of n random integers between 0 (inclusive) and n (exclusive)" in {
val n = 10
val numbers = randomNaturalNumbers(n)
numbers should have size n
numbers.filter(_ < 0) should have size 0
numbers.filter(_ >= n) should have size 0
}
}
| deekim/impatient-scala | src/test/scala/chapter/three/ExerciseOneSpec.scala | Scala | apache-2.0 | 585 |
package org.kimbasoft.scala.codingstyle.functional.implicits
/**
* Demonstrates context bounds and the `implicitly` method for summoning type-class instances.
*
* @since 1.0
*/
object Implicitly {
abstract class Decorator[T] {
def decorate(obj: T): String
}
implicit object StringDecorator extends Decorator[String] {
def decorate(obj: String): String = "$" + obj
}
implicit object IntDecorator extends Decorator[Int] {
def decorate(obj: Int): String = "#" + obj
}
class Printer {
/**
* In order to get a suitable Decorator object for type T this method
* adds a second argument list with an implicit argument for the Decorator.
* Since the decorator object is directly injected into a named variable
* in can be used directly. However, the use of a second argument list
* for this case can be a bit ugly.
*/
def print1[T](obj: T)(implicit decorator: Decorator[T]): Unit = {
println(decorator.decorate(obj))
}
/**
* To avoid the ugliness of a second argument list this method performs a
* Context Binding on type T to Decorator. In other words, it prevents types
* from being used as T for which no Decorator object exists in the current
* context. Since this Context Binding removes the second argument it is
* not possible to access the decorator object directly anymore. This is where
* the 'implicitly[]' method comes into play. It will be parametrized with the
* object that type T is bound to, look it up and return its instance thus
* allowing for the invocation of the object's methods.
*/
def print2[T : Decorator](obj: T) = {
println(implicitly[Decorator[T]].decorate(obj))
}
}
def main(args: Array[String]) {
val strPrinter = new Printer
println("-- Implicit Argument ---------------")
strPrinter.print1("Hello")
strPrinter.print1(123)
println("-- Implicitly() --------------------")
strPrinter.print2("Good-bye")
strPrinter.print2(321)
}
}
| kimba74/sandbox-scala | src/main/scala/org/kimbasoft/scala/codingstyle/functional/implicits/Implicitly.scala | Scala | gpl-3.0 | 1,964 |
package sri.sangria.web.mutations
import sri.relay.mutation.RelayMutation
import sri.relay.query.RelayQL
import sri.relay.tools.RangeAddMutationConfig
import sri.relay.tools.RelayTypes.{MutationFragment, RelayMutationConfig}
import scala.scalajs.js
import scala.scalajs.js.Dynamic.{literal => json}
import scala.scalajs.js.annotation.ScalaJSDefined
import scala.scalajs.js.{Any, Array}
@ScalaJSDefined
class AddTodoMutation(input: js.Dynamic) extends RelayMutation(input) {
override def getMutation(): MutationFragment = {
js.eval(RelayQL( """mutation{ addTodo }"""))
}
override def getVariables(): js.Object = json("text" -> props.text.toString)
override def getFatQuery(): Any = js.eval(RelayQL(
"""
fragment on AddTodoPayload {
todoEdge,
viewer {
todos
}
}
"""))
// override def getOptimisticResponse(): UndefOr[js.Object] = {
// json("todoEdge" -> json("node" -> js.Dictionary("text" -> props.text))
// )
// }
override def getConfigs() = {
js.Array(
new RangeAddMutationConfig(
parentName = "viewer",
parentID = props.viewer.id.toString,
connectionName = "todos",
edgeName = "todoEdge",
rangeBehaviors = js.Dictionary("" -> "prepend"))
)
}
} | chandu0101/sri-sangria-example | web/src/main/scala/sri/sangria/web/mutations/AddTodoMutation.scala | Scala | apache-2.0 | 1,328 |
package org.jetbrains.plugins.scala
package lang
package resolve
package processor
import com.intellij.openapi.util.Key
import com.intellij.psi._
import com.intellij.psi.scope._
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScSuperReference, ScThisReference}
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScTypeParam
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScTypeAlias, ScTypeAliasDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScObject, ScTrait, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.ScPackageImpl
import org.jetbrains.plugins.scala.lang.psi.types.ScTypeExt
import org.jetbrains.plugins.scala.lang.psi.types.api.TypeSystem
import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypingContext}
import scala.collection.Set
object ResolveProcessor {
def getQualifiedName(result: ScalaResolveResult, place: PsiElement)
(implicit typeSystem: TypeSystem): String = {
def defaultForTypeAlias(t: ScTypeAlias): String = {
if (t.getParent.isInstanceOf[ScTemplateBody] && t.containingClass != null) {
"TypeAlias:" + t.containingClass.qualifiedName + "#" + t.name
} else null
}
result.getActualElement match {
case c: ScTypeParam => null
case c: ScObject => "Object:" + c.qualifiedName
case c: PsiClass => "Class:" + c.qualifiedName
case t: ScTypeAliasDefinition if t.typeParameters.isEmpty =>
t.aliasedType(TypingContext.empty) match {
case Success(tp, elem) =>
tp.extractClass(Option(place).map(_.getProject).orNull) match {
case Some(c: ScObject) => defaultForTypeAlias(t)
case Some(td: ScTypeDefinition) if td.typeParameters.isEmpty && ScalaPsiUtil.hasStablePath(td) =>
"Class:" + td.qualifiedName
case Some(c: PsiClass) if c.getTypeParameters.isEmpty => "Class:" + c.qualifiedName
case _ => defaultForTypeAlias(t)
}
case _ => defaultForTypeAlias(t)
}
case t: ScTypeAlias => defaultForTypeAlias(t)
case p: PsiPackage => "Package:" + p.getQualifiedName
case _ => null
}
}
}
class ResolveProcessor(override val kinds: Set[ResolveTargets.Value],
val ref: PsiElement,
val name: String)
(implicit override val typeSystem: TypeSystem) extends BaseProcessor(kinds) with PrecedenceHelper[String] {
@volatile
private var resolveScope: GlobalSearchScope = null
def getResolveScope: GlobalSearchScope = {
if (resolveScope == null) {
resolveScope = ref.getResolveScope
}
resolveScope
}
protected def getPlace: PsiElement = ref
val isThisOrSuperResolve = ref.getParent match {
case _: ScThisReference | _: ScSuperReference => true
case _ => false
}
def emptyResultSet: Boolean = candidatesSet.isEmpty || levelSet.isEmpty
protected var precedence: Int = 0
/**
    * This method is useful for resetting precedence if we drop
    * all found candidates to seek implicit conversion candidates.
*/
def resetPrecedence() {
precedence = 0
}
import org.jetbrains.plugins.scala.lang.resolve.processor.PrecedenceHelper.PrecedenceTypes._
def checkImports(): Boolean = precedence <= IMPORT
def checkWildcardImports(): Boolean = precedence <= WILDCARD_IMPORT
def checkPredefinedClassesAndPackages(): Boolean = precedence <= SCALA_PREDEF
override protected def getQualifiedName(result: ScalaResolveResult): String = {
ResolveProcessor.getQualifiedName(result, getPlace)
}
protected def getTopPrecedence(result: ScalaResolveResult): Int = precedence
protected def setTopPrecedence(result: ScalaResolveResult, i: Int) {
precedence = i
}
override def isUpdateHistory: Boolean = true
override def changedLevel = {
addChangedLevelToHistory()
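    // Promotes the current level's candidates and qualified names into the accumulated sets, then clears the level buffers.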
def update: Boolean = {
val iterator = levelSet.iterator()
while (iterator.hasNext) {
candidatesSet += iterator.next()
}
qualifiedNamesSet.addAll(levelQualifiedNamesSet)
levelSet.clear()
levelQualifiedNamesSet.clear()
false
}
if (levelSet.isEmpty) true
else if (precedence == OTHER_MEMBERS) update
else !update
}
def isAccessible(named: PsiNamedElement, place: PsiElement): Boolean = {
val memb: PsiMember = {
named match {
case memb: PsiMember => memb
case pl => ScalaPsiUtil.nameContext(named) match {
case memb: PsiMember => memb
case _ => return true //something strange
}
}
}
ResolveUtils.isAccessible(memb, place)
}
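  // Adds a resolve result for the element when its name and kind match, recording accessibility and import context.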
def execute(element: PsiElement, state: ResolveState): Boolean = {
val named = element.asInstanceOf[PsiNamedElement]
def nameShadow: Option[String] = Option(state.get(ResolverEnv.nameKey))
if (nameAndKindMatch(named, state)) {
val accessible = isAccessible(named, ref)
if (accessibility && !accessible) return true
named match {
case o: ScObject if o.isPackageObject && JavaPsiFacade.getInstance(element.getProject).
findPackage(o.qualifiedName) != null =>
case pack: PsiPackage =>
val resolveResult: ScalaResolveResult =
new ScalaResolveResult(ScPackageImpl(pack), getSubst(state), getImports(state), nameShadow, isAccessible = accessible)
addResult(resolveResult)
case clazz: PsiClass if !isThisOrSuperResolve || PsiTreeUtil.isContextAncestor(clazz, ref, true) =>
addResult(new ScalaResolveResult(named, getSubst(state),
getImports(state), nameShadow, boundClass = getBoundClass(state), fromType = getFromType(state), isAccessible = accessible))
case clazz: PsiClass => //do nothing, it's wrong class or object
case _ if isThisOrSuperResolve => //do nothing for type alias
case _ =>
addResult(new ScalaResolveResult(named, getSubst(state),
getImports(state), nameShadow, boundClass = getBoundClass(state), fromType = getFromType(state), isAccessible = accessible))
}
}
true
}
protected def nameAndKindMatch(named: PsiNamedElement, state: ResolveState): Boolean = {
val nameSet = state.get(ResolverEnv.nameKey)
val elName = if (nameSet == null) {
val name = named.name
if (name == null) return false
if (name == "") return false
name
} else nameSet
val nameMatches = ScalaPsiUtil.memberNamesEquals(elName, name)
nameMatches && kindMatches(named)
}
override def getHint[T](hintKey: Key[T]): T = {
hintKey match {
case NameHint.KEY if name != "" => ScalaNameHint.asInstanceOf[T]
case _ => super.getHint(hintKey)
}
}
override def candidatesS: Set[ScalaResolveResult] = {
var res = candidatesSet
val iterator = levelSet.iterator()
while (iterator.hasNext) {
res += iterator.next()
}
if (!compareWithIgnoredSet(res)) {
res.clear()
restartFromHistory()
//now let's add everything again
res = candidatesSet
val iterator = levelSet.iterator()
while (iterator.hasNext) {
res += iterator.next()
}
}
/*
This is also hack for self type elements to filter duplicates.
For example:
trait IJTest {
self : MySub =>
type FooType
protected implicit def d: FooType
}
trait MySub extends IJTest {
type FooType = Long
}
*/
res.filter {
case r@ScalaResolveResult(_: ScTypeAlias | _: ScClass | _: ScTrait, _) =>
res.foldLeft(true) {
case (false, _) => false
case (true, rr@ScalaResolveResult(_: ScTypeAlias | _: ScClass | _: ScTrait, _)) =>
rr.element.name != r.element.name ||
!ScalaPsiUtil.superTypeMembers(rr.element).contains(r.element)
case (true, _) => true
}
case _ => true
}
}
object ScalaNameHint extends NameHint {
def getName(state: ResolveState) = {
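      // Strips surrounding backticks so escaped identifiers are compared by their plain name.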
val stateName = state.get(ResolverEnv.nameKey)
val result = if (stateName == null) name else stateName
if (result != null && result.startsWith("`") && result.endsWith("`") && result.length > 1) result.substring(1, result.length - 1)
else result
}
}
}
| katejim/intellij-scala | src/org/jetbrains/plugins/scala/lang/resolve/processor/ResolveProcessor.scala | Scala | apache-2.0 | 8,617 |
package com.twitter.util
import com.twitter.common.objectsize.ObjectSizeCalculator
import com.twitter.conversions.time._
import java.util.concurrent.ConcurrentLinkedQueue
import org.junit.runner.RunWith
import org.mockito.Matchers.any
import org.mockito.Mockito.{never, verify, when}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalacheck.{Gen, Arbitrary}
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import scala.collection.JavaConverters._
import scala.runtime.NonLocalReturnControl
import scala.util.control.ControlThrowable
import scala.util.Random
@RunWith(classOf[JUnitRunner])
class FutureTest extends WordSpec with MockitoSugar with GeneratorDrivenPropertyChecks {
implicit def futureMatcher[A](future: Future[A]) = new {
def mustProduce(expected: Try[A]) {
expected match {
case Throw(ex) => {
val t = intercept[Throwable] {
Await.result(future, 1.second)
}
assert(t === ex)
}
case Return(v) => {
assert(Await.result(future, 1.second) === v)
}
}
}
}
class FatalException extends ControlThrowable
trait MkConst {
def apply[A](result: Try[A]): Future[A]
def value[A](a: A): Future[A] = this(Return(a))
def exception[A](exc: Throwable): Future[A] = this(Throw(exc))
}
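  // Runs the shared behaviour suite against one way of constructing satisfied Futures (invoked later with ConstFuture and Promise).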
def test(name: String, const: MkConst) {
"object Future (%s)".format(name) when {
"times" should {
trait TimesHelper {
val queue = new ConcurrentLinkedQueue[Promise[Unit]]
var complete = false
var failure = false
var ninterrupt = 0
val iteration = Future.times(3) {
val promise = new Promise[Unit]
promise.setInterruptHandler { case _ => ninterrupt += 1 }
queue add promise
promise
}
iteration onSuccess { _ =>
complete = true
} onFailure { f =>
failure = true
}
assert(complete === false)
assert(failure === false)
}
"when everything succeeds" in {
new TimesHelper {
queue.poll().setDone()
assert(complete === false)
assert(failure === false)
queue.poll().setDone()
assert(complete === false)
assert(failure === false)
queue.poll().setDone()
assert(complete === true)
assert(failure === false)
}
}
"when some succeed and some fail" in {
new TimesHelper {
queue.poll().setDone()
assert(complete === false)
assert(failure === false)
queue.poll().setException(new Exception(""))
assert(complete === false)
assert(failure === true)
}
}
"when interrupted" in {
new TimesHelper {
assert(ninterrupt === 0)
iteration.raise(new Exception)
for (i <- 1 to 3) {
assert(ninterrupt === i)
queue.poll().setDone()
}
}
}
}
"when" in {
var i = 0
Await.result {
Future.when(false) {
Future { i += 1 }
}
}
assert(i === 0)
Await.result {
Future.when(true) {
Future { i += 1 }
}
}
assert(i === 1)
}
"whileDo" should {
trait WhileDoHelper {
var i = 0
val queue = new ConcurrentLinkedQueue[HandledPromise[Unit]]
var complete = false
var failure = false
val iteration = Future.whileDo(i < 3) {
i += 1
val promise = new HandledPromise[Unit]
queue add promise
promise
}
iteration onSuccess { _ =>
complete = true
} onFailure { f =>
failure = true
}
assert(complete === false)
assert(failure === false)
}
"when everything succeeds" in {
new WhileDoHelper {
queue.poll().setDone()
assert(complete === false)
assert(failure === false)
queue.poll().setDone()
assert(complete === false)
assert(failure === false)
queue.poll().setDone()
assert(complete === true)
assert(failure === false)
}
}
"when some succeed and some fail" in {
new WhileDoHelper {
queue.poll().setDone()
assert(complete === false)
assert(failure === false)
queue.poll().setException(new Exception(""))
assert(complete === false)
assert(failure === true)
}
}
"when interrupted" in {
new WhileDoHelper {
assert((queue.asScala exists (_.handled.isDefined)) === false)
iteration.raise(new Exception)
assert((queue.asScala forall ( _.handled.isDefined)) === true)
}
}
}
"proxyTo" should {
"reject satisfied promises" in {
val str = "um um excuse me um"
val p1 = new Promise[String]()
p1.update(Return(str))
val p2 = new Promise[String]()
val ex = intercept[IllegalStateException] { p2.proxyTo(p1) }
assert(ex.getMessage.contains(str))
}
"proxies success" in {
val p1 = new Promise[Int]()
val p2 = new Promise[Int]()
p2.proxyTo(p1)
p2.update(Return(5))
assert(5 === Await.result(p1))
assert(5 === Await.result(p2))
}
"proxies failure" in {
val p1 = new Promise[Int]()
val p2 = new Promise[Int]()
p2.proxyTo(p1)
val t = new RuntimeException("wurmp")
p2.update(Throw(t))
val ex1 = intercept[RuntimeException] { Await.result(p1) }
assert(ex1.getMessage === t.getMessage)
val ex2 = intercept[RuntimeException] { Await.result(p2) }
assert(ex2.getMessage === t.getMessage)
}
}
"batched" should {
implicit val timer = new MockTimer
val result = Seq(4, 5, 6)
"execute after threshold is reached" in {
val f = mock[Seq[Int] => Future[Seq[Int]]]
val batcher = Future.batched(3)(f)
when(f.apply(Seq(1,2,3))) thenReturn(Future.value(result))
batcher(1)
verify(f, never()).apply(any[Seq[Int]])
batcher(2)
verify(f, never()).apply(any[Seq[Int]])
batcher(3)
verify(f).apply(Seq(1,2,3))
}
"execute after bufSizeFraction threshold is reached" in {
val f = mock[Seq[Int] => Future[Seq[Int]]]
val batcher = Future.batched(3, sizePercentile = 0.67f)(f)
when(f.apply(Seq(1,2,3))) thenReturn(Future.value(result))
batcher(1)
verify(f, never()).apply(any[Seq[Int]])
batcher(2)
verify(f).apply(Seq(1,2))
}
"treat bufSizeFraction return value < 0.0f as 1" in {
val f = mock[Seq[Int] => Future[Seq[Int]]]
val batcher = Future.batched(3, sizePercentile = 0.4f)(f)
when(f.apply(Seq(1,2,3))) thenReturn(Future.value(result))
batcher(1)
verify(f).apply(Seq(1))
}
"treat bufSizeFraction return value > 1.0f should return maxSizeThreshold" in {
val f = mock[Seq[Int] => Future[Seq[Int]]]
val batcher = Future.batched(3, sizePercentile = 1.3f)(f)
when(f.apply(Seq(1,2,3))) thenReturn(Future.value(result))
batcher(1)
verify(f, never()).apply(any[Seq[Int]])
batcher(2)
verify(f, never()).apply(any[Seq[Int]])
batcher(3)
verify(f).apply(Seq(1,2,3))
}
"execute after time threshold" in {
val f = mock[Seq[Int] => Future[Seq[Int]]]
val batcher = Future.batched(3, 3.seconds)(f)
Time.withCurrentTimeFrozen { control =>
when(f(Seq(1))) thenReturn(Future.value(Seq(4)))
batcher(1)
verify(f, never()).apply(any[Seq[Int]])
control.advance(1.second)
timer.tick()
verify(f, never()).apply(any[Seq[Int]])
control.advance(1.second)
timer.tick()
verify(f, never()).apply(any[Seq[Int]])
control.advance(1.second)
timer.tick()
verify(f).apply(Seq(1))
}
}
"only execute once if both are reached" in {
val f = mock[Seq[Int] => Future[Seq[Int]]]
val batcher = Future.batched(3)(f)
Time.withCurrentTimeFrozen { control =>
when(f(Seq(1,2,3))) thenReturn(Future.value(result))
batcher(1)
batcher(2)
batcher(3)
control.advance(10.seconds)
timer.tick()
verify(f).apply(Seq(1,2,3))
}
}
"propagates results" in {
val f = mock[Seq[Int] => Future[Seq[Int]]]
val batcher = Future.batched(3)(f)
Time.withCurrentTimeFrozen { control =>
when(f(Seq(1,2,3))) thenReturn(Future.value(result))
val res1 = batcher(1)
assert(res1.isDefined === false)
val res2 = batcher(2)
assert(res2.isDefined === false)
val res3 = batcher(3)
assert(res1.isDefined === true)
assert(res2.isDefined === true)
assert(res3.isDefined === true)
assert(Await.result(res1) === 4)
assert(Await.result(res2) === 5)
assert(Await.result(res3) === 6)
verify(f).apply(Seq(1,2,3))
}
}
"not block other batches" in {
val f = mock[Seq[Int] => Future[Seq[Int]]]
val batcher = Future.batched(3)(f)
Time.withCurrentTimeFrozen { control =>
val blocker = new Promise[Unit]
val thread = new Thread {
override def run() {
when(f(result)) thenReturn(Future.value(Seq(7,8,9)))
batcher(4)
batcher(5)
batcher(6)
verify(f).apply(result)
blocker.setValue(())
}
}
when(f(Seq(1,2,3))) thenAnswer {
new Answer[Future[Seq[Int]]] {
def answer(invocation: InvocationOnMock) = {
thread.start()
Await.result(blocker)
Future.value(result)
}
}
}
batcher(1)
batcher(2)
batcher(3)
verify(f).apply(Seq(1,2,3))
}
}
"swallow exceptions" in {
val f = mock[Seq[Int] => Future[Seq[Int]]]
val batcher = Future.batched(3)(f)
when(f(Seq(1, 2, 3))) thenAnswer {
new Answer[Unit] {
def answer(invocation: InvocationOnMock) = {
throw new Exception
}
}
}
batcher(1)
batcher(2)
batcher(3) // Success here implies no exception was thrown.
}
}
"interruptible" should {
"properly ignore the underlying future on interruption" in {
val p = Promise[Unit]
val i = p.interruptible()
val e = new Exception
i.raise(e)
p.setDone()
assert(p.poll === Some(Return(())))
assert(i.poll === Some(Throw(e)))
}
"respect the underlying future" in {
val p = Promise[Unit]
val i = p.interruptible()
p.setDone()
assert(p.poll === Some(Return(())))
assert(i.poll === Some(Return(())))
}
"do nothing for const" in {
val f = const.value(())
val i = f.interruptible()
i.raise(new Exception())
assert(f.poll === Some(Return(())))
assert(i.poll === Some(Return(())))
}
}
"collect" should {
trait CollectHelper {
val p0, p1 = new HandledPromise[Int]
val f = Future.collect(Seq(p0, p1))
assert(f.isDefined === false)
}
"only return when both futures complete" in {
new CollectHelper {
p0() = Return(1)
assert(f.isDefined === false)
p1() = Return(2)
assert(f.isDefined === true)
assert(Await.result(f) === Seq(1, 2))
}
}
"return with exception if the first future throws" in {
new CollectHelper {
p0() = Throw(new Exception)
intercept[Exception] { Await.result(f) }
}
}
"return with exception if the second future throws" in {
new CollectHelper {
p0() = Return(1)
assert(f.isDefined === false)
p1() = Throw(new Exception)
intercept[Exception] { Await.result(f) }
}
}
"propagate interrupts" in {
new CollectHelper {
val ps = Seq(p0, p1)
assert((ps.count(_.handled.isDefined)) === 0)
f.raise(new Exception)
assert((ps.count(_.handled.isDefined)) === 2)
}
}
}
"collectToTry" should {
trait CollectToTryHelper {
val p0, p1 = new HandledPromise[Int]
val f = Future.collectToTry(Seq(p0, p1))
assert(!f.isDefined)
}
"only return when both futures complete" in {
new CollectToTryHelper {
p0() = Return(1)
assert(!f.isDefined)
p1() = Return(2)
assert(f.isDefined)
assert(Await.result(f) === Seq(Return(1), Return(2)))
}
}
"be undefined if the first future throws and the second is undefined" in {
new CollectToTryHelper {
p0() = Throw(new Exception)
assert(!f.isDefined)
}
}
"return both results if the first is defined second future throws" in {
new CollectToTryHelper {
val ex = new Exception
p0() = Return(1)
assert(!f.isDefined)
p1() = Throw(ex)
assert(Await.result(f) === Seq(Return(1), Throw(ex)))
}
}
"propagate interrupts" in {
new CollectToTryHelper {
val ps = Seq(p0, p1)
assert(ps.count(_.handled.isDefined) === 0)
f.raise(new Exception)
assert(ps.count(_.handled.isDefined) === 2)
}
}
}
"propagate locals, restoring original context" in {
val local = new Local[Int]
val f = const.value(111)
var ran = 0
local() = 1010
f ensure {
assert(local() === Some(1010))
local() = 1212
f ensure {
assert(local() === Some(1212))
local() = 1313
ran += 1
}
assert(local() === Some(1212))
ran += 1
}
assert(local() === Some(1010))
assert(ran === 2)
}
"delay execution" in {
val f = const.value(111)
var count = 0
f onSuccess { _ =>
assert(count === 0)
f ensure {
assert(count === 1)
count += 1
}
assert(count === 0)
count += 1
}
assert(count === 2)
}
"are monitored" in {
val inner = const.value(123)
val exc = new Exception("a raw exception")
val f = Future.monitored {
inner ensure { throw exc }
}
assert(f.poll === Some(Throw(exc)))
}
}
"Future (%s)".format(name) should {
"select" which {
trait SelectHelper {
var nhandled = 0
val p0, p1 = new HandledPromise[Int]
val f = p0 select p1
assert(f.isDefined === false)
}
"select the first [result] to complete" in {
new SelectHelper {
p0() = Return(1)
p1() = Return(2)
assert(Await.result(f) === 1)
}
}
"select the first [exception] to complete" in {
new SelectHelper {
p0() = Throw(new Exception)
p1() = Return(2)
intercept[Exception] { Await.result(f) }
}
}
"propagate interrupts" in {
new SelectHelper {
val ps = Seq(p0, p1)
assert((ps exists (_.handled.isDefined)) === false)
f.raise(new Exception)
assert((ps forall (_.handled.isDefined)) === true)
}
}
}
def testJoin(label: String, joiner: ((Future[Int], Future[Int]) => Future[(Int, Int)])) {
"join(%s)".format(label) should {
trait JoinHelper {
val p0 = new HandledPromise[Int]
val p1 = new HandledPromise[Int]
val f = joiner(p0, p1)
assert(f.isDefined === false)
}
"only return when both futures complete" in {
new JoinHelper {
p0() = Return(1)
assert(f.isDefined === false)
p1() = Return(2)
assert(Await.result(f) === (1, 2))
}
}
"return with exception if the first future throws" in {
new JoinHelper {
p0() = Throw(new Exception)
intercept[Exception] { Await.result(f) }
}
}
"return with exception if the second future throws" in {
new JoinHelper {
p0() = Return(1)
assert(f.isDefined === false)
p1() = Throw(new Exception)
intercept[Exception] { Await.result(f) }
}
}
"propagate interrupts" in {
new JoinHelper {
assert(p0.handled === None)
assert(p1.handled === None)
val exc = new Exception
f.raise(exc)
assert(p0.handled === Some(exc))
assert(p1.handled === Some(exc))
}
}
}
}
testJoin("f join g", _ join _)
testJoin("Future.join(f, g)", Future.join(_, _))
"toJavaFuture" should {
"return the same thing as our Future when initialized" which {
val f = const.value(1)
val jf = f.toJavaFuture
assert(Await.result(f) === jf.get())
"must both be done" in {
assert(f.isDefined === true)
assert(jf.isDone === true)
assert(jf.isCancelled === false)
}
}
"return the same thing as our Future when set later" which {
val f = new Promise[Int]
val jf = f.toJavaFuture
f.setValue(1)
assert(Await.result(f) === jf.get())
"must both be done" in {
assert(f.isDefined === true)
assert(jf.isDone === true)
assert(jf.isCancelled === false)
}
}
"java future should throw an exception" in {
val f = new Promise[Int]
val jf = f.toJavaFuture
val e = new RuntimeException()
f.setException(e)
val actual = intercept[RuntimeException] { jf.get() }
assert(actual === e)
}
"interrupt Future when cancelled" in {
val f = new HandledPromise[Int]
val jf = f.toJavaFuture
assert(f.handled === None)
jf.cancel(true)
assert(f.handled match {
case Some(e: java.util.concurrent.CancellationException) => true
case _ => false
})
}
}
"monitored" should {
trait MonitoredHelper {
val inner = new HandledPromise[Int]
val exc = new Exception("some exception")
}
"catch raw exceptions (direct)" in {
new MonitoredHelper {
val f = Future.monitored {
throw exc
inner
}
assert(f.poll === Some(Throw(exc)))
}
}
"catch raw exceptions (indirect), interrupting computation" in {
new MonitoredHelper {
val inner1 = new Promise[Int]
var ran = false
val f = Future.monitored {
inner1 ensure {
throw exc
} ensure {
// Note that these are sequenced so that interrupts
// will be delivered before inner's handler is cleared.
ran = true
try {
inner.update(Return(1))
} catch {
case e: Throwable => assert(true === false)
}
}
inner
}
assert(ran === false)
assert(f.poll === None)
assert(inner.handled === None)
inner1.update(Return(1))
assert(ran === true)
assert(inner.isDefined === true)
assert(f.poll === Some(Throw(exc)))
assert(inner.handled === Some(exc))
}
}
"link" in {
new MonitoredHelper {
val f = Future.monitored { inner }
assert(inner.handled === None)
f.raise(exc)
assert(inner.handled === Some(exc))
}
}
"doesn't leak the underlying promise after completion" in {
new MonitoredHelper {
val inner1 = new Promise[String]
val inner2 = new Promise[String]
val f = Future.monitored { inner2.ensure(()); inner1 }
val s = "."*1024
val sSize = ObjectSizeCalculator.getObjectSize(s)
inner1.setValue("."*1024)
val inner2Size = ObjectSizeCalculator.getObjectSize(inner2)
assert(inner2Size < sSize)
}
}
}
"get(deprecated)" in {
val e = new Exception
val v = 123
assert(Future.exception[Int](e).get(0.seconds) === Throw(e))
assert(Future.value(v).get(0.seconds) === Return(v))
// Including fatal ones:
val e2 = new java.lang.IllegalAccessError
assert(Future.exception[Int](e2).get(0.seconds) === Throw(e2))
implicit val timer = new JavaTimer
val p = new Promise[Int]
val r = p.get(50.milliseconds)
intercept[TimeoutException]{ r() }
timer.stop()
}
}
"Promise (%s)".format(name) should {
"apply" which {
"when we're inside of a respond block (without deadlocking)" in {
val f = Future(1)
var didRun = false
f foreach { _ =>
f mustProduce Return(1)
didRun = true
}
assert(didRun === true)
}
}
"map" which {
"when it's all chill" in {
val f = Future(1) map { x => x + 1 }
assert(Await.result(f) === 2)
}
"when there's a problem in the passed in function" in {
val e = new Exception
val f = Future(1) map { x =>
throw e
x + 1
}
val actual = intercept[Exception] {
Await.result(f)
}
assert(actual === e)
}
}
"transform" should {
val e = new Exception("rdrr")
"values" in {
const.value(1).transform {
case Return(v) => const.value(v + 1)
case Throw(t) => const.value(0)
} mustProduce(Return(2))
}
"exceptions" in {
const.exception(e).transform {
case Return(_) => const.value(1)
case Throw(t) => const.value(0)
} mustProduce(Return(0))
}
"exceptions thrown during transformation" in {
const.value(1).transform {
case Return(v) => const.value(throw e)
case Throw(t) => const.value(0)
} mustProduce(Throw(e))
}
"non local returns executed during transformation" in {
def ret(): String = {
val f = const.value(1).transform {
case Return(v) =>
val fn = { () =>
return "OK"
}
fn()
Future.value(ret())
case Throw(t) => const.value(0)
}
assert(f.poll.isDefined)
val e = intercept[FutureNonLocalReturnControl] {
f.poll.get.get
}
val g = e.getCause match {
case t: NonLocalReturnControl[_] => t.asInstanceOf[NonLocalReturnControl[String]]
case _ =>
fail()
}
assert(g.value === "OK")
"bleh"
}
ret()
}
"fatal exceptions thrown during transformation" in {
val e = new FatalException()
val actual = intercept[FatalException] {
const.value(1).transform {
case Return(v) => const.value(throw e)
case Throw(t) => const.value(0)
}
}
assert(actual === e)
}
}
"transformedBy" should {
val e = new Exception("rdrr")
"flatMap" in {
const.value(1).transformedBy(new FutureTransformer[Int, Int] {
override def flatMap(value: Int) = const.value(value + 1)
override def rescue(t: Throwable) = const.value(0)
}) mustProduce(Return(2))
}
"rescue" in {
const.exception(e).transformedBy(new FutureTransformer[Int, Int] {
override def flatMap(value: Int) = const.value(value + 1)
override def rescue(t: Throwable) = const.value(0)
}) mustProduce(Return(0))
}
"exceptions thrown during transformation" in {
const.value(1).transformedBy(new FutureTransformer[Int, Int] {
override def flatMap(value: Int) = throw e
override def rescue(t: Throwable) = const.value(0)
}) mustProduce(Throw(e))
}
"map" in {
const.value(1).transformedBy(new FutureTransformer[Int, Int] {
override def map(value: Int) = value + 1
override def handle(t: Throwable) = 0
}) mustProduce(Return(2))
}
"handle" in {
const.exception(e).transformedBy(new FutureTransformer[Int, Int] {
override def map(value: Int) = value + 1
override def handle(t: Throwable) = 0
}) mustProduce(Return(0))
}
}
def testSequence(
which: String,
seqop: (Future[Unit], () => Future[Unit]) => Future[Unit]) {
which when {
"successes" should {
"interruption of the produced future" which {
"before the antecedent Future completes, propagates back to the antecedent" in {
val f1, f2 = new HandledPromise[Unit]
val f = seqop(f1, () => f2)
assert(f1.handled === None)
assert(f2.handled === None)
f.raise(new Exception)
assert(f1.handled.isDefined)
f1() = Return.Unit
assert(f2.handled.isDefined)
}
"after the antecedent Future completes, does not propagate back to the antecedent" in {
val f1, f2 = new HandledPromise[Unit]
val f = seqop(f1, () => f2)
assert(f1.handled === None)
assert(f2.handled === None)
f1() = Return.Unit
f.raise(new Exception)
assert(f1.handled === None)
assert(f2.handled.isDefined)
}
"forward through chains" in {
val f1, f2 = new Promise[Unit]
val exc = new Exception
val f3 = new Promise[Unit]
var didInterrupt = false
f3.setInterruptHandler {
case `exc` => didInterrupt = true
}
val f = seqop(f1, () => seqop(f2, () => f3))
f.raise(exc)
assert(didInterrupt === false)
f1.setDone()
assert(didInterrupt === false)
f2.setDone()
assert(didInterrupt === true)
}
}
}
"failures" should {
val e = new Exception
val g = seqop(Future[Unit](throw e), () => Future.Done)
"apply" in {
val actual = intercept[Exception] { Await.result(g) }
assert(actual === e)
}
"respond" in {
g mustProduce Throw(e)
}
"when there is an exception in the passed in function" in {
val e = new Exception
val f = seqop(Future.Done, () => throw e)
val actual = intercept[Exception] { Await.result(f) }
assert(actual === e)
}
}
}
}
testSequence("flatMap", (a, next) => a flatMap { _ => next() })
testSequence("before", (a, next) => a before next())
"flatMap (values)" should {
val f = Future(1) flatMap { x => Future(x + 1) }
"apply" which {
assert(Await.result(f) === 2)
}
"respond" which {
f mustProduce Return(2)
}
}
"flatten" should {
"successes" in {
val f = Future(Future(1))
f.flatten mustProduce Return(1)
}
"shallow failures" in {
val e = new Exception
val f: Future[Future[Int]] = const.exception(e)
f.flatten mustProduce Throw(e)
}
"deep failures" in {
val e = new Exception
val f: Future[Future[Int]] = const.value(const.exception(e))
f.flatten mustProduce Throw(e)
}
"interruption" in {
val f1 = new HandledPromise[Future[Int]]
val f2 = new HandledPromise[Int]
val f = f1.flatten
assert(f1.handled === None)
assert(f2.handled === None)
f.raise(new Exception)
f1.handled match {
case Some(_) =>
case None => assert(false === true)
}
assert(f2.handled === None)
f1() = Return(f2)
f2.handled match {
case Some(_) =>
case None => assert(false === true)
}
}
}
"rescue" should {
val e = new Exception
"successes" which {
val f = Future(1) rescue { case e => Future(2) }
"apply" in {
assert(Await.result(f) === 1)
}
"respond" in {
f mustProduce Return(1)
}
}
"failures" which {
val g = Future[Int](throw e) rescue { case e => Future(2) }
"apply" in {
assert(Await.result(g) === 2)
}
"respond" in {
g mustProduce Return(2)
}
"when the error handler errors" in {
val g = Future[Int](throw e) rescue { case e => throw e; Future(2) }
val actual = intercept[Exception] { Await.result(g) }
assert(actual === e)
}
}
"interruption of the produced future" which {
"before the antecedent Future completes, propagates back to the antecedent" in {
val f1, f2 = new HandledPromise[Int]
val f = f1 rescue { case _ => f2 }
assert(f1.handled === None)
assert(f2.handled === None)
f.raise(new Exception)
f1.handled match {
case Some(_) =>
case None => assert(false === true)
}
assert(f2.handled === None)
f1() = Throw(new Exception)
f2.handled match {
case Some(_) =>
case None => assert(false === true)
}
}
"after the antecedent Future completes, does not propagate back to the antecedent" in {
val f1, f2 = new HandledPromise[Int]
val f = f1 rescue { case _ => f2 }
assert(f1.handled === None)
assert(f2.handled === None)
f1() = Throw(new Exception)
f.raise(new Exception)
assert(f1.handled === None)
f2.handled match {
case Some(_) =>
case None => assert(false === true)
}
}
}
}
"foreach" in {
var wasCalledWith: Option[Int] = None
val f = Future(1)
f foreach { i =>
wasCalledWith = Some(i)
}
assert(wasCalledWith === Some(1))
}
"respond" should {
"when the result has arrived" in {
var wasCalledWith: Option[Int] = None
val f = Future(1)
f respond {
case Return(i) => wasCalledWith = Some(i)
case Throw(e) => fail(e.toString)
}
assert(wasCalledWith === Some(1))
}
"when the result has not yet arrived it buffers computations" in {
var wasCalledWith: Option[Int] = None
val f = new Promise[Int]
f foreach { i =>
wasCalledWith = Some(i)
}
assert(wasCalledWith === None)
f()= Return(1)
assert(wasCalledWith === Some(1))
}
"runs callbacks just once and in order" in {
var i,j,k,h = 0
val p = new Promise[Int]
p ensure {
i = i+j+k+h+1
} ensure {
j = i+j+k+h+1
} ensure {
k = i+j+k+h+1
} ensure {
h = i+j+k+h+1
}
assert(i === 0)
assert(j === 0)
assert(k === 0)
assert(h === 0)
p.setValue(1)
assert(i === 1)
assert(j === 2)
assert(k === 4)
assert(h === 8)
}
"monitor exceptions" in {
val m = new Monitor {
var handled = null: Throwable
def handle(exc: Throwable) = {
handled = exc
true
}
}
val exc = new Exception
val p = new Promise[Int]
m {
p ensure { throw exc }
}
assert(m.handled === null)
p.update(Return(1))
assert(m.handled === exc)
}
}
"willEqual" in {
assert(Await.result((const.value(1) willEqual(const.value(1))), 1.second) === true)
}
"Future() handles exceptions" in {
val e = new Exception
val f = Future[Int] { throw e }
val actual = intercept[Exception] { Await.result(f) }
assert(actual === e)
}
"propagate locals" in {
val local = new Local[Int]
val promise0 = new Promise[Unit]
val promise1 = new Promise[Unit]
local() = 1010
val both = promise0 flatMap { _ =>
val local0 = local()
promise1 map { _ =>
val local1 = local()
(local0, local1)
}
}
local() = 123
promise0() = Return.Unit
local() = 321
promise1() = Return.Unit
assert(both.isDefined === true)
assert(Await.result(both) === (Some(1010), Some(1010)))
}
"propagate locals across threads" in {
val local = new Local[Int]
val promise = new Promise[Option[Int]]
local() = 123
val done = promise map { otherValue => (otherValue, local()) }
val t = new Thread {
override def run() {
local() = 1010
promise() = Return(local())
}
}
t.run()
t.join()
assert(done.isDefined === true)
assert(Await.result(done) === (Some(1010), Some(123)))
}
"poll" should {
trait PollHelper {
val p = new Promise[Int]
}
"when waiting" in {
new PollHelper {
assert(p.poll === None)
}
}
"when succeeding" in {
new PollHelper {
p.setValue(1)
assert(p.poll === Some(Return(1)))
}
}
"when failing" in {
new PollHelper {
val e = new Exception
p.setException(e)
assert(p.poll === Some(Throw(e)))
}
}
}
"within" should {
"when we run out of time" in {
implicit val timer = new JavaTimer
val p = new HandledPromise[Int]
intercept[TimeoutException] { Await.result(p.within(50.milliseconds)) }
timer.stop()
assert(p.handled === None)
}
"when everything is chill" in {
implicit val timer = new JavaTimer
val p = new Promise[Int]
p.setValue(1)
assert(Await.result(p.within(50.milliseconds)) === 1)
timer.stop()
}
"when timeout is forever" in {
// We manage to throw an exception inside
// the scala compiler if we use MockTimer
// here. Sigh.
implicit val timer = new Timer {
def schedule(when: Time)(f: => Unit): TimerTask =
throw new Exception("schedule called")
def schedule(when: Time, period: Duration)(f: => Unit): TimerTask =
throw new Exception("schedule called")
def stop() = ()
}
val p = new Promise[Int]
assert(p.within(Duration.Top) === p)
}
"when future already satisfied" in {
implicit val timer = new NullTimer
val p = new Promise[Int]
p.setValue(3)
assert(p.within(1.minute) === p)
}
"interruption" in Time.withCurrentTimeFrozen { tc =>
implicit val timer = new MockTimer
val p = new HandledPromise[Int]
val f = p.within(50.milliseconds)
assert(p.handled === None)
f.raise(new Exception)
p.handled match {
case Some(_) =>
case None => assert(false === true)
}
}
}
"raiseWithin" should {
"when we run out of time" in {
implicit val timer = new JavaTimer
val p = new HandledPromise[Int]
intercept[TimeoutException] {
Await.result(p.raiseWithin(50.milliseconds))
}
timer.stop()
p.handled match {
case Some(_) =>
case None => assert(false === true)
}
}
"when we run out of time, throw our stuff" in {
implicit val timer = new JavaTimer
class SkyFallException extends Exception("let the skyfall")
val skyFall = new SkyFallException
val p = new HandledPromise[Int]
intercept[SkyFallException] {
Await.result(p.raiseWithin(50.milliseconds, skyFall))
}
timer.stop()
p.handled match {
case Some(_) =>
case None => assert(false === true)
}
assert(p.handled === Some(skyFall))
}
"when we are within timeout, but inner throws TimeoutException, we don't raise" in {
implicit val timer = new JavaTimer
class SkyFallException extends Exception("let the skyfall")
val skyFall = new SkyFallException
val p = new HandledPromise[Int]
intercept[TimeoutException] {
Await.result(
p.within(20.milliseconds).raiseWithin(50.milliseconds, skyFall)
)
}
timer.stop()
assert(p.handled === None)
}
"when everything is chill" in {
implicit val timer = new JavaTimer
val p = new Promise[Int]
p.setValue(1)
assert(Await.result(p.raiseWithin(50.milliseconds)) === 1)
timer.stop()
}
"when timeout is forever" in {
// We manage to throw an exception inside
// the scala compiler if we use MockTimer
// here. Sigh.
implicit val timer = new Timer {
def schedule(when: Time)(f: => Unit): TimerTask =
throw new Exception("schedule called")
def schedule(when: Time, period: Duration)(f: => Unit): TimerTask =
throw new Exception("schedule called")
def stop() = ()
}
val p = new Promise[Int]
assert(p.raiseWithin(Duration.Top) === p)
}
"when future already satisfied" in {
implicit val timer = new NullTimer
val p = new Promise[Int]
p.setValue(3)
assert(p.raiseWithin(1.minute) === p)
}
"interruption" in Time.withCurrentTimeFrozen { tc =>
implicit val timer = new MockTimer
val p = new HandledPromise[Int]
val f = p.raiseWithin(50.milliseconds)
assert(p.handled === None)
f.raise(new Exception)
p.handled match {
case Some(_) =>
case None => assert(false === true)
}
}
}
"masked" should {
"do unconditional interruption" in {
val p = new HandledPromise[Unit]
val f = p.masked
f.raise(new Exception())
assert(p.handled === None)
}
"do conditional interruption" in {
val p = new HandledPromise[Unit]
val f1 = p.mask {
case _: TimeoutException => true
}
val f2 = p.mask {
case _: TimeoutException => true
}
f1.raise(new TimeoutException("bang!"))
assert(p.handled === None)
f2.raise(new Exception())
assert(p.handled.isDefined)
}
}
"liftToTry" should {
"success" in {
val p = Future.value(3)
assert(Await.result(p.liftToTry) === Return(3))
}
"failure" in {
val ex = new Exception()
val p = Future.exception(ex)
assert(Await.result(p.liftToTry) === Throw(ex))
}
"propagates interrupt" in {
val p = new HandledPromise[Unit]
p.liftToTry.raise(new Exception())
assert(p.handled.isDefined)
}
}
}
"FutureTask (%s)".format(name) should {
"return result" in {
val task = new FutureTask("hello")
task.run()
assert(Await.result(task) === "hello")
}
"throw result" in {
val task = new FutureTask[String](throw new IllegalStateException)
task.run()
intercept[IllegalStateException] {
Await.result(task)
}
}
}
}
test("ConstFuture", new MkConst { def apply[A](r: Try[A]) = Future.const(r) })
test("Promise", new MkConst { def apply[A](r: Try[A]) = new Promise(r) })
"Future.apply" should {
"fail on NLRC" in {
def ok(): String = {
val f = Future(return "OK")
val t = intercept[FutureNonLocalReturnControl] {
f.poll.get.get
}
val nlrc = intercept[NonLocalReturnControl[String]] {
throw t.getCause
}
assert(nlrc.value === "OK")
"NOK"
}
assert(ok() === "NOK")
}
}
"Future.None" should {
"always be defined" in {
assert(Future.None.isDefined === true)
}
"but still None" in {
assert(Await.result(Future.None) === None)
}
}
"Future.True" should {
"always be defined" in {
assert(Future.True.isDefined === true)
}
"but still True" in {
assert(Await.result(Future.True) === true)
}
}
"Future.False" should {
"always be defined" in {
assert(Future.False.isDefined === true)
}
"but still False" in {
assert(Await.result(Future.False) === false)
}
}
"Future.never" should {
"must be undefined" in {
assert(Future.never.isDefined === false)
assert(Future.never.poll === None)
}
"always time out" in {
intercept[TimeoutException] { Await.ready(Future.never, 0.milliseconds) }
}
}
"Future.sleep" should {
"Satisfy after the given amount of time" in Time.withCurrentTimeFrozen { tc =>
implicit val timer = new MockTimer
val f = Future.sleep(10.seconds)
assert(!f.isDefined)
tc.advance(5.seconds)
timer.tick()
assert(!f.isDefined)
tc.advance(5.seconds)
timer.tick()
assert(f.isDefined)
Await.result(f)
}
"Be interruptible" in {
implicit val timer = new MockTimer
// sleep forever and grab the task that's created
val f = Future.sleep(Duration.Top)(timer)
val task = timer.tasks(0)
// then raise a known exception
val e = new Exception("expected")
f.raise(e)
// we were immediately satisfied with the exception and the task was canceled
f mustProduce Throw(e)
assert(task.isCancelled)
}
"Return Future.Done for durations <= 0" in {
implicit val timer = new MockTimer
assert(Future.sleep(Duration.Zero) eq Future.Done)
assert(Future.sleep((-10).seconds) eq Future.Done)
assert(timer.tasks.isEmpty)
}
}
"Future.select" should {
import Arbitrary.arbitrary
val genLen = Gen.choose(1, 10)
"return the first result" which {
forAll(genLen, arbitrary[Boolean]) { (n, fail) =>
val ps = ((0 until n) map(_ => new Promise[Int])).toList
assert(ps.map(_.waitqLength).sum === 0)
val f = Future.select(ps)
val i = Random.nextInt(ps.length)
val e = new Exception("sad panda")
val t = if (fail) Throw(e) else Return(i)
ps(i).update(t)
assert(f.isDefined)
val (ft, fps) = Await.result(f)
assert(ft === t)
assert(fps.toSet === (ps.toSet - ps(i)))
}
}
"not accumulate listeners when losing or" in {
val p = new Promise[Unit]
val q = new Promise[Unit]
(p or q)
assert(p.waitqLength === 1)
q.setDone()
assert(p.waitqLength === 0)
}
"not accumulate listeners when losing select" in {
val p = new Promise[Unit]
val q = new Promise[Unit]
val f = Future.select(Seq(p, q))
assert(p.waitqLength === 1)
q.setDone()
assert(p.waitqLength === 0)
}
"not accumulate listeners if not selected" in {
forAll(genLen, arbitrary[Boolean]) { (n, fail) =>
val ps = ((0 until n) map(_ => new Promise[Int])).toList
assert(ps.map(_.waitqLength).sum === 0)
val f = Future.select(ps)
assert(ps.map(_.waitqLength).sum === n)
val i = Random.nextInt(ps.length)
val e = new Exception("sad panda")
val t = if (fail) Throw(e) else Return(i)
f respond { _ => () }
assert(ps.map(_.waitqLength).sum === n)
ps(i).update(t)
assert(ps.map(_.waitqLength).sum === 0)
}
}
"fail if we attempt to select an empty future sequence" in {
val f = Future.select(Nil)
assert(f.isDefined)
val e = new IllegalArgumentException("empty future list")
val actual = intercept[IllegalArgumentException] { Await.result(f) }
assert(actual.getMessage === e.getMessage)
}
"propagate interrupts" in {
val fs = (0 until 10).map(_ => new HandledPromise[Int])
Future.select(fs).raise(new Exception)
assert(fs.forall(_.handled.isDefined))
}
}
  // These tests are almost a carbon copy of the "Future.select" tests; they
  // should evolve in sync.
"Future.selectIndex" should {
import Arbitrary.arbitrary
val genLen = Gen.choose(1, 10)
"return the first result" which {
forAll(genLen, arbitrary[Boolean]) { (n, fail) =>
val ps = ((0 until n) map(_ => new Promise[Int])).toIndexedSeq
assert(ps.map(_.waitqLength).sum === 0)
val f = Future.selectIndex(ps)
val i = Random.nextInt(ps.length)
val e = new Exception("sad panda")
val t = if (fail) Throw(e) else Return(i)
ps(i).update(t)
assert(f.isDefined)
assert(Await.result(f) === i)
}
}
"not accumulate listeners when losing select" in {
val p = new Promise[Unit]
val q = new Promise[Unit]
val f = Future.selectIndex(IndexedSeq(p, q))
assert(p.waitqLength === 1)
q.setDone()
assert(p.waitqLength === 0)
}
"not accumulate listeners if not selected" in {
forAll(genLen, arbitrary[Boolean]) { (n, fail) =>
val ps = ((0 until n) map(_ => new Promise[Int])).toIndexedSeq
assert(ps.map(_.waitqLength).sum === 0)
val f = Future.selectIndex(ps)
assert(ps.map(_.waitqLength).sum === n)
val i = Random.nextInt(ps.length)
val e = new Exception("sad panda")
val t = if (fail) Throw(e) else Return(i)
f respond { _ => () }
assert(ps.map(_.waitqLength).sum === n)
ps(i).update(t)
assert(ps.map(_.waitqLength).sum === 0)
}
}
"fail if we attempt to select an empty future sequence" in {
val f = Future.selectIndex(IndexedSeq.empty)
assert(f.isDefined)
val e = new IllegalArgumentException("empty future list")
val actual = intercept[IllegalArgumentException] { Await.result(f) }
assert(actual.getMessage === e.getMessage)
}
"propagate interrupts" in {
val fs = (0 until 10).map(_ => new HandledPromise[Int])
Future.selectIndex(fs).raise(new Exception)
assert(fs.forall(_.handled.isDefined))
}
}
// TODO(John Sirois): Kill this mvn test hack when pants takes over.
"Java" should {
"work" in {
val test = new FutureCompilationTest()
test.testFutureCastMap()
test.testFutureCastFlatMap()
test.testTransformedBy()
assert(true === true)
}
}
}
| n054/util | util-core/src/test/scala/com/twitter/util/FutureTest.scala | Scala | apache-2.0 | 50,236 |
package im.actor.server.api.http.json
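// JSON payload models used by the HTTP API.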
sealed trait Content
case class Text(text: String) extends Content
case class Image(imageUrl: String) extends Content
case class Document(documentUrl: String) extends Content
case class Group(title: String, avatars: Option[AvatarUrls])
case class User(name: String, avatars: Option[AvatarUrls])
case class GroupInviteInfo(group: Group, inviter: User)
case class AvatarUrls(small: Option[String], large: Option[String], full: Option[String])
case class Errors(message: String)
case class ReverseHook(url: String)
case class Status(status: String)
case class ReverseHookResponse(id: Int, url: Option[String]) | chenbk85/actor-platform | actor-server/actor-http-api/src/main/scala/im/actor/server/api/http/json/models.scala | Scala | mit | 649 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn
import org.scalatest.{FlatSpec, Matchers}
class UtilsSpec extends FlatSpec with Matchers {
"getNamedModules" should "work properly" in {
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric.NumericFloat
val model = Sequential().setName("model")
.add(Identity().setName("id"))
.add(ReLU().setName("relu"))
    val namedModules = Utils.getNamedModules(model)
    namedModules("model").isInstanceOf[Sequential[Float]] should be (true)
    namedModules("id").isInstanceOf[Identity[Float]] should be (true)
    namedModules("relu").isInstanceOf[ReLU[Float]] should be (true)
}
"isLayerwised" should "work properly" in {
val model = Sequential[Double]().add(Identity()).add(ReLU())
Utils.isLayerwiseScaled(model) should be (false)
model.setScaleB(2.0)
Utils.isLayerwiseScaled(model) should be (true)
val model2 = Sequential[Double]().add(SpatialConvolution[Double](2, 2, 2, 2)
.setScaleW(3.0)).add(ReLU())
Utils.isLayerwiseScaled(model2) should be (true)
val model3 = Sequential[Double]().add(SpatialConvolution[Double](2, 2, 2, 2)
.setScaleB(2.0)).add(ReLU())
Utils.isLayerwiseScaled(model3) should be (true)
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/UtilsSpec.scala | Scala | apache-2.0 | 1,861 |
package controllers
import javax.inject.Inject
import com.mohiva.play.silhouette.api.{Environment, Silhouette}
import com.mohiva.play.silhouette.impl.authenticators.SessionAuthenticator
import forms.FeedbackForm
import models.User
import models.daos.drivers.GitHubAPI
import models.forms.QuickstartForm
import models.services.{QuickstartService, RepositoryService, UserService}
import modules.CustomGitHubProvider
import org.apache.http.HttpStatus
import play.api.Play
import play.api.Play.current
import play.api.i18n.MessagesApi
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/**
* The basic application controller.
*
* @param messagesApi The Play messages API.
* @param env The Silhouette environment.
   * @param gitHubProvider The custom GitHub social provider.
*/
class ApplicationController @Inject()(
val messagesApi: MessagesApi,
val env: Environment[User, SessionAuthenticator],
gitHubProvider: CustomGitHubProvider,
repoService: RepositoryService,
userService: UserService,
gitHub: GitHubAPI,
quickstartService: QuickstartService)
extends Silhouette[User, SessionAuthenticator] {
/**
* Handles the main action.
*
* @return The result to display.
*/
def index = UserAwareAction.async { implicit request =>
val repoCount = Play.configuration.getInt("gitrank.homeReposSuggestions").getOrElse(0)
request.identity match {
case Some(user) => userService.getOAuthInfo(user).flatMap({
case Some(oauthInfo) => userService.getScoredRepositoriesNames(user).flatMap(filter =>
gitHub.getUserStaredRepositories(repoCount, user, oauthInfo, filter).map(gitHubRepos =>
Ok(views.html.home(gitHubProvider, request.identity, gitHubRepos))
)
)
case None => throw new Error("User spotted without OAuth Credentials: " + user.username)
})
case None => gitHub.getMostStaredRepositories(repoCount).map(gitHubRepos =>
Ok(views.html.home(gitHubProvider, request.identity, gitHubRepos))
)
}
}
/**
* Handles the repository view
*
* @param owner Owner of the repository on the repo system (GitHub)
* @param repositoryName repository name on the repo system. (GitHub)
* @return The html page of the repository
*/
def gitHubRepository(owner: String,
repositoryName: String,
feedbackPage: Option[Int] = None,
quickstartPage: Option[Int] = None
) = UserAwareAction.async { implicit request =>
if (feedbackPage.getOrElse(1) <= 0 || quickstartPage.getOrElse(1) <= 0) {
Future.successful(NotFound(views.html.error("notFound", HttpStatus.SC_NOT_FOUND, "Not Found",
"We cannot find the page, unfortunately negative pages have not been invented!"))
)
} else {
val repoName: String = owner + "/" + repositoryName
repoService.getFromNeoOrGitHub(request.identity, repoName).flatMap({
case Some(repository) =>
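          // Kick off the feedback, quickstart, and page-count lookups up front so they run concurrently before rendering.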
val futureFeedback = repoService.getFeedback(repoName, feedbackPage)
val futureQuickstart = quickstartService.getQuickstartGuidesForRepo(repository, quickstartPage)
val futureFeedbackPageCount = repoService.getFeedbackPageCount(repoName)
val futureQuickstartPageCount = quickstartService.getQuickstartPageCount(repoName)
futureFeedback.flatMap(feedback =>
futureFeedbackPageCount.flatMap(totalFeedbackPages =>
futureQuickstart.flatMap(quickstart =>
futureQuickstartPageCount.flatMap(totalQuickstartPages =>
if (totalFeedbackPages == 0 || feedbackPage.getOrElse(1) <= totalFeedbackPages ||
totalQuickstartPages == 0 || quickstartPage.getOrElse(1) <= totalQuickstartPages
) {
repoService.canAddFeedback(repoName, request.identity).flatMap({
case true => repoService.canUpdateFeedback(repoName, request.identity).map(
canUpdate => Ok(views.html.repository(
gitHubProvider,
request.identity,
repository,
feedback,
quickstart,
totalFeedbackPages,
totalQuickstartPages,
true,
canUpdate
)(owner, repositoryName, feedbackPage.getOrElse(1), quickstartPage.getOrElse(1))))
case false => Future.successful(Ok(views.html.repository(
gitHubProvider,
request.identity,
repository,
feedback,
quickstart,
totalFeedbackPages,
totalQuickstartPages
)(owner, repositoryName, feedbackPage.getOrElse(1), quickstartPage.getOrElse(1))))
})
} else {
Future.successful(NotFound(views.html.error("notFound", HttpStatus.SC_NOT_FOUND, "Not Found",
"The requested page does not exist")))
}
)
)
)
)
case None => Future.successful(NotFound(views.html.error("notFound", HttpStatus.SC_NOT_FOUND, "Not Found",
"We cannot find the repository page, it is likely that you misspelled it, try something else!")))
})
}
}
/**
* Handles the feedback page
*
* @param owner Owner of the repository on the repo system (GitHub)
* @param repositoryName repository name on the repo system (GitHub)
    * @return the html page with the scoring form for the given repository.
*/
def giveFeedbackPage(owner: String, repositoryName: String) = UserAwareAction.async { implicit request =>
val repoName: String = owner + "/" + repositoryName
repoService.getFromNeoOrGitHub(request.identity, repoName).flatMap({
case Some(repository) =>
request.identity match {
case Some(id) => repoService.canAddFeedback(repoName, request.identity).flatMap{
case canAdd => canAdd match {
case false => Future.successful(Redirect(routes.ApplicationController.gitHubRepository(
owner,
repositoryName,
None,
None
).url))
case true => repoService.canUpdateFeedback(repoName, request.identity).flatMap(canUpdate =>
repoService.getMapScoreFromUser(repoName,request.identity).map(map =>
map.isEmpty match {
case false => Ok(views.html.feedbackForm(gitHubProvider, request.identity)(owner, repositoryName, FeedbackForm.form.bind(map), canUpdate))
case true => Ok(views.html.feedbackForm(gitHubProvider, request.identity)(owner, repositoryName, FeedbackForm.form, canUpdate))
}
))
}
}
case None => Future.successful(Ok(views.html.feedbackForm(gitHubProvider, request.identity)(owner, repositoryName, FeedbackForm.form, false)))
}
case None => Future.successful(NotFound(views.html.error("notFound", HttpStatus.SC_NOT_FOUND, "Not Found",
"We cannot find the repository feedback page, it is likely that you misspelled it, try something else!")))
})
}
/**
* Handles the feedback score post
*
* @param owner Owner of the repository on the repo system (GitHub)
* @param repositoryName repository name on the repo system (GitHub)
* @return Redirect to repo page
*/
def postScore(owner: String, repositoryName: String, update: Option[Boolean]) = SecuredAction.async { implicit request =>
FeedbackForm.form.bindFromRequest.fold(
formWithErrors => Future.successful(BadRequest(views.html.feedbackForm(gitHubProvider, Some(request.identity))
(owner, repositoryName, formWithErrors, update.getOrElse(false)))),
data => repoService.giveScoreToRepo(owner,
request.identity,
repositoryName,
data.scoreDocumentation,
data.scoreMaturity,
data.scoreDesign,
data.scoreSupport,
data.feedback
).map(repo => Redirect(routes.ApplicationController.gitHubRepository(
owner,
repositoryName,
None,
None
).url))
)
}
/**
* Handles the quickstart guide post
*
* @param owner Owner of the repository on the repo system (GitHub)
* @param repositoryName repository name on the repo system (GitHub)
* @return Redirect to repo page
*/
def postQuickstartGuide(owner: String, repositoryName: String) = SecuredAction.async { implicit request =>
QuickstartForm.form.bindFromRequest.fold(
formWithErrors => Future.successful(BadRequest(views.html.quickstartGuide(gitHubProvider, Some(request.identity))
(owner, repositoryName, formWithErrors)))
,
data => quickstartService.createQuickstart(
request.identity,
owner + "/" + repositoryName,
data.title,
data.description,
QuickstartForm.validateUrl(data.url)
).map(q => Redirect(routes.ApplicationController.gitHubRepository(
owner,
repositoryName,
None,
None
).url))
)
}
/**
    * Handles the quickstart guide creation page
*
* @param owner Owner of the repository on the repo system (GitHub)
* @param repositoryName repository name on the repo system (GitHub)
    * @return the html page with the quickstart guide form for the given repository.
*/
def createGuidePage(owner: String, repositoryName: String) = UserAwareAction.async { implicit request =>
val repoName: String = owner + "/" + repositoryName
repoService.getFromNeoOrGitHub(request.identity, repoName).map({
case Some(repository) =>
Ok(views.html.quickstartGuide(gitHubProvider, request.identity)(owner, repositoryName, QuickstartForm.form))
case None => NotFound(views.html.error("notFound", HttpStatus.SC_NOT_FOUND, "Not Found",
"We cannot find the repository feedback page, it is likely that you misspelled it, try something else!"))
})
}
/**
* Service for upvoting a guide
*
* @param owner Owner of the repository on the repo system (GitHub)
* @param repositoryName repository name on the repo system (GitHub)
* @param id id of the guide
* @param voteType if the vote is upvote or downvote
* @return the guide
*/
def upVote(owner: String, repositoryName: String, id: Int, voteType: String) = SecuredAction.async { implicit request =>
val repoName: String = owner + "/" + repositoryName
repoService.getFromNeoOrGitHub(Some(request.identity), repoName).flatMap({
case Some(repository) =>
voteType match {
case "upvote" => quickstartService.updateVote(repository, true, id, request.identity)
.map({
case Some(guide) => Redirect(routes.ApplicationController.gitHubRepository(
owner,
repositoryName,
None,
None
).url)
case None => NotFound(views.html.error("notFound", HttpStatus.SC_NOT_FOUND, "Not Found",
"We cannot find the guide."))
})
case "downvote" => quickstartService.updateVote(repository, false, id, request.identity)
.map({
case Some(guide) => Redirect(routes.ApplicationController.gitHubRepository(
owner,
repositoryName,
None,
None
).url)
case None =>
NotFound(views.html.error("notFound", HttpStatus.SC_NOT_FOUND, "Not Found",
"We cannot find the guide, it is likely that you misspelled it, try something else!"))
})
case _ => Future.successful(
MethodNotAllowed(views.html.error("methodNotAllowed", HttpStatus.SC_METHOD_NOT_ALLOWED, "Not Allowed",
"Trying some funny stuff, the incident will be reported"))
)
}
case None => Future.successful(NotFound(views.html.error("notFound", HttpStatus.SC_NOT_FOUND, "Not Found",
"We cannot find the repository feedback page, it is likely that you misspelled it, try something else!")))
})
}
}
| gitlinks/gitrank-web | app/controllers/ApplicationController.scala | Scala | apache-2.0 | 12,665 |
package org.bitcoins.core.util.testprotocol
import org.bitcoins.core.crypto.{ ECPrivateKey, Sha256Hash160Digest }
import org.bitcoins.core.protocol.Address
import org.bitcoins.core.util.BitcoinSLogger
import spray.json._
/**
* Created by tom on 6/14/16.
*/
object Base58ValidTestCaseProtocol extends DefaultJsonProtocol {
import ConfigParamsProtocol._
implicit object Base58ValidTestCaseFormatter extends RootJsonFormat[Base58ValidTestCase] {
override def read(value: JsValue): Base58ValidTestCase = {
val jsArray: JsArray = value match {
case array: JsArray => array
case _: JsValue => throw new RuntimeException("Core test case must be in the format of js array")
}
val elements: Vector[JsValue] = jsArray.elements
val configParams: ConfigParams = elements(2).convertTo[ConfigParams]
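      // elements(0) holds either a base58 address or a raw private key string, selected by the isPrivKey flag.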
def addressOrPrivateKey(elements: Vector[JsValue]): Either[Address, String] = configParams.isPrivKey match {
case false => Left(Address(elements(0).convertTo[String]).get)
case true => Right(elements(0).convertTo[String])
}
def isHashOrPrivKey(elements: Vector[JsValue]): Either[Sha256Hash160Digest, ECPrivateKey] = configParams.addrTypeOrIsCompressed match {
case a if a.isLeft => Left(Sha256Hash160Digest(elements(1).convertTo[String]))
case b if b.isRight => Right(ECPrivateKey(elements(1).convertTo[String]))
}
Base58ValidTestCaseImpl(addressOrPrivateKey(elements), isHashOrPrivKey(elements), configParams)
}
override def write(testCase: Base58ValidTestCase): JsValue = ???
}
}
| Christewart/bitcoin-s-core | src/test/scala/org/bitcoins/core/util/testprotocol/Base58ValidTestCaseProtocol.scala | Scala | mit | 1,595 |
import api.Api
import core.BootedCore
import core.CorePlumbing
import web.Web
object taximeter extends App with BootedCore with CorePlumbing with Api with Web | opyate/taximeter | src/main/scala/RestApp.scala | Scala | mit | 159 |
package pmg.tutorials
import org.scalatest._
import akka.actor.ActorSystem
import akka.util.Timeout
import akka.testkit.TestActorRef
import scala.concurrent.duration._
import scala.concurrent.Await
import akka.pattern.ask
import scala.language.postfixOps
import scala.util.{Success => ScalaSuccess}
class SolvingActorSpec extends FlatSpec with Matchers {
"A SolvingActor" should "return a Success message when the solution matches the input" in {
implicit val system = ActorSystem("BruteForceSystem")
implicit val timeout = Timeout(5 seconds)
val actorRef = TestActorRef(new SolvingActor("password.txt"))
    // Let's send a simulated "Solution" message to the actor
val future = actorRef ? Solution("wtf")
    // Now we retrieve the response, similar to the "receive" method in the actor
val ScalaSuccess(Success(solution: String)) = future.value.get
// Finally we make the assertion
solution should be("wtf")
system.shutdown
}
it should "return a Failure message when the solution does not match the input" in {
implicit val system = ActorSystem("BruteForceSystem")
implicit val timeout = Timeout(5 seconds)
val actorRef = TestActorRef(new SolvingActor("password.txt"))
val future = actorRef ? Solution("isThisRight")
future.value.get should be(ScalaSuccess(Failure))
system.shutdown
}
}
| mcross1882/BruteForceAttack | src/test/scala/pmg/tutorials/SolvingActorSpec.scala | Scala | apache-2.0 | 1,488 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import SharedHelpers.thisLineNumber
import Matchers._
class ShouldBeReadableStructuralLogicalAndSpec extends FunSpec {
val fileName: String = "ShouldBeReadableStructuralLogicalAndSpec.scala"
def wasEqualTo(left: Any, right: Any): String =
FailureMessages("wasEqualTo", left, right)
def wasNotEqualTo(left: Any, right: Any): String =
FailureMessages("wasNotEqualTo", left, right)
def equaled(left: Any, right: Any): String =
FailureMessages("equaled", left, right)
def didNotEqual(left: Any, right: Any): String =
FailureMessages("didNotEqual", left, right)
def wasNotReadable(left: Any): String =
FailureMessages("wasNotReadable", left)
def wasReadable(left: Any): String =
FailureMessages("wasReadable", left)
describe("readable matcher") {
describe("when work with arbitrary object with isReadable() method") {
class MyReadability(value: Boolean) {
def isReadable(): Boolean = value
override def toString = "readability"
}
val objTrue = new MyReadability(true)
val objFalse = new MyReadability(false)
it("should do nothing for when both passed") {
objTrue should (be (readable) and equal (objTrue))
objTrue should (equal (objTrue) and be (readable))
objTrue should (be (readable) and be_== (objTrue))
objTrue should (be_== (objTrue) and be (readable))
}
it("should throw correct TFE when first check failed") {
val caught1 = intercept[TestFailedException] {
objFalse should (be (readable) and equal (objFalse))
}
assert(caught1.message === Some(wasNotReadable(objFalse)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
objTrue should (equal (objFalse) and be (readable))
}
assert(caught2.message === Some(didNotEqual(objTrue, objFalse)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
objFalse should (be (readable) and be_== (objFalse))
}
assert(caught3.message === Some(wasNotReadable(objFalse)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
objTrue should (be_== (objFalse) and be (readable))
}
assert(caught4.message === Some(wasNotEqualTo(objTrue, objFalse)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
it("should throw correct TFE when second check failed") {
val caught1 = intercept[TestFailedException] {
objTrue should (be (readable) and equal (objFalse))
}
assert(caught1.message === Some(wasReadable(objTrue) + ", but " + didNotEqual(objTrue, objFalse)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
objFalse should (equal (objFalse) and be (readable))
}
assert(caught2.message === Some(equaled(objFalse, objFalse) + ", but " + wasNotReadable(objFalse)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
objTrue should (be (readable) and be_== (objFalse))
}
assert(caught3.message === Some(wasReadable(objTrue) + ", but " + wasNotEqualTo(objTrue, objFalse)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
objFalse should (be_== (objFalse) and be (readable))
}
assert(caught4.message === Some(wasEqualTo(objFalse, objFalse) + ", but " + wasNotReadable(objFalse)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
it("should throw correct TFE when both check failed") {
val caught1 = intercept[TestFailedException] {
objFalse should (be (readable) and equal (objTrue))
}
assert(caught1.message === Some(wasNotReadable(objFalse)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
objFalse should (equal (objTrue) and be (readable))
}
assert(caught2.message === Some(didNotEqual(objFalse, objTrue)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
objFalse should (be (readable) and be_== (objTrue))
}
assert(caught3.message === Some(wasNotReadable(objFalse)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
objFalse should (be_== (objTrue) and be (readable))
}
assert(caught4.message === Some(wasNotEqualTo(objFalse, objTrue)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
describe("when work with arbitrary object with isReadable method") {
class MyReadability(value: Boolean) {
def isReadable: Boolean = value
override def toString = "readability"
}
val objTrue = new MyReadability(true)
val objFalse = new MyReadability(false)
it("should do nothing for when both passed") {
objTrue should (be (readable) and equal (objTrue))
objTrue should (equal (objTrue) and be (readable))
objTrue should (be (readable) and be_== (objTrue))
objTrue should (be_== (objTrue) and be (readable))
}
it("should throw correct TFE when first check failed") {
val caught1 = intercept[TestFailedException] {
objFalse should (be (readable) and equal (objFalse))
}
assert(caught1.message === Some(wasNotReadable(objFalse)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
objTrue should (equal (objFalse) and be (readable))
}
assert(caught2.message === Some(didNotEqual(objTrue, objFalse)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
objFalse should (be (readable) and be_== (objFalse))
}
assert(caught3.message === Some(wasNotReadable(objFalse)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
objTrue should (be_== (objFalse) and be (readable))
}
assert(caught4.message === Some(wasNotEqualTo(objTrue, objFalse)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
it("should throw correct TFE when second check failed") {
val caught1 = intercept[TestFailedException] {
objTrue should (be (readable) and equal (objFalse))
}
assert(caught1.message === Some(wasReadable(objTrue) + ", but " + didNotEqual(objTrue, objFalse)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
objFalse should (equal (objFalse) and be (readable))
}
assert(caught2.message === Some(equaled(objFalse, objFalse) + ", but " + wasNotReadable(objFalse)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
objTrue should (be (readable) and be_== (objFalse))
}
assert(caught3.message === Some(wasReadable(objTrue) + ", but " + wasNotEqualTo(objTrue, objFalse)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
objFalse should (be_== (objFalse) and be (readable))
}
assert(caught4.message === Some(wasEqualTo(objFalse, objFalse) + ", but " + wasNotReadable(objFalse)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
it("should throw correct TFE when both check failed") {
val caught1 = intercept[TestFailedException] {
objFalse should (be (readable) and equal (objTrue))
}
assert(caught1.message === Some(wasNotReadable(objFalse)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
objFalse should (equal (objTrue) and be (readable))
}
assert(caught2.message === Some(didNotEqual(objFalse, objTrue)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
objFalse should (be (readable) and be_== (objTrue))
}
assert(caught3.message === Some(wasNotReadable(objFalse)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
objFalse should (be_== (objTrue) and be (readable))
}
assert(caught4.message === Some(wasNotEqualTo(objFalse, objTrue)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
describe("when work with arbitrary object with isReadable val") {
class MyReadability(value: Boolean) {
val isReadable: Boolean = value
override def toString = "readability"
}
val objTrue = new MyReadability(true)
val objFalse = new MyReadability(false)
it("should do nothing for when both passed") {
objTrue should (be (readable) and equal (objTrue))
objTrue should (equal (objTrue) and be (readable))
objTrue should (be (readable) and be_== (objTrue))
objTrue should (be_== (objTrue) and be (readable))
}
it("should throw correct TFE when first check failed") {
val caught1 = intercept[TestFailedException] {
objFalse should (be (readable) and equal (objFalse))
}
assert(caught1.message === Some(wasNotReadable(objFalse)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
objTrue should (equal (objFalse) and be (readable))
}
assert(caught2.message === Some(didNotEqual(objTrue, objFalse)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
objFalse should (be (readable) and be_== (objFalse))
}
assert(caught3.message === Some(wasNotReadable(objFalse)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
objTrue should (be_== (objFalse) and be (readable))
}
assert(caught4.message === Some(wasNotEqualTo(objTrue, objFalse)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
it("should throw correct TFE when second check failed") {
val caught1 = intercept[TestFailedException] {
objTrue should (be (readable) and equal (objFalse))
}
assert(caught1.message === Some(wasReadable(objTrue) + ", but " + didNotEqual(objTrue, objFalse)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
objFalse should (equal (objFalse) and be (readable))
}
assert(caught2.message === Some(equaled(objFalse, objFalse) + ", but " + wasNotReadable(objFalse)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
objTrue should (be (readable) and be_== (objFalse))
}
assert(caught3.message === Some(wasReadable(objTrue) + ", but " + wasNotEqualTo(objTrue, objFalse)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
objFalse should (be_== (objFalse) and be (readable))
}
assert(caught4.message === Some(wasEqualTo(objFalse, objFalse) + ", but " + wasNotReadable(objFalse)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
it("should throw correct TFE when both check failed") {
val caught1 = intercept[TestFailedException] {
objFalse should (be (readable) and equal (objTrue))
}
assert(caught1.message === Some(wasNotReadable(objFalse)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
objFalse should (equal (objTrue) and be (readable))
}
assert(caught2.message === Some(didNotEqual(objFalse, objTrue)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
objFalse should (be (readable) and be_== (objTrue))
}
assert(caught3.message === Some(wasNotReadable(objFalse)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
objFalse should (be_== (objTrue) and be (readable))
}
assert(caught4.message === Some(wasNotEqualTo(objFalse, objTrue)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
}
} | travisbrown/scalatest | src/test/scala/org/scalatest/ShouldBeReadableStructuralLogicalAndSpec.scala | Scala | apache-2.0 | 16,738 |
package io.github.benfradet
//Example https://benfradet.github.io/blog/2015/12/16/Exploring-spark.ml-with-the-Titanic-Kaggle-competition
import org.apache.log4j.{Logger, Level}
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.classification.RandomForestClassifier
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
import org.apache.spark.ml.feature.{IndexToString, VectorAssembler, StringIndexer}
import org.apache.spark.ml.tuning.{CrossValidator, ParamGridBuilder}
import org.apache.spark.sql.{SparkSession, DataFrame, Row}
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions._
object Titanic {
def main(args: Array[String]): Unit = {
Logger.getLogger("org").setLevel(Level.WARN)
if (args.length < 3) {
System.err.println("Usage: Titanic <train file> <test file> <output file>")
System.exit(1)
}
val spark = SparkSession
.builder()
.appName("Titanic")
.getOrCreate()
import spark.implicits._
val (dataDFRaw, predictDFRaw) = loadData(args(0), args(1), spark)
val (dataDFExtra, predictDFExtra) = createExtraFeatures(dataDFRaw, predictDFRaw)
val (dataDFCompleted, predictDFCompleted) = fillNAValues(dataDFExtra, predictDFExtra)
val numericFeatColNames = Seq("Age", "SibSp", "Parch", "Fare", "FamilySize")
val categoricalFeatColNames = Seq("Pclass", "Sex", "Embarked", "Title")
val idxdCategoricalFeatColName = categoricalFeatColNames.map(_ + "Indexed")
val allFeatColNames = numericFeatColNames ++ categoricalFeatColNames
val allIdxdFeatColNames = numericFeatColNames ++ idxdCategoricalFeatColName
val labelColName = "SurvivedString"
val featColName = "Features"
val idColName = "PassengerId"
val allPredictColNames = allFeatColNames ++ Seq(idColName)
val dataDFFiltered = dataDFCompleted.select(labelColName, allPredictColNames: _*)
val predictDFFiltered = predictDFCompleted.select(labelColName, allPredictColNames: _*)
val allData = dataDFFiltered.union(predictDFFiltered)
allData.cache()
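    // index each categorical feature column (Pclass, Sex, Embarked, Title) so it can be fed to the vector assembler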
val stringIndexers = categoricalFeatColNames.map { colName =>
new StringIndexer()
.setInputCol(colName)
.setOutputCol(colName + "Indexed")
.fit(allData)
}
val idxdLabelColName = "SurvivedIndexed"
// index classes
val labelIndexer = new StringIndexer()
.setInputCol(labelColName)
.setOutputCol(idxdLabelColName)
.fit(allData)
// vector assembler
val assembler = new VectorAssembler()
.setInputCols(Array(allIdxdFeatColNames: _*))
.setOutputCol(featColName)
val randomForest = new RandomForestClassifier()
.setLabelCol(idxdLabelColName)
.setFeaturesCol(featColName)
val labelConverter = new IndexToString()
.setInputCol("prediction")
.setOutputCol("predictedLabel")
.setLabels(labelIndexer.labels)
// define the order of the operations to be performed
val pipeline = new Pipeline().setStages(
(stringIndexers :+ labelIndexer :+ assembler :+ randomForest :+ labelConverter).toArray)
// grid of values to perform cross validation on
val paramGrid = new ParamGridBuilder()
.addGrid(randomForest.maxBins, Array(25, 28, 31))
.addGrid(randomForest.maxDepth, Array(4, 6, 8))
.addGrid(randomForest.impurity, Array("entropy", "gini"))
.build()
val evaluator = new BinaryClassificationEvaluator()
.setLabelCol(idxdLabelColName)
val cv = new CrossValidator()
.setEstimator(pipeline)
.setEvaluator(evaluator)
.setEstimatorParamMaps(paramGrid)
.setNumFolds(10)
// train the model
val crossValidatorModel = cv.fit(dataDFFiltered)
// make predictions
val predictions = crossValidatorModel.transform(predictDFFiltered)
predictions
.withColumn("Survived", col("predictedLabel"))
.select("PassengerId", "Survived")
.coalesce(1)
.write
.format("csv")
.option("header", "true")
.save(args(2))
}
def fillNAValues(trainDF: DataFrame, testDF: DataFrame): (DataFrame, DataFrame) = {
// TODO: train a model on the age column
// fill empty values for the age column
val avgAge = trainDF.select("Age").union(testDF.select("Age"))
.agg(avg("Age"))
.collect() match {
case Array(Row(avg: Double)) => avg
case _ => 0
}
// fill empty values for the fare column
val avgFare = trainDF.select("Fare").union(testDF.select("Fare"))
.agg(avg("Fare"))
.collect() match {
case Array(Row(avg: Double)) => avg
case _ => 0
}
// map to fill na values
val fillNAMap = Map(
"Fare" -> avgFare,
"Age" -> avgAge
)
// udf to fill empty embarked string with S corresponding to Southampton
val embarked: (String => String) = {
case "" => "S"
case a => a
}
val embarkedUDF = udf(embarked)
val newTrainDF = trainDF
.na.fill(fillNAMap)
.withColumn("Embarked", embarkedUDF(col("Embarked")))
val newTestDF = testDF
.na.fill(fillNAMap)
.withColumn("Embarked", embarkedUDF(col("Embarked")))
(newTrainDF, newTestDF)
}
def createExtraFeatures(trainDF: DataFrame, testDF: DataFrame): (DataFrame, DataFrame) = {
// udf to create a FamilySize column as the sum of the SibSp and Parch columns + 1
val familySize: ((Int, Int) => Int) = (sibSp: Int, parCh: Int) => sibSp + parCh + 1
val familySizeUDF = udf(familySize)
// udf to create a Title column extracting the title from the Name column
val Pattern = ".*, (.*?)\\\\..*".r
val titles = Map(
"Mrs" -> "Mrs",
"Lady" -> "Mrs",
"Mme" -> "Mrs",
"Ms" -> "Ms",
"Miss" -> "Miss",
"Mlle" -> "Miss",
"Master" -> "Master",
"Rev" -> "Rev",
"Don" -> "Mr",
"Sir" -> "Sir",
"Dr" -> "Dr",
"Col" -> "Col",
"Capt" -> "Col",
"Major" -> "Col"
)
val title: ((String, String) => String) = {
case (Pattern(t), sex) => titles.get(t) match {
case Some(tt) => tt
case None =>
if (sex == "male") "Mr"
else "Mrs"
}
case _ => "Mr"
}
val titleUDF = udf(title)
val newTrainDF = trainDF
.withColumn("FamilySize", familySizeUDF(col("SibSp"), col("Parch")))
.withColumn("Title", titleUDF(col("Name"), col("Sex")))
.withColumn("SurvivedString", trainDF("Survived").cast(StringType))
val newTestDF = testDF
.withColumn("FamilySize", familySizeUDF(col("SibSp"), col("Parch")))
.withColumn("Title", titleUDF(col("Name"), col("Sex")))
.withColumn("SurvivedString", lit("0").cast(StringType))
(newTrainDF, newTestDF)
}
def loadData(
trainFile: String,
testFile: String,
spark: SparkSession
): (DataFrame, DataFrame) = {
val nullable = true
val schemaArray = Array(
StructField("PassengerId", IntegerType, nullable),
StructField("Survived", IntegerType, nullable),
StructField("Pclass", IntegerType, nullable),
StructField("Name", StringType, nullable),
StructField("Sex", StringType, nullable),
StructField("Age", FloatType, nullable),
StructField("SibSp", IntegerType, nullable),
StructField("Parch", IntegerType, nullable),
StructField("Ticket", StringType, nullable),
StructField("Fare", FloatType, nullable),
StructField("Cabin", StringType, nullable),
StructField("Embarked", StringType, nullable)
)
val trainSchema = StructType(schemaArray)
val testSchema = StructType(schemaArray.filter(p => p.name != "Survived"))
val trainDF = spark.read
.format("csv")
.option("header", "true")
.schema(trainSchema)
.load(trainFile)
val testDF = spark.read
.format("csv")
.option("header", "true")
.schema(testSchema)
.load(testFile)
(trainDF, testDF)
}
}
| fclesio/learning-space | Scala/titanic.scala | Scala | gpl-2.0 | 8,001 |
package com.acme.ShoppingCart.helpers
/*
* Generates a Bearer Token with a length of
* 32 characters (MD5) or 64 characters (SHA-256) according to the
* specification RFC6750 (http://tools.ietf.org/html/rfc6750)
*
 * Uniqueness is obtained by hashing the system time combined with an
 * application-supplied 'tokenprefix' such as a session id or username
*
* public methods:
* generateMD5Token(tokenprefix: String): String
* generateSHAToken(tokenprefix: String): String
*
* Example usage:
*
* val tokenGenerator = new BearerTokenGeneratorHelper
* val username = "mary.smith"
* val token = tokenGenerator.generateMD5Token(username)
* println(token)
*
* Author: Jeff Steinmetz, @jeffsteinmetz
*
*/
class BearerTokenGeneratorHelper {
import java.security.SecureRandom
import java.security.MessageDigest
val TOKEN_LENGTH = 45 // TOKEN_LENGTH is not the return size from a hash,
// but the total characters used as random token prior to hash
// 45 was selected because System.nanoTime().toString returns
// 19 characters. 45 + 19 = 64. Therefore we are guaranteed
// at least 64 characters (bytes) to use in hash, to avoid MD5 collision < 64
val TOKEN_CHARS = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_.-"
val secureRandom = new SecureRandom()
private def toHex(bytes: Array[Byte]): String = bytes.map( "%02x".format(_) ).mkString("")
private def sha(s: String): String = {
toHex(MessageDigest.getInstance("SHA-256").digest(s.getBytes("UTF-8")))
}
private def md5(s: String): String = {
toHex(MessageDigest.getInstance("MD5").digest(s.getBytes("UTF-8")))
}
// use tail recursion, functional style to build string.
private def generateToken(tokenLength: Int) : String = {
val charLen = TOKEN_CHARS.length()
def generateTokenAccumulator(accumulator: String, number: Int) : String = {
if (number == 0) return accumulator
else
generateTokenAccumulator(accumulator + TOKEN_CHARS(secureRandom.nextInt(charLen)).toString, number - 1)
}
generateTokenAccumulator("", tokenLength)
}
/*
* Hash the Token to return a 32 or 64 character HEX String
*
* Parameters:
   *  tokenprefix: string to concatenate with the randomly generated token prior to hashing, to improve uniqueness (such as a username)
*
* Returns:
* MD5 hash of (username + current time + random token generator) as token, 128 bits, 32 characters
* or
* SHA-256 hash of (username + current time + random token generator) as token, 256 bits, 64 characters
*/
def generateMD5Token(tokenprefix: String): String = {
md5(tokenprefix + System.nanoTime() + generateToken(TOKEN_LENGTH))
}
def generateSHAToken(tokenprefix: String): String = {
sha(tokenprefix + System.nanoTime() + generateToken(TOKEN_LENGTH))
}
}
| prayagupd/finatra-angular-example | src/main/scala/com/acme/ShoppingCart/helpers/BearerTokenGeneratorHelper.scala | Scala | mit | 2,828 |
package ua.kpi.teacherjournal
import android.app.Fragment
import android.os.Bundle
import android.view.Surface
import org.scaloid.common._
trait RichFragment extends TagUtil { self: Fragment =>
implicit def ctx = getActivity
def arg[T](argName: String) =
getArguments.get(argName).asInstanceOf[T]
def setArguments(args: (String, java.io.Serializable)*): this.type = {
val bdl = new Bundle(args.size)
for ((key, value) <- args)
bdl.putSerializable(key, value)
setArguments(bdl)
this
}
def isLandscapeOrientation =
List(Surface.ROTATION_0, Surface.ROTATION_180) contains windowManager.getDefaultDisplay.getRotation
def isPortraitOrientation = !isLandscapeOrientation
}
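// Hedged usage sketch, not part of the original sources: a Fragment that mixes in
// RichFragment can round-trip serializable arguments through setArguments/arg.
// The fragment name and the "studentName" key below are purely illustrative.
class ExampleRichFragment extends Fragment with RichFragment {
  def studentName: String = arg[String]("studentName")
}
object ExampleRichFragment {
  def newInstance(name: String): ExampleRichFragment =
    new ExampleRichFragment().setArguments("studentName" -> name)
}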
| sochka/teacher-journal | src/ua/kpi/teacherjournal/RichFragment.scala | Scala | gpl-3.0 | 715 |
package scutil.lang
import minitest._
object IoDisposerTest extends SimpleTestSuite {
test("a combined Disposer should execute both actions in order") {
var tmp = ""
val a = IoDisposer delay { tmp = tmp + "a" }
val b = IoDisposer delay { tmp = tmp + "b" }
val c = a combine b
c.unsafeRun()
assertEquals(tmp, "ab")
}
test("in an exception thrown in the first of a combined Disposer should be work") {
var tmp = 0
var err = null:Exception
val a = IoDisposer delay { sys error "a failed" }
val b = IoDisposer delay { tmp = 2 }
val c = a combine b
try {
c.unsafeRun()
}
catch { case e:Exception =>
err = e
}
assertEquals(tmp, 2)
assertEquals(err.getMessage, "a failed")
}
test("in an exception thrown in the second of a combined Disposer should be work") {
var tmp = 0
var err = null:Exception
val a = IoDisposer delay { tmp = 1 }
val b = IoDisposer delay { sys error "b failed" }
val c = a combine b
try {
c.unsafeRun()
}
catch { case e:Exception =>
err = e
}
assertEquals(tmp, 1)
assertEquals(err.getMessage, "b failed")
}
test("in an exception thrown in both a combined Disposer should be work") {
var err = null:Exception
val a = IoDisposer delay { sys error "a failed" }
val b = IoDisposer delay { sys error "b failed" }
val c = a combine b
try {
c.unsafeRun()
}
catch { case e:Exception =>
err = e
}
assertEquals(err.getMessage, "a failed")
assertEquals(err.getSuppressed.length, 1)
assertEquals(err.getSuppressed()(0).getMessage, "b failed")
}
}
| ritschwumm/scutil | modules/core/src/test/scala/scutil/lang/IoDisposerTest.scala | Scala | bsd-2-clause | 1,559 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.kudu.config
import com.datamountaineer.kcql.{Kcql, WriteModeEnum}
import com.datamountaineer.streamreactor.connect.errors.{ErrorPolicy, ThrowErrorPolicy}
/**
* Created by [email protected] on 13/05/16.
* stream-reactor-maven
*/
case class KuduSettings(kcql: List[Kcql],
topicTables: Map[String, String],
allowAutoCreate: Map[String, Boolean],
allowAutoEvolve: Map[String, Boolean],
fieldsMap: Map[String, Map[String, String]],
ignoreFields: Map[String, Set[String]],
writeModeMap: Map[String, WriteModeEnum],
errorPolicy: ErrorPolicy = new ThrowErrorPolicy,
maxRetries: Int = KuduConfigConstants.NBR_OF_RETIRES_DEFAULT,
schemaRegistryUrl: String,
writeFlushMode: WriteFlushMode.WriteFlushMode,
mutationBufferSpace: Int
)
object KuduSettings {
def apply(config: KuduConfig): KuduSettings = {
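    // pull the KCQL statements, error handling and per-topic options out of the connector configuration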
val kcql = config.getKCQL
val errorPolicy = config.getErrorPolicy
val maxRetries = config.getNumberRetries
val autoCreate = config.getAutoCreate()
val autoEvolve = config.getAutoEvolve()
val schemaRegUrl = config.getSchemaRegistryUrl
val fieldsMap = config.getFieldsMap()
val ignoreFields = config.getIgnoreFieldsMap()
val writeModeMap = config.getWriteMode()
val topicTables = config.getTableTopic()
val writeFlushMode = config.getWriteFlushMode()
val mutationBufferSpace = config.getInt(KuduConfigConstants.MUTATION_BUFFER_SPACE)
new KuduSettings(kcql = kcql.toList,
topicTables = topicTables,
allowAutoCreate = autoCreate,
allowAutoEvolve = autoEvolve,
fieldsMap = fieldsMap,
ignoreFields = ignoreFields,
writeModeMap = writeModeMap,
errorPolicy = errorPolicy,
maxRetries = maxRetries,
schemaRegistryUrl = schemaRegUrl,
writeFlushMode = writeFlushMode,
mutationBufferSpace = mutationBufferSpace
)
}
}
| CodeSmell/stream-reactor | kafka-connect-kudu/src/main/scala/com/datamountaineer/streamreactor/connect/kudu/config/KuduSettings.scala | Scala | apache-2.0 | 2,780 |
package slamdata.engine
import slamdata.engine.fp._
import slamdata.engine.std._
import slamdata.engine.sql._
import slamdata.engine.analysis._
import slamdata.engine.analysis.fixplate._
import slamdata.engine.physical.mongodb._
import slamdata.engine.fs._
import scalaz.{Node => _, Tree => _, _}
import scalaz.concurrent.{Node => _, _}
import Scalaz._
import scalaz.stream.{Writer => _, _}
import slamdata.engine.config._
sealed trait PhaseResult {
def name: String
}
object PhaseResult {
import argonaut._
import Argonaut._
import slamdata.engine.{Error => SDError}
case class Error(name: String, value: SDError) extends PhaseResult
case class Tree(name: String, value: RenderedTree) extends PhaseResult {
override def toString = name + "\\n" + Show[RenderedTree].shows(value)
}
case class Detail(name: String, value: String) extends PhaseResult {
override def toString = name + "\\n" + value
}
implicit def PhaseResultEncodeJson: EncodeJson[PhaseResult] = EncodeJson {
case PhaseResult.Error(name, value) =>
Json.obj(
"name" := name,
"error" := value.getMessage
)
case PhaseResult.Tree(name, value) =>
Json.obj(
"name" := name,
"tree" := value
)
case PhaseResult.Detail(name, value) =>
Json.obj(
"name" := name,
"detail" := value
)
}
}
sealed trait Backend {
def dataSource: FileSystem
/**
* Executes a query, producing a compilation log and the path where the result
* can be found.
*/
def run(req: QueryRequest): Task[(Vector[PhaseResult], Path)]
/**
* Executes a query, placing the output in the specified resource, returning both
* a compilation log and a source of values from the result set.
*/
def eval(req: QueryRequest): Task[(Vector[PhaseResult], Process[Task, RenderedJson])] = {
for {
_ <- dataSource.delete(req.out)
t <- run(req)
(log, out) = t
proc <- Task.delay(dataSource.scanAll(out))
} yield log -> proc
}
/**
* Executes a query, placing the output in the specified resource, returning only
* a compilation log.
*/
def evalLog(req: QueryRequest): Task[Vector[PhaseResult]] = eval(req).map(_._1)
/**
* Executes a query, placing the output in the specified resource, returning only
* a source of values from the result set.
*/
def evalResults(req: QueryRequest): Process[Task, RenderedJson] = Process.eval(eval(req).map(_._2)) flatMap identity
}
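// Hedged usage sketch, not part of the original sources: given some concrete `backend`
// and a QueryRequest named `req` (both assumed to exist elsewhere), the result set can
// be drained like this. The object and method names here are illustrative only.
object BackendUsageSketch {
  def printAll(backend: Backend, req: QueryRequest): Unit =
    backend.evalResults(req)
      .map(_.toString)
      .runLog
      .run
      .foreach(println)
}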
object Backend {
def apply[PhysicalPlan: RenderTree, Config](planner: Planner[PhysicalPlan], evaluator: Evaluator[PhysicalPlan], ds: FileSystem, showNative: (PhysicalPlan, Path) => Cord) = new Backend {
def dataSource = ds
val queryPlanner = planner.queryPlanner(showNative)
def run(req: QueryRequest): Task[(Vector[PhaseResult], Path)] = Task.delay {
import Process.{logged => _, _}
def loggedTask[A](log: Vector[PhaseResult], t: Task[A]): Task[(Vector[PhaseResult], A)] =
new Task(t.get.map(_.bimap({
case e : Error => PhaseError(log, e)
case e => e
},
log -> _)))
val (phases, physical) = queryPlanner(req)
physical.fold[Task[(Vector[PhaseResult], Path)]](
error => Task.fail(PhaseError(phases, error)),
plan => loggedTask(phases, evaluator.execute(plan, req.out))
)
}.join
}
}
case class BackendDefinition(create: PartialFunction[BackendConfig, Task[Backend]]) extends (BackendConfig => Option[Task[Backend]]) {
def apply(config: BackendConfig): Option[Task[Backend]] = create.lift(config)
}
object BackendDefinition {
implicit val BackendDefinitionMonoid = new Monoid[BackendDefinition] {
def zero = BackendDefinition(PartialFunction.empty)
def append(v1: BackendDefinition, v2: => BackendDefinition): BackendDefinition =
BackendDefinition(v1.create.orElse(v2.create))
}
}
| mossprescott/quasar | src/main/scala/slamdata/engine/backend.scala | Scala | agpl-3.0 | 3,926 |
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule
package net
import java.nio.ByteBuffer
import java.nio.channels.{ DatagramChannel }
import java.net.{ SocketAddress, DatagramPacket }
import channel.{ IChan, OChan }
/**
* Factory for DatagramSockets
*
*/
object DatagramSocket {
private class DatagramInputChannel(
_socket: => Socket[DatagramPacket],
final val niochan: DatagramChannel,
final val selector: IOSelector,
final val rcvBuf: ByteBuffer) extends InputChannel[DatagramPacket] {
final lazy val socket = _socket
//TODO: SI-3569 final
private[this] final var source: SocketAddress = null
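    // read one datagram into rcvBuf: -1 signals a closed channel, 0 means nothing was received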
def doRead(rcvBuf: ByteBuffer): Int = {
try {
source = niochan.receive(rcvBuf)
} catch {
case e: java.nio.channels.ClosedChannelException =>
return -1
}
if (source == null) 0 else rcvBuf.position
}
def doCopy(rcvBuf: ByteBuffer): Seg[DatagramPacket] = {
val length = rcvBuf.remaining
val buf = new Array[Byte](length)
rcvBuf.get(buf)
//println(Utils.showHex(buf))
Seg(new DatagramPacket(buf, length, source))
}
def closed() = socket.iClosed()
}
private class DatagramOutputChannel(
_socket: => Socket[DatagramPacket],
final val niochan: DatagramChannel,
final val selector: IOSelector,
final val sndBuf: ByteBuffer) extends OutputChannel[DatagramPacket] {
final lazy val socket = _socket
//TODO: SI-3569 final
private[this] final var target: SocketAddress = null
final def doCopy(seg: Seg[DatagramPacket], sndBuf: ByteBuffer): Seg[DatagramPacket] = {
val head = seg.head
sndBuf.put(ByteBuffer.wrap(head.getData, head.getOffset, head.getLength))
target = head.getSocketAddress
seg.tail
}
def doWrite(sndBuf: ByteBuffer) =
niochan.send(sndBuf, target)
def closed() = socket.oClosed()
}
def apply(niochan: DatagramChannel,
handle: SocketHandle,
selector: IOSelector,
rcvBuf: ByteBuffer,
sndBuf: ByteBuffer): Socket[DatagramPacket] = {
lazy val s: Socket[DatagramPacket] = Socket(
niochan,
selector,
handle,
new DatagramInputChannel(s, niochan, selector, rcvBuf),
new DatagramOutputChannel(s, niochan, selector, sndBuf)
)
s
}
private class ConnectedDatagramInputChannel(
_socket: => Socket[DatagramPacket],
final val niochan: DatagramChannel,
final val selector: IOSelector,
final val rcvBuf: ByteBuffer) extends InputChannel[DatagramPacket] {
final lazy val socket = _socket
def doRead(rcvBuf: ByteBuffer): Int = {
try {
niochan.read(rcvBuf)
} catch {
case e: java.nio.channels.ClosedChannelException =>
return -1
}
}
def doCopy(rcvBuf: ByteBuffer): Seg[DatagramPacket] = {
val length = rcvBuf.remaining
val buf = new Array[Byte](length)
rcvBuf.get(buf)
//println(Utils.showHex(buf))
Seg(new DatagramPacket(buf, length))
}
def closed() = socket.iClosed()
}
private class ConnectedDatagramOutputChannel(
_socket: => Socket[DatagramPacket],
final val niochan: DatagramChannel,
final val selector: IOSelector,
final val sndBuf: ByteBuffer) extends OutputChannel[DatagramPacket] {
final lazy val socket = _socket
final def doCopy(seg: Seg[DatagramPacket], sndBuf: ByteBuffer): Seg[DatagramPacket] = {
val head = seg.head
sndBuf.put(ByteBuffer.wrap(head.getData, head.getOffset, head.getLength))
seg.tail
}
def doWrite(sndBuf: ByteBuffer) =
niochan.write(sndBuf)
def closed() = socket.oClosed()
}
def connect(niochan: DatagramChannel,
handle: SocketHandle,
selector: IOSelector,
rcvBuf: ByteBuffer,
sndBuf: ByteBuffer): Socket[DatagramPacket] = {
assert(niochan.isConnected)
lazy val s: Socket[DatagramPacket] = Socket(
niochan,
selector,
handle,
new ConnectedDatagramInputChannel(s, niochan, selector, rcvBuf),
new ConnectedDatagramOutputChannel(s, niochan, selector, sndBuf)
)
s
}
} | molecule-labs/molecule | molecule-net/src/main/scala/molecule/net/DatagramSocket.scala | Scala | apache-2.0 | 4,849 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.repository
import com.normation.inventory.domain.NodeId
import com.normation.rudder.domain.policies.RuleId
import com.normation.rudder.domain.reports.bean._
import org.joda.time._
import com.normation.cfclerk.domain.{Cf3PolicyDraftId}
import net.liftweb.common.Box
/**
* An overly simple repository for searching through the cfengine reports
 * Can search by CR, by Node, by both, and/or by date
* @author Nicolas CHARLES
*
*/
trait ReportsRepository {
/**
   * Returns all reports for the ruleId, optionally between two dates
*/
def findReportsByRule(
ruleId : RuleId
, serial : Option[Int]
, beginDate: Option[DateTime]
, endDate : Option[DateTime]
) : Seq[Reports]
/**
* Return the last (really the last, serial wise, with full execution) reports for a rule
*/
def findLastReportByRule(
ruleId: RuleId
, serial: Int
, node : Option[NodeId]
) : Seq[Reports]
/**
   * Returns all reports for the node, optionally between two dates,
   * optionally for a rule and for a specific serial of this rule.
   * Note: serial is only used if a rule is given.
   * Note: only the first 1000 entries are returned.
*/
def findReportsByNode(
nodeId : NodeId
, ruleId : Option[RuleId]
, serial : Option[Int]
, beginDate: Option[DateTime]
, endDate : Option[DateTime]
) : Seq[Reports]
/**
   * All reports for a node and rule/serial, between two dates, ordered by date
*/
def findReportsByNode(
nodeId : NodeId
, ruleId : RuleId
, serial : Int
, beginDate: DateTime
, endDate : Option[DateTime]
) : Seq[Reports]
def findExecutionTimeByNode(
nodeId : NodeId
, beginDate: DateTime
, endDate : Option[DateTime]
) : Seq[DateTime]
def getOldestReports() : Box[Option[Reports]]
def getOldestArchivedReports() : Box[Option[Reports]]
def getNewestReportOnNode(nodeid:NodeId) : Box[Option[Reports]]
def getNewestReports() : Box[Option[Reports]]
def getNewestArchivedReports() : Box[Option[Reports]]
def getDatabaseSize(databaseName : String) : Box[Long]
def reportsTable : String
def archiveTable : String
def archiveEntries(date : DateTime) : Box[Int]
def deleteEntries(date : DateTime) : Box[Int]
def getHighestId : Box[Int]
def getLastHundredErrorReports(kinds:List[String]) : Box[Seq[(Reports,Int)]]
def getErrorReportsBeetween(lower : Int, upper:Int,kinds:List[String]) : Box[Seq[Reports]]
} | jooooooon/rudder | rudder-core/src/main/scala/com/normation/rudder/repository/ReportsRepository.scala | Scala | agpl-3.0 | 4,222 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.ssg.bdt.nlp
import org.apache.spark.rdd.RDD
/**
* CRF with support for multiple parallel runs
* L2 regParam = 1/(2.0 * sigma**2)
*/
class CRF private (
private var freq: Int,
private var regParam: Double,
private var maxIterations: Int,
private var tolerance: Double,
private var regularization: String) extends Serializable {
def this() = this(freq = 1, regParam = 0.5, maxIterations = 1000, tolerance = 1E-3, regularization = "L2")
def setRegParam(regParam: Double) = {
this.regParam = regParam
this
}
def setFreq(freq: Int) = {
this.freq = freq
this
}
def setMaxIterations(maxIterations: Int) = {
this.maxIterations = maxIterations
this
}
def setEta(eta: Double) = {
this.tolerance = eta
this
}
def setRegularization(regula: String) = {
this.regularization = regula
this
}
/**
* Internal method to train the CRF model
*
* @param template the template to train the model
* @param trains the source for the training
* @return the model of the source
*/
def runCRF(
template: Array[String],
trains: RDD[Sequence]): CRFModel = {
val featureIdx = new FeatureIndex()
featureIdx.openTemplate(template)
featureIdx.openTagSetDist(trains)
val bcFeatureIdxI = trains.context.broadcast(featureIdx)
val taggers = trains
.map(new Tagger(bcFeatureIdxI.value.labels.size, LearnMode).read(_, bcFeatureIdxI.value))
featureIdx.buildDictionaryDist(taggers, bcFeatureIdxI, freq)
val bcFeatureIdxII = trains.context.broadcast(featureIdx)
val taggerList: RDD[Tagger] = taggers.map(bcFeatureIdxII.value.buildFeatures(_)).cache()
val model = runAlgorithm(taggerList, featureIdx)
taggerList.unpersist()
model
}
/**
*
* @param taggers the tagger in the template
* @param featureIdx the index of the feature
*/
def runAlgorithm(
taggers: RDD[Tagger],
featureIdx: FeatureIndex): CRFModel = {
println("Starting CRF Iterations ( sentences: %d, features: %d, labels: %d )"
.format(taggers.count(), featureIdx.maxID, featureIdx.labels.length))
var updater: UpdaterCRF = null
regularization match {
case "L1" =>
updater = new L1Updater
case "L2" =>
updater = new L2Updater
case _ =>
throw new Exception("only support L1-CRF and L2-CRF now")
}
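    // optimize the feature weights with L-BFGS over the distributed taggers, then save them into the model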
featureIdx.alpha = new CRFWithLBFGS(new CRFGradient, updater)
.setRegParam(regParam)
.setConvergenceTol(tolerance)
.setNumIterations(maxIterations)
.optimizer(taggers, featureIdx.initAlpha())
featureIdx.saveModel
}
}
/**
* Top-level methods for calling CRF.
*/
object CRF {
/**
* Train CRF Model
*
* @param templates Source templates for training the model
* @param train Source files for training the model
* @return Model
*/
def train(
templates: Array[String],
train: RDD[Sequence],
regParam: Double,
freq: Int,
maxIteration: Int,
eta: Double,
regularization: String): CRFModel = {
new CRF().setRegParam(regParam)
.setFreq(freq)
.setMaxIterations(maxIteration)
.setEta(eta)
.setRegularization(regularization)
.runCRF(templates, train)
}
def train(
templates: Array[String],
train: RDD[Sequence],
regParam: Double,
freq: Int,
maxIteration: Int,
eta: Double): CRFModel = {
new CRF().setRegParam(regParam)
.setFreq(freq)
.setMaxIterations(maxIteration)
.setEta(eta)
.runCRF(templates, train)
}
def train(
templates: Array[String],
train: RDD[Sequence],
regParam: Double,
freq: Int): CRFModel = {
new CRF().setRegParam(regParam)
.setFreq(freq)
.runCRF(templates, train)
}
def train(
templates: Array[String],
train: RDD[Sequence],
regParam: Double,
regularization: String): CRFModel = {
new CRF().setRegParam(regParam)
.setRegularization(regularization)
.runCRF(templates, train)
}
def train(
templates: Array[String],
train: RDD[Sequence],
regularization: String): CRFModel = {
new CRF().setRegularization(regularization)
.runCRF(templates, train)
}
def train(
templates: Array[String],
train: RDD[Sequence]): CRFModel = {
new CRF().runCRF(templates, train)
}
}
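// Hedged usage sketch, not part of the original sources: `templates` and `sentences`
// are assumed to be supplied by the caller (for example, parsed from a CRF++ style
// template file and a labelled corpus); the hyper-parameter values are illustrative only.
object CRFUsageSketch {
  def trainL2(templates: Array[String], sentences: RDD[Sequence]): CRFModel =
    CRF.train(templates, sentences, 0.5, 1, 100, 1E-3, "L2")
}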
| Intel-bigdata/CRF-Spark | src/main/scala/com/intel/ssg/bdt/nlp/CRF.scala | Scala | apache-2.0 | 5,226 |
///*
// active-learning-scala: Active Learning library for Scala
// Copyright (c) 2014 Davi Pereira dos Santos
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// */
//
//package al.strategies
//
//import clean.lib.{CM, Ds}
//import ml.Pattern
//import ml.classifiers.{Learner, RF}
//import ml.models.Model
//
//import scala.util.Random
//
//case class GATU0(learner: Learner, pool: Seq[Pattern], distance_name: String, alpha: Double = 1, beta: Double = 1, debug: Boolean = false)
// extends StrategyWithLearnerAndMaps with MarginMeasure with EntropyMeasure {
// override val toString = "GATU0 a" + alpha + " b" + beta + " (" + distance_name + ")"
// val abr = "\\\\textbf{GATU0" + distance_name.take(3) + "}"
// //+ beta
// val id = if (alpha == 1 && beta == 1 || alpha == 0.5 && beta == 0.5) distance_name match {
// case "eucl" => 54326 + (100000 * (1 - alpha)).toInt
// case "cheb" => 54328 + (100000 * (1 - alpha)).toInt
// case "maha" => 54329 + (100000 * (1 - alpha)).toInt
// case "manh" => 54327 + (100000 * (1 - alpha)).toInt
//  } else throw new Error("Unexpected parameters for GATU0.")
//
// protected def next(mapU: => Map[Pattern, Double], mapL: => Map[Pattern, Double], current_model: Model, unlabeled: Seq[Pattern], labeled: Seq[Pattern]) = {
// val hist = Array.fill(nclasses)(0d)
// val entropias = labeled map { lab =>
// val cla = lab.label.toInt
// hist(cla) += 1
// val s = hist.sum
// normalized_entropy(hist.map(_ / s))
// }
//    var agnostico = false // starts gnostic, since on the second pass it always flips
// var olde = entropias(nclasses - 1)
// entropias.drop(nclasses).foreach { e =>
// if (e < olde) agnostico = !agnostico
// olde = e
// }
//
//    // check
// val selected = unlabeled maxBy { x =>
// val similarityU = mapU(x) / mapU.size.toDouble
// val similarityL = mapL(x) / mapL.size.toDouble
// if (agnostico)
// math.pow(similarityU, beta) / math.pow(similarityL, alpha)
// else
// (1 - margin(current_model)(x)) * math.pow(similarityU, beta) / math.pow(similarityL, alpha)
// }
// selected
// }
//}
//
| active-learning/active-learning-scala | src/main/scala/al/strategies/GATU0.scala | Scala | gpl-2.0 | 2,854 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.producer
import org.scalatest.TestFailedException
import org.scalatest.junit.JUnit3Suite
import kafka.consumer.SimpleConsumer
import kafka.message.Message
import kafka.server.{KafkaConfig, KafkaRequestHandler, KafkaServer}
import kafka.zk.ZooKeeperTestHarness
import org.apache.log4j.{Level, Logger}
import org.junit.Test
import kafka.utils._
import java.util
import kafka.admin.AdminUtils
import util.Properties
import kafka.api.FetchRequestBuilder
import org.junit.Assert.assertTrue
import org.junit.Assert.assertFalse
import org.junit.Assert.assertEquals
import kafka.common.{ErrorMapping, FailedToSendMessageException}
import kafka.serializer.StringEncoder
class ProducerTest extends JUnit3Suite with ZooKeeperTestHarness with Logging{
private val brokerId1 = 0
private val brokerId2 = 1
private var server1: KafkaServer = null
private var server2: KafkaServer = null
private var consumer1: SimpleConsumer = null
private var consumer2: SimpleConsumer = null
private val requestHandlerLogger = Logger.getLogger(classOf[KafkaRequestHandler])
private var servers = List.empty[KafkaServer]
// Creation of consumers is deferred until they are actually needed. This allows us to kill brokers that use random
// ports and then get a consumer instance that will be pointed at the correct port
def getConsumer1() = {
if (consumer1 == null)
consumer1 = new SimpleConsumer("localhost", server1.boundPort(), 1000000, 64*1024, "")
consumer1
}
def getConsumer2() = {
if (consumer2 == null)
consumer2 = new SimpleConsumer("localhost", server2.boundPort(), 100, 64*1024, "")
consumer2
}
override def setUp() {
super.setUp()
// set up 2 brokers with 4 partitions each
val props1 = TestUtils.createBrokerConfig(brokerId1, zkConnect, false)
props1.put("num.partitions", "4")
val config1 = KafkaConfig.fromProps(props1)
val props2 = TestUtils.createBrokerConfig(brokerId2, zkConnect, false)
props2.put("num.partitions", "4")
val config2 = KafkaConfig.fromProps(props2)
server1 = TestUtils.createServer(config1)
server2 = TestUtils.createServer(config2)
servers = List(server1,server2)
val props = new Properties()
props.put("host", "localhost")
props.put("port", server1.boundPort().toString)
// temporarily set request handler logger to a higher level
requestHandlerLogger.setLevel(Level.FATAL)
}
override def tearDown() {
    // reset the request handler logger level
requestHandlerLogger.setLevel(Level.ERROR)
if (consumer1 != null)
consumer1.close()
if (consumer2 != null)
consumer2.close()
server1.shutdown
server2.shutdown
CoreUtils.rm(server1.config.logDirs)
CoreUtils.rm(server2.config.logDirs)
super.tearDown()
}
@Test
def testUpdateBrokerPartitionInfo() {
val topic = "new-topic"
TestUtils.createTopic(zkClient, topic, numPartitions = 1, replicationFactor = 2, servers = servers)
val props = new Properties()
// no need to retry since the send will always fail
props.put("message.send.max.retries", "0")
val producer1 = TestUtils.createProducer[String, String](
brokerList = "localhost:80,localhost:81",
encoder = classOf[StringEncoder].getName,
keyEncoder = classOf[StringEncoder].getName,
producerProps = props)
try{
producer1.send(new KeyedMessage[String, String](topic, "test", "test1"))
fail("Test should fail because the broker list provided are not valid")
} catch {
case e: FailedToSendMessageException => // this is expected
case oe: Throwable => fail("fails with exception", oe)
} finally {
producer1.close()
}
val producer2 = TestUtils.createProducer[String, String](
brokerList = "localhost:80," + TestUtils.getBrokerListStrFromServers(Seq(server1)),
encoder = classOf[StringEncoder].getName,
keyEncoder = classOf[StringEncoder].getName)
try{
producer2.send(new KeyedMessage[String, String](topic, "test", "test1"))
} catch {
case e: Throwable => fail("Should succeed sending the message", e)
} finally {
producer2.close()
}
val producer3 = TestUtils.createProducer[String, String](
brokerList = TestUtils.getBrokerListStrFromServers(Seq(server1, server2)),
encoder = classOf[StringEncoder].getName,
keyEncoder = classOf[StringEncoder].getName)
try{
producer3.send(new KeyedMessage[String, String](topic, "test", "test1"))
} catch {
case e: Throwable => fail("Should succeed sending the message", e)
} finally {
producer3.close()
}
}
@Test
def testSendToNewTopic() {
val props1 = new util.Properties()
props1.put("request.required.acks", "-1")
val topic = "new-topic"
// create topic with 1 partition and await leadership
TestUtils.createTopic(zkClient, topic, numPartitions = 1, replicationFactor = 2, servers = servers)
val producer1 = TestUtils.createProducer[String, String](
brokerList = TestUtils.getBrokerListStrFromServers(Seq(server1, server2)),
encoder = classOf[StringEncoder].getName,
keyEncoder = classOf[StringEncoder].getName,
partitioner = classOf[StaticPartitioner].getName,
producerProps = props1)
// Available partition ids should be 0.
producer1.send(new KeyedMessage[String, String](topic, "test", "test1"))
producer1.send(new KeyedMessage[String, String](topic, "test", "test2"))
// get the leader
val leaderOpt = ZkUtils.getLeaderForPartition(zkClient, topic, 0)
assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined)
val leader = leaderOpt.get
val messageSet = if(leader == server1.config.brokerId) {
val response1 = getConsumer1().fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, 10000).build())
response1.messageSet("new-topic", 0).iterator.toBuffer
}else {
val response2 = getConsumer2().fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, 10000).build())
response2.messageSet("new-topic", 0).iterator.toBuffer
}
assertEquals("Should have fetched 2 messages", 2, messageSet.size)
assertEquals(new Message(bytes = "test1".getBytes, key = "test".getBytes), messageSet(0).message)
assertEquals(new Message(bytes = "test2".getBytes, key = "test".getBytes), messageSet(1).message)
producer1.close()
val props2 = new util.Properties()
props2.put("request.required.acks", "3")
// no need to retry since the send will always fail
props2.put("message.send.max.retries", "0")
try {
val producer2 = TestUtils.createProducer[String, String](
brokerList = TestUtils.getBrokerListStrFromServers(Seq(server1, server2)),
encoder = classOf[StringEncoder].getName,
keyEncoder = classOf[StringEncoder].getName,
partitioner = classOf[StaticPartitioner].getName,
producerProps = props2)
producer2.close
fail("we don't support request.required.acks greater than 1")
}
catch {
case iae: IllegalArgumentException => // this is expected
case e: Throwable => fail("Not expected", e)
}
}
@Test
def testSendWithDeadBroker() {
val props = new Properties()
props.put("request.required.acks", "1")
// No need to retry since the topic will be created beforehand and normal send will succeed on the first try.
// Reducing the retries will save the time on the subsequent failure test.
props.put("message.send.max.retries", "0")
val topic = "new-topic"
// create topic
TestUtils.createTopic(zkClient, topic, partitionReplicaAssignment = Map(0->Seq(0), 1->Seq(0), 2->Seq(0), 3->Seq(0)),
servers = servers)
val producer = TestUtils.createProducer[String, String](
brokerList = TestUtils.getBrokerListStrFromServers(Seq(server1, server2)),
encoder = classOf[StringEncoder].getName,
keyEncoder = classOf[StringEncoder].getName,
partitioner = classOf[StaticPartitioner].getName,
producerProps = props)
try {
// Available partition ids should be 0, 1, 2 and 3, all lead and hosted only
// on broker 0
producer.send(new KeyedMessage[String, String](topic, "test", "test1"))
} catch {
case e: Throwable => fail("Unexpected exception: " + e)
}
// kill the broker
server1.shutdown
server1.awaitShutdown()
try {
// These sends should fail since there are no available brokers
producer.send(new KeyedMessage[String, String](topic, "test", "test1"))
fail("Should fail since no leader exists for the partition.")
} catch {
case e : TestFailedException => throw e // catch and re-throw the failure message
case e2: Throwable => // otherwise success
}
// restart server 1
server1.startup()
TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, 0)
TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0)
try {
// cross check if broker 1 got the messages
val response1 = getConsumer1().fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, 10000).build())
val messageSet1 = response1.messageSet(topic, 0).iterator
assertTrue("Message set should have 1 message", messageSet1.hasNext)
assertEquals(new Message(bytes = "test1".getBytes, key = "test".getBytes), messageSet1.next.message)
assertFalse("Message set should have another message", messageSet1.hasNext)
} catch {
case e: Exception => fail("Not expected", e)
}
producer.close
}
@Test
def testAsyncSendCanCorrectlyFailWithTimeout() {
val timeoutMs = 500
val props = new Properties()
props.put("request.timeout.ms", String.valueOf(timeoutMs))
props.put("request.required.acks", "1")
props.put("message.send.max.retries", "0")
props.put("client.id","ProducerTest-testAsyncSendCanCorrectlyFailWithTimeout")
val producer = TestUtils.createProducer[String, String](
brokerList = TestUtils.getBrokerListStrFromServers(Seq(server1, server2)),
encoder = classOf[StringEncoder].getName,
keyEncoder = classOf[StringEncoder].getName,
partitioner = classOf[StaticPartitioner].getName,
producerProps = props)
val topic = "new-topic"
// create topics in ZK
TestUtils.createTopic(zkClient, topic, partitionReplicaAssignment = Map(0->Seq(0,1)), servers = servers)
// do a simple test to make sure plumbing is okay
try {
// this message should be assigned to partition 0 whose leader is on broker 0
producer.send(new KeyedMessage[String, String](topic, "test", "test"))
// cross check if brokers got the messages
val response1 = getConsumer1().fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, 10000).build())
val messageSet1 = response1.messageSet("new-topic", 0).iterator
assertTrue("Message set should have 1 message", messageSet1.hasNext)
assertEquals(new Message("test".getBytes), messageSet1.next.message)
} catch {
      case e: Throwable => producer.close(); fail("Not expected", e)
}
// stop IO threads and request handling, but leave networking operational
// any requests should be accepted and queue up, but not handled
server1.requestHandlerPool.shutdown()
val t1 = SystemTime.milliseconds
try {
// this message should be assigned to partition 0 whose leader is on broker 0, but
      // broker 0 will not respond within timeoutMs millis.
producer.send(new KeyedMessage[String, String](topic, "test", "test"))
} catch {
case e: FailedToSendMessageException => /* success */
case e: Exception => fail("Not expected", e)
} finally {
producer.close()
}
val t2 = SystemTime.milliseconds
    // make sure we don't wait less than timeoutMs
assertTrue((t2-t1) >= timeoutMs)
}
@Test
def testSendNullMessage() {
val producer = TestUtils.createProducer[String, String](
brokerList = TestUtils.getBrokerListStrFromServers(Seq(server1, server2)),
encoder = classOf[StringEncoder].getName,
keyEncoder = classOf[StringEncoder].getName,
partitioner = classOf[StaticPartitioner].getName)
try {
// create topic
AdminUtils.createTopic(zkClient, "new-topic", 2, 1)
TestUtils.waitUntilTrue(() =>
AdminUtils.fetchTopicMetadataFromZk("new-topic", zkClient).errorCode != ErrorMapping.UnknownTopicOrPartitionCode,
"Topic new-topic not created after timeout",
waitTime = zookeeper.tickTime)
TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, "new-topic", 0)
producer.send(new KeyedMessage[String, String]("new-topic", "key", null))
} finally {
producer.close()
}
}
}
| shawjef3/kafka | core/src/test/scala/unit/kafka/producer/ProducerTest.scala | Scala | apache-2.0 | 13,638 |
/*
* Copyright 2016-2020 Daniel Urban and contributors listed in AUTHORS
* Copyright 2020 Nokia
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dev.tauri.seals
package tests
import shapeless.test.illTyped
import laws.TestTypes
// TODO: test runtime compat for all of these
class CompatSpec extends BaseSpec {
"Compat[O, N] exists" - {
"isomorphic" - {
import TestTypes.adts.iso._
"case classes" in {
Compat[Adt1.Foo, Adt2.Foo]
Compat[Adt2.Foo, Adt1.Foo]
}
"ADTs" in {
Compat[Adt1, Adt2]
Compat[Adt2, Adt1]
}
}
"when field(s) with default value(s)" - {
import TestTypes.adts.defs._
"one added" in {
Compat[Adt1.C, Adt2.C]
Compat[Adt3.C, Adt1.C]
Compat[Adt1, Adt2]
Compat[Adt3, Adt1]
}
"one removed" in {
Compat[Adt2.C, Adt1.C]
Compat[Adt1.C, Adt3.C]
Compat[Adt2, Adt1]
Compat[Adt1, Adt3]
}
"two added" in {
Compat[Adt3.C, Adt2.C]
Compat[Adt3, Adt2]
}
"two removed" in {
Compat[Adt2.C, Adt3.C]
Compat[Adt2, Adt3]
}
}
"new field with default + one compatible field type change" in {
import TestTypes.adts.defsComp._
Compat[Person1, Person2]
Compat[Person2, Person1]
Compat[C1, C2]
Compat[C2, C1]
}
"new field with default + one compatible field type change (also default)" in {
import TestTypes.adts.defsCompDefs._
Compat[Person1, Person2]
Compat[Person2, Person1]
Compat[C1, C2]
Compat[C2, C1]
}
}
"Negative tests" - {
"when field(s) without default value(s)" - {
import TestTypes.adts.nodefs._
"directly" in {
illTyped("Compat[Adt1.C, Adt2.C]", notFound)
illTyped("Compat[Adt1.C, Adt3.C]", notFound)
illTyped("Compat[Adt2.C, Adt1.C]", notFound)
illTyped("Compat[Adt2.C, Adt3.C]", notFound)
illTyped("Compat[Adt3.C, Adt1.C]", notFound)
illTyped("Compat[Adt3.C, Adt2.C]", notFound)
}
"deep in an ADT" in {
illTyped("Compat[Adt1, Adt2]", notFound)
illTyped("Compat[Adt1, Adt3]", notFound)
illTyped("Compat[Adt2, Adt1]", notFound)
illTyped("Compat[Adt2, Adt3]", notFound)
illTyped("Compat[Adt3, Adt1]", notFound)
illTyped("Compat[Adt3, Adt2]", notFound)
}
}
"when fields are renamed" in {
import TestTypes.adts.rename._
illTyped("Compat[C1, C2]", notFound)
illTyped("Compat[C2, C1]", notFound)
}
"when ADT leafs are renamed" in {
import TestTypes.adts.rename._
illTyped("Compat[v1.Adt, v2.Adt]", notFound)
illTyped("Compat[v2.Adt, v1.Adt]", notFound)
}
"List and Set are different" in {
import TestTypes.collections.{ WithList, WithSet }
illTyped("Compat[WithList, WithSet]", notFound)
}
"Set and Map are different" in {
illTyped("Compat[Set[(Int, String)], Map[Int, String]]", notFound)
}
}
}
| durban/seals | tests/src/test/scala/dev/tauri/seals/tests/CompatSpec.scala | Scala | apache-2.0 | 3,608 |
package com.outr.arango.api
import com.outr.arango.api.model._
import io.youi.client.HttpClient
import io.youi.http.HttpMethod
import io.youi.net._
import io.circe.Json
import scala.concurrent.{ExecutionContext, Future}
object APIGharialGraphEdgeDefinition {
def delete(client: HttpClient, graph: String, definition: String, waitForSync: Option[Boolean] = None, dropCollections: Option[Boolean] = None)(implicit ec: ExecutionContext): Future[Json] = client
.method(HttpMethod.Delete)
.path(path"/_api/gharial/{graph}/edge/{definition}".withArguments(Map("graph" -> graph, "definition" -> definition)), append = true)
.param[Option[Boolean]]("waitForSync", waitForSync, None)
.param[Option[Boolean]]("dropCollections", dropCollections, None)
.call[Json]
def put(client: HttpClient, graph: String, definition: String, waitForSync: Option[Boolean] = None, dropCollections: Option[Boolean] = None, body: GeneralGraphEdgeDefinitionModifyHttpExamples)(implicit ec: ExecutionContext): Future[Json] = client
.method(HttpMethod.Put)
.path(path"/_api/gharial/{graph}/edge/{definition}".withArguments(Map("graph" -> graph, "definition" -> definition)), append = true)
.param[Option[Boolean]]("waitForSync", waitForSync, None)
.param[Option[Boolean]]("dropCollections", dropCollections, None)
.restful[GeneralGraphEdgeDefinitionModifyHttpExamples, Json](body)
} | outr/arangodb-scala | api/src/main/scala/com/outr/arango/api/APIGharialGraphEdgeDefinition.scala | Scala | mit | 1,404 |
class Person {
protected[this] var age = 0
def setAge(newAge: Int) { // A person can never get younger
if (newAge > age) age = newAge
}
}
class Manager extends Person {
protected var salary = 0.0
def setSalary(newSalary: Double) { // A manager's salary can never decrease
if (newSalary > salary) salary = newSalary
}
// Can access age from superclass
def description = "a manager who is " + age +
" years old and makes " + salary
def isSeniorTo(other: Manager) =
salary > other.salary
// Can't access age of another person. The following doesn't work:
// age > other.age
}
object Main extends App {
var fred = new Manager
fred.setAge(50)
fred.setSalary(100000)
var wilma = new Manager
wilma.setAge(55)
wilma.setSalary(90000)
if (fred.isSeniorTo(wilma))
    println(fred.description + "\nis senior to " + wilma.description)
else
    println(wilma.description + "\nis senior to " + fred.description)
}
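// Illustrative sketch, not part of the original example: because age is declared
// protected[this] (object-private), it cannot be read on another instance even from
// inside Person or Manager, while the plain protected salary can be compared across
// Manager instances, as isSeniorTo does above.
object ProtectedThisDemo extends App {
  val boss = new Manager
  boss.setAge(60) // allowed: goes through the public setter
  // boss.age     // would not compile: age is object-private
  println(boss.description)
}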
| yeahnoob/scala-impatient-2e-code | src/ch08/sec04/Manager.scala | Scala | gpl-3.0 | 971 |
package me.gregd.cineworld.integration.vue
import java.time.LocalDate
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import me.gregd.cineworld.domain.service.VueService
import me.gregd.cineworld.integration.tmdb.TmdbIntegrationService
import me.gregd.cineworld.integration.vue.VueIntegrationService
import me.gregd.cineworld.util.{FixedClock, NoOpCache}
import monix.execution.Scheduler
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.{FunSuite, Matchers}
import play.api.libs.ws.ahc.AhcWSClient
import stub.Stubs
import util.WSClient
class VueServiceTest extends FunSuite with ScalaFutures with IntegrationPatience with Matchers with WSClient {
val date: LocalDate = LocalDate.parse("2017-05-23")
val clock = FixedClock(date)
val tmdb = new TmdbIntegrationService(wsClient, NoOpCache.cache, Scheduler.global, Stubs.tmdb.config)
val repo = new VueIntegrationService(wsClient, NoOpCache.cache, Stubs.vue.config)
val dao = new VueService(repo, clock)
test("retrieveCinemas") {
dao.retrieveCinemas().futureValue should not be empty
}
test("retrieveMoviesAndPerformances for 1010882") {
val listings = dao.retrieveMoviesAndPerformances("10032", clock.today()).futureValue
val (movie, performances) = listings.head
movie.title should not be empty
movie.id should not be empty
movie.poster_url should not be empty
performances should not be empty
}
}
| Grogs/cinema-service | domain/src/test/scala/me/gregd/cineworld/integration/vue/VueServiceTest.scala | Scala | gpl-3.0 | 1,460 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.util
import java.sql.{Date, Timestamp}
import java.time._
import java.time.Year.isLeap
import java.time.temporal.IsoFields
import java.util.{Locale, TimeZone}
import java.util.concurrent.TimeUnit._
import scala.util.control.NonFatal
import org.apache.spark.unsafe.types.UTF8String
/**
* Helper functions for converting between internal and external date and time representations.
* Dates are exposed externally as java.sql.Date and are represented internally as the number of
* dates since the Unix epoch (1970-01-01). Timestamps are exposed externally as java.sql.Timestamp
* and are stored internally as longs, which are capable of storing timestamps with microsecond
* precision.
*/
object DateTimeUtils {
// we use Int and Long internally to represent [[DateType]] and [[TimestampType]]
type SQLDate = Int
type SQLTimestamp = Long
// see http://stackoverflow.com/questions/466321/convert-unix-timestamp-to-julian
// it's 2440587.5, rounding up to compatible with Hive
final val JULIAN_DAY_OF_EPOCH = 2440588
final val NANOS_PER_MICROS = MICROSECONDS.toNanos(1)
final val NANOS_PER_MILLIS = MILLISECONDS.toNanos(1)
final val NANOS_PER_SECOND = SECONDS.toNanos(1)
final val MICROS_PER_MILLIS = MILLISECONDS.toMicros(1)
final val MICROS_PER_SECOND = SECONDS.toMicros(1)
final val MICROS_PER_DAY = DAYS.toMicros(1)
final val MILLIS_PER_SECOND = SECONDS.toMillis(1)
final val MILLIS_PER_MINUTE = MINUTES.toMillis(1)
final val MILLIS_PER_HOUR = HOURS.toMillis(1)
final val MILLIS_PER_DAY = DAYS.toMillis(1)
final val SECONDS_PER_DAY = DAYS.toSeconds(1)
// number of days between 1.1.1970 and 1.1.2001
final val to2001 = -11323
// this is year -17999, calculation: 50 * daysIn400Year
final val YearZero = -17999
final val toYearZero = to2001 + 7304850
final val TimeZoneGMT = TimeZone.getTimeZone("GMT")
final val TimeZoneUTC = TimeZone.getTimeZone("UTC")
val TIMEZONE_OPTION = "timeZone"
def defaultTimeZone(): TimeZone = TimeZone.getDefault()
def getTimeZone(timeZoneId: String): TimeZone = {
val zoneId = ZoneId.of(timeZoneId, ZoneId.SHORT_IDS)
TimeZone.getTimeZone(zoneId)
}
// we should use the exact day as Int, for example, (year, month, day) -> day
def millisToDays(millisUtc: Long): SQLDate = {
millisToDays(millisUtc, defaultTimeZone())
}
def millisToDays(millisUtc: Long, timeZone: TimeZone): SQLDate = {
// SPARK-6785: use Math.floorDiv so negative number of days (dates before 1970)
// will correctly work as input for function toJavaDate(Int)
val millisLocal = millisUtc + timeZone.getOffset(millisUtc)
Math.floorDiv(millisLocal, MILLIS_PER_DAY).toInt
}
// reverse of millisToDays
def daysToMillis(days: SQLDate): Long = {
daysToMillis(days, defaultTimeZone())
}
def daysToMillis(days: SQLDate, timeZone: TimeZone): Long = {
val millisLocal = days.toLong * MILLIS_PER_DAY
millisLocal - getOffsetFromLocalMillis(millisLocal, timeZone)
}
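  // Illustrative sketch (not part of the original source, never called): Math.floorDiv keeps
  // pre-1970 instants on the correct day. One millisecond before the epoch, in UTC, falls on
  // 1969-12-31 (day -1) rather than truncating toward zero to day 0.
  private def millisToDaysExample(): Unit = {
    assert(millisToDays(-1L, TimeZoneUTC) == -1) // 1969-12-31
    assert(millisToDays(0L, TimeZoneUTC) == 0)   // 1970-01-01
  }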
// Converts Timestamp to string according to Hive TimestampWritable convention.
def timestampToString(tf: TimestampFormatter, us: SQLTimestamp): String = {
tf.format(us)
}
/**
* Returns the number of days since epoch from java.sql.Date.
*/
def fromJavaDate(date: Date): SQLDate = {
millisToDays(date.getTime)
}
/**
* Returns a java.sql.Date from number of days since epoch.
*/
def toJavaDate(daysSinceEpoch: SQLDate): Date = {
new Date(daysToMillis(daysSinceEpoch))
}
/**
* Returns a java.sql.Timestamp from number of micros since epoch.
*/
def toJavaTimestamp(us: SQLTimestamp): Timestamp = {
// setNanos() will overwrite the millisecond part, so the milliseconds should be
// cut off at seconds
var seconds = us / MICROS_PER_SECOND
var micros = us % MICROS_PER_SECOND
// setNanos() can not accept negative value
if (micros < 0) {
micros += MICROS_PER_SECOND
seconds -= 1
}
val t = new Timestamp(SECONDS.toMillis(seconds))
t.setNanos(MICROSECONDS.toNanos(micros).toInt)
t
}
/**
* Returns the number of micros since epoch from java.sql.Timestamp.
*/
def fromJavaTimestamp(t: Timestamp): SQLTimestamp = {
if (t != null) {
MILLISECONDS.toMicros(t.getTime()) + NANOSECONDS.toMicros(t.getNanos()) % NANOS_PER_MICROS
} else {
0L
}
}
/**
* Returns the number of microseconds since epoch from Julian day
* and nanoseconds in a day
*/
def fromJulianDay(day: Int, nanoseconds: Long): SQLTimestamp = {
// use Long to avoid rounding errors
val seconds = (day - JULIAN_DAY_OF_EPOCH).toLong * SECONDS_PER_DAY
SECONDS.toMicros(seconds) + NANOSECONDS.toMicros(nanoseconds)
}
/**
* Returns Julian day and nanoseconds in a day from the number of microseconds
*
   * Note: supports timestamps since 4717 BC (without negative nanoseconds, compatible with Hive).
*/
def toJulianDay(us: SQLTimestamp): (Int, Long) = {
val julian_us = us + JULIAN_DAY_OF_EPOCH * MICROS_PER_DAY
val day = julian_us / MICROS_PER_DAY
val micros = julian_us % MICROS_PER_DAY
(day.toInt, MICROSECONDS.toNanos(micros))
}
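  // Illustrative sketch (not part of the original source, never called): toJulianDay and
  // fromJulianDay round-trip; the epoch itself is Julian day 2440588 at zero nanoseconds.
  private def julianDayExample(): Unit = {
    val (day, nanos) = toJulianDay(0L)
    assert(day == JULIAN_DAY_OF_EPOCH && nanos == 0L)
    assert(fromJulianDay(day, nanos) == 0L)
  }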
/*
   * Converts the timestamp to milliseconds since epoch. In Spark, timestamp values have
   * microsecond precision, so this conversion is lossy.
*/
def toMillis(us: SQLTimestamp): Long = {
    // When the timestamp is negative, i.e. before 1970, we need to adjust the milliseconds portion.
// Example - 1965-01-01 10:11:12.123456 is represented as (-157700927876544) in micro precision.
// In millis precision the above needs to be represented as (-157700927877).
Math.floorDiv(us, MICROS_PER_MILLIS)
}
/*
* Converts milliseconds since epoch to SQLTimestamp.
*/
def fromMillis(millis: Long): SQLTimestamp = {
MILLISECONDS.toMicros(millis)
}
/**
   * Trim and parse a given UTF8 date string to a corresponding [[Long]] value.
* The return type is [[Option]] in order to distinguish between 0L and null. The following
* formats are allowed:
*
* `yyyy`
* `yyyy-[m]m`
* `yyyy-[m]m-[d]d`
* `yyyy-[m]m-[d]d `
* `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
* `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
* `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
* `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
* `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
* `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
* `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
* `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
* `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
* `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
* `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
* `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
* `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
* `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
* `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
* `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
*/
def stringToTimestamp(s: UTF8String, timeZone: TimeZone): Option[SQLTimestamp] = {
if (s == null) {
return None
}
var tz: Option[Byte] = None
val segments: Array[Int] = Array[Int](1, 1, 1, 0, 0, 0, 0, 0, 0)
var i = 0
var currentSegmentValue = 0
val bytes = s.trim.getBytes
var j = 0
var digitsMilli = 0
var justTime = false
while (j < bytes.length) {
val b = bytes(j)
val parsedValue = b - '0'.toByte
if (parsedValue < 0 || parsedValue > 9) {
if (j == 0 && b == 'T') {
justTime = true
i += 3
} else if (i < 2) {
if (b == '-') {
if (i == 0 && j != 4) {
// year should have exact four digits
return None
}
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else if (i == 0 && b == ':') {
justTime = true
segments(3) = currentSegmentValue
currentSegmentValue = 0
i = 4
} else {
return None
}
} else if (i == 2) {
if (b == ' ' || b == 'T') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
return None
}
} else if (i == 3 || i == 4) {
if (b == ':') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
return None
}
} else if (i == 5 || i == 6) {
if (b == 'Z') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
tz = Some(43)
} else if (b == '-' || b == '+') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
tz = Some(b)
} else if (b == '.' && i == 5) {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
return None
}
if (i == 6 && b != '.') {
i += 1
}
} else {
if (b == ':' || b == ' ') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
return None
}
}
} else {
if (i == 6) {
digitsMilli += 1
}
currentSegmentValue = currentSegmentValue * 10 + parsedValue
}
j += 1
}
segments(i) = currentSegmentValue
if (!justTime && i == 0 && j != 4) {
// year should have exact four digits
return None
}
while (digitsMilli < 6) {
segments(6) *= 10
digitsMilli += 1
}
// We are truncating the nanosecond part, which results in loss of precision
while (digitsMilli > 6) {
segments(6) /= 10
digitsMilli -= 1
}
try {
val zoneId = if (tz.isEmpty) {
timeZone.toZoneId
} else {
val sign = if (tz.get.toChar == '-') -1 else 1
ZoneId.ofOffset("GMT", ZoneOffset.ofHoursMinutes(sign * segments(7), sign * segments(8)))
}
val nanoseconds = MICROSECONDS.toNanos(segments(6))
val localTime = LocalTime.of(segments(3), segments(4), segments(5), nanoseconds.toInt)
val localDate = if (justTime) {
LocalDate.now(zoneId)
} else {
LocalDate.of(segments(0), segments(1), segments(2))
}
val localDateTime = LocalDateTime.of(localDate, localTime)
val zonedDateTime = ZonedDateTime.of(localDateTime, zoneId)
val instant = Instant.from(zonedDateTime)
Some(instantToMicros(instant))
} catch {
case NonFatal(_) => None
}
}
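  // Illustrative sketch (not part of the original source, never called): a few accepted and
  // rejected inputs for stringToTimestamp; the GMT zone is an assumption for the example only.
  private def stringToTimestampExample(): Unit = {
    val ok = stringToTimestamp(UTF8String.fromString("1970-01-01 00:00:01.5"), TimeZoneGMT)
    assert(ok.contains(1500000L)) // 1.5 seconds after the epoch, in microseconds
    val rejected = stringToTimestamp(UTF8String.fromString("70-01-01"), TimeZoneGMT)
    assert(rejected.isEmpty)      // the year must have exactly four digits
  }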
def instantToMicros(instant: Instant): Long = {
val us = Math.multiplyExact(instant.getEpochSecond, MICROS_PER_SECOND)
val result = Math.addExact(us, NANOSECONDS.toMicros(instant.getNano))
result
}
def microsToInstant(us: Long): Instant = {
val secs = Math.floorDiv(us, MICROS_PER_SECOND)
val mos = Math.floorMod(us, MICROS_PER_SECOND)
Instant.ofEpochSecond(secs, mos * NANOS_PER_MICROS)
}
def instantToDays(instant: Instant): Int = {
val seconds = instant.getEpochSecond
val days = Math.floorDiv(seconds, SECONDS_PER_DAY)
days.toInt
}
def localDateToDays(localDate: LocalDate): Int = localDate.toEpochDay.toInt
def daysToLocalDate(days: Int): LocalDate = LocalDate.ofEpochDay(days)
/**
* Trim and parse a given UTF8 date string to a corresponding [[Int]] value.
* The return type is [[Option]] in order to distinguish between 0 and null. The following
* formats are allowed:
*
* `yyyy`
* `yyyy-[m]m`
* `yyyy-[m]m-[d]d`
* `yyyy-[m]m-[d]d `
* `yyyy-[m]m-[d]d *`
* `yyyy-[m]m-[d]dT*`
*/
def stringToDate(s: UTF8String): Option[SQLDate] = {
if (s == null) {
return None
}
val segments: Array[Int] = Array[Int](1, 1, 1)
var i = 0
var currentSegmentValue = 0
val bytes = s.trim.getBytes
var j = 0
while (j < bytes.length && (i < 3 && !(bytes(j) == ' ' || bytes(j) == 'T'))) {
val b = bytes(j)
if (i < 2 && b == '-') {
if (i == 0 && j != 4) {
// year should have exact four digits
return None
}
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
val parsedValue = b - '0'.toByte
if (parsedValue < 0 || parsedValue > 9) {
return None
} else {
currentSegmentValue = currentSegmentValue * 10 + parsedValue
}
}
j += 1
}
if (i == 0 && j != 4) {
// year should have exact four digits
return None
}
segments(i) = currentSegmentValue
try {
val localDate = LocalDate.of(segments(0), segments(1), segments(2))
val instant = localDate.atStartOfDay(TimeZoneUTC.toZoneId).toInstant
Some(instantToDays(instant))
} catch {
case NonFatal(_) => None
}
}
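  // Illustrative sketch (not part of the original source, never called): stringToDate accepts
  // year, year-month and full dates, and ignores anything after a space or 'T'.
  private def stringToDateExample(): Unit = {
    assert(stringToDate(UTF8String.fromString("1970-01-02")).contains(1))
    assert(stringToDate(UTF8String.fromString("1970-01-02 23:59:59")).contains(1))
    assert(stringToDate(UTF8String.fromString("02-01-1970")).isEmpty) // year must come first
  }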
/**
* Returns the microseconds since year zero (-17999) from microseconds since epoch.
*/
private def absoluteMicroSecond(microsec: SQLTimestamp): SQLTimestamp = {
microsec + toYearZero * MICROS_PER_DAY
}
private def localTimestamp(microsec: SQLTimestamp, timeZone: TimeZone): SQLTimestamp = {
val zoneOffsetUs = MILLISECONDS.toMicros(timeZone.getOffset(MICROSECONDS.toMillis(microsec)))
absoluteMicroSecond(microsec) + zoneOffsetUs
}
/**
* Returns the hour value of a given timestamp value. The timestamp is expressed in microseconds.
*/
def getHours(microsec: SQLTimestamp, timeZone: TimeZone): Int = {
(MICROSECONDS.toHours(localTimestamp(microsec, timeZone)) % 24).toInt
}
/**
* Returns the minute value of a given timestamp value. The timestamp is expressed in
* microseconds.
*/
def getMinutes(microsec: SQLTimestamp, timeZone: TimeZone): Int = {
(MICROSECONDS.toMinutes(localTimestamp(microsec, timeZone)) % 60).toInt
}
/**
* Returns the second value of a given timestamp value. The timestamp is expressed in
* microseconds.
*/
def getSeconds(microsec: SQLTimestamp, timeZone: TimeZone): Int = {
(MICROSECONDS.toSeconds(localTimestamp(microsec, timeZone)) % 60).toInt
}
/**
* Returns the 'day in year' value for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getDayInYear(date: SQLDate): Int = {
LocalDate.ofEpochDay(date).getDayOfYear
}
/**
* Returns the year value for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getYear(date: SQLDate): Int = {
LocalDate.ofEpochDay(date).getYear
}
/**
* Returns the quarter for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getQuarter(date: SQLDate): Int = {
LocalDate.ofEpochDay(date).get(IsoFields.QUARTER_OF_YEAR)
}
/**
* Split date (expressed in days since 1.1.1970) into four fields:
* year, month (Jan is Month 1), dayInMonth, daysToMonthEnd (0 if it's last day of month).
*/
def splitDate(date: SQLDate): (Int, Int, Int, Int) = {
val ld = LocalDate.ofEpochDay(date)
(ld.getYear, ld.getMonthValue, ld.getDayOfMonth, ld.lengthOfMonth() - ld.getDayOfMonth)
}
/**
* Returns the month value for the given date. The date is expressed in days
* since 1.1.1970. January is month 1.
*/
def getMonth(date: SQLDate): Int = {
LocalDate.ofEpochDay(date).getMonthValue
}
/**
* Returns the 'day of month' value for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getDayOfMonth(date: SQLDate): Int = {
LocalDate.ofEpochDay(date).getDayOfMonth
}
/**
* The number of days for each month (not leap year)
*/
private val monthDays = Array(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
/**
* Returns the date value for the first day of the given month.
* The month is expressed in months since year zero (17999 BC), starting from 0.
*/
private def firstDayOfMonth(absoluteMonth: Int): SQLDate = {
val absoluteYear = absoluteMonth / 12
var monthInYear = absoluteMonth - absoluteYear * 12
var date = getDateFromYear(absoluteYear)
if (monthInYear >= 2 && isLeap(absoluteYear + YearZero)) {
date += 1
}
while (monthInYear > 0) {
date += monthDays(monthInYear - 1)
monthInYear -= 1
}
date
}
/**
* Returns the date value for January 1 of the given year.
* The year is expressed in years since year zero (17999 BC), starting from 0.
*/
private def getDateFromYear(absoluteYear: Int): SQLDate = {
val absoluteDays = (absoluteYear * 365 + absoluteYear / 400 - absoluteYear / 100
+ absoluteYear / 4)
absoluteDays - toYearZero
}
/**
* Add date and year-month interval.
* Returns a date value, expressed in days since 1.1.1970.
*/
def dateAddMonths(days: SQLDate, months: Int): SQLDate = {
val (year, monthInYear, dayOfMonth, daysToMonthEnd) = splitDate(days)
val absoluteMonth = (year - YearZero) * 12 + monthInYear - 1 + months
val nonNegativeMonth = if (absoluteMonth >= 0) absoluteMonth else 0
val currentMonthInYear = nonNegativeMonth % 12
val currentYear = nonNegativeMonth / 12
val leapDay = if (currentMonthInYear == 1 && isLeap(currentYear + YearZero)) 1 else 0
val lastDayOfMonth = monthDays(currentMonthInYear) + leapDay
val currentDayInMonth = if (daysToMonthEnd == 0 || dayOfMonth >= lastDayOfMonth) {
// last day of the month
lastDayOfMonth
} else {
dayOfMonth
}
firstDayOfMonth(nonNegativeMonth) + currentDayInMonth - 1
}
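  // Illustrative sketch (not part of the original source, never called): dateAddMonths clamps
  // to the last day of the target month, so one month after 2015-01-31 is 2015-02-28.
  private def dateAddMonthsExample(): Unit = {
    val jan31 = localDateToDays(LocalDate.of(2015, 1, 31))
    assert(dateAddMonths(jan31, 1) == localDateToDays(LocalDate.of(2015, 2, 28)))
  }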
/**
* Add timestamp and full interval.
* Returns a timestamp value, expressed in microseconds since 1.1.1970 00:00:00.
*/
def timestampAddInterval(
start: SQLTimestamp,
months: Int,
microseconds: Long,
timeZone: TimeZone): SQLTimestamp = {
val days = millisToDays(MICROSECONDS.toMillis(start), timeZone)
val newDays = dateAddMonths(days, months)
start +
MILLISECONDS.toMicros(daysToMillis(newDays, timeZone) - daysToMillis(days, timeZone)) +
microseconds
}
/**
* Returns number of months between time1 and time2. time1 and time2 are expressed in
* microseconds since 1.1.1970. If time1 is later than time2, the result is positive.
*
   * If time1 and time2 are on the same day of month, or both are the last day of month,
   * the time of day will be ignored.
*
* Otherwise, the difference is calculated based on 31 days per month.
* The result is rounded to 8 decimal places if `roundOff` is set to true.
*/
def monthsBetween(
time1: SQLTimestamp,
time2: SQLTimestamp,
roundOff: Boolean,
timeZone: TimeZone): Double = {
val millis1 = MICROSECONDS.toMillis(time1)
val millis2 = MICROSECONDS.toMillis(time2)
val date1 = millisToDays(millis1, timeZone)
val date2 = millisToDays(millis2, timeZone)
val (year1, monthInYear1, dayInMonth1, daysToMonthEnd1) = splitDate(date1)
val (year2, monthInYear2, dayInMonth2, daysToMonthEnd2) = splitDate(date2)
val months1 = year1 * 12 + monthInYear1
val months2 = year2 * 12 + monthInYear2
val monthDiff = (months1 - months2).toDouble
if (dayInMonth1 == dayInMonth2 || ((daysToMonthEnd1 == 0) && (daysToMonthEnd2 == 0))) {
return monthDiff
}
// using milliseconds can cause precision loss with more than 8 digits
// we follow Hive's implementation which uses seconds
val secondsInDay1 = MILLISECONDS.toSeconds(millis1 - daysToMillis(date1, timeZone))
val secondsInDay2 = MILLISECONDS.toSeconds(millis2 - daysToMillis(date2, timeZone))
val secondsDiff = (dayInMonth1 - dayInMonth2) * SECONDS_PER_DAY + secondsInDay1 - secondsInDay2
val secondsInMonth = DAYS.toSeconds(31)
val diff = monthDiff + secondsDiff / secondsInMonth.toDouble
if (roundOff) {
// rounding to 8 digits
math.round(diff * 1e8) / 1e8
} else {
diff
}
}
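  // Illustrative sketch (not part of the original source, never called): reproduces Hive's
  // documented months_between example; the UTC zone is an assumption for the example only.
  private def monthsBetweenExample(): Unit = {
    val t1 = stringToTimestamp(UTF8String.fromString("1997-02-28 10:30:00"), TimeZoneUTC).get
    val t2 = stringToTimestamp(UTF8String.fromString("1996-10-30 00:00:00"), TimeZoneUTC).get
    // a raw month difference of 4, minus a fraction of a 31-day month for the missing days/time
    assert(monthsBetween(t1, t2, roundOff = true, TimeZoneUTC) == 3.94959677)
  }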
// Thursday = 0 since 1970/Jan/01 => Thursday
private val SUNDAY = 3
private val MONDAY = 4
private val TUESDAY = 5
private val WEDNESDAY = 6
private val THURSDAY = 0
private val FRIDAY = 1
private val SATURDAY = 2
/*
* Returns day of week from String. Starting from Thursday, marked as 0.
* (Because 1970-01-01 is Thursday).
*/
def getDayOfWeekFromString(string: UTF8String): Int = {
val dowString = string.toString.toUpperCase(Locale.ROOT)
dowString match {
case "SU" | "SUN" | "SUNDAY" => SUNDAY
case "MO" | "MON" | "MONDAY" => MONDAY
case "TU" | "TUE" | "TUESDAY" => TUESDAY
case "WE" | "WED" | "WEDNESDAY" => WEDNESDAY
case "TH" | "THU" | "THURSDAY" => THURSDAY
case "FR" | "FRI" | "FRIDAY" => FRIDAY
case "SA" | "SAT" | "SATURDAY" => SATURDAY
case _ => -1
}
}
/**
* Returns the first date which is later than startDate and is of the given dayOfWeek.
   * dayOfWeek is an integer in the range [0, 6], where 0 is Thu, 1 is Fri, etc.
*/
def getNextDateForDayOfWeek(startDate: SQLDate, dayOfWeek: Int): SQLDate = {
startDate + 1 + ((dayOfWeek - 1 - startDate) % 7 + 7) % 7
}
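  // Illustrative sketch (not part of the original source, never called): day 0 is Thursday
  // 1970-01-01, so the next Monday (dayOfWeek = 4) strictly after it is 1970-01-05, day 4.
  private def nextDayExample(): Unit = {
    assert(getDayOfWeekFromString(UTF8String.fromString("Mon")) == MONDAY)
    assert(getNextDateForDayOfWeek(0, MONDAY) == 4)
  }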
/**
* Returns last day of the month for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getLastDayOfMonth(date: SQLDate): SQLDate = {
val localDate = LocalDate.ofEpochDay(date)
(date - localDate.getDayOfMonth) + localDate.lengthOfMonth()
}
// Visible for testing.
private[sql] val TRUNC_TO_YEAR = 1
private[sql] val TRUNC_TO_MONTH = 2
private[sql] val TRUNC_TO_QUARTER = 3
private[sql] val TRUNC_TO_WEEK = 4
private[sql] val TRUNC_TO_DAY = 5
private[sql] val TRUNC_TO_HOUR = 6
private[sql] val TRUNC_TO_MINUTE = 7
private[sql] val TRUNC_TO_SECOND = 8
private[sql] val TRUNC_INVALID = -1
/**
* Returns the trunc date from original date and trunc level.
   * Trunc level should be generated using `parseTruncLevel()`, and should only be 1 or 2.
*/
def truncDate(d: SQLDate, level: Int): SQLDate = {
if (level == TRUNC_TO_YEAR) {
d - DateTimeUtils.getDayInYear(d) + 1
} else if (level == TRUNC_TO_MONTH) {
d - DateTimeUtils.getDayOfMonth(d) + 1
} else {
// caller make sure that this should never be reached
sys.error(s"Invalid trunc level: $level")
}
}
/**
* Returns the trunc date time from original date time and trunc level.
   * Trunc level should be generated using `parseTruncLevel()`, and should be between 1 and 8.
*/
def truncTimestamp(t: SQLTimestamp, level: Int, timeZone: TimeZone): SQLTimestamp = {
var millis = MICROSECONDS.toMillis(t)
val truncated = level match {
case TRUNC_TO_YEAR =>
val dDays = millisToDays(millis, timeZone)
daysToMillis(truncDate(dDays, level), timeZone)
case TRUNC_TO_MONTH =>
val dDays = millisToDays(millis, timeZone)
daysToMillis(truncDate(dDays, level), timeZone)
case TRUNC_TO_DAY =>
val offset = timeZone.getOffset(millis)
millis += offset
millis - millis % MILLIS_PER_DAY - offset
case TRUNC_TO_HOUR =>
val offset = timeZone.getOffset(millis)
millis += offset
millis - millis % MILLIS_PER_HOUR - offset
case TRUNC_TO_MINUTE =>
millis - millis % MILLIS_PER_MINUTE
case TRUNC_TO_SECOND =>
millis - millis % MILLIS_PER_SECOND
case TRUNC_TO_WEEK =>
val dDays = millisToDays(millis, timeZone)
val prevMonday = getNextDateForDayOfWeek(dDays - 7, MONDAY)
daysToMillis(prevMonday, timeZone)
case TRUNC_TO_QUARTER =>
val dDays = millisToDays(millis, timeZone)
val daysOfQuarter = LocalDate.ofEpochDay(dDays)
.`with`(IsoFields.DAY_OF_QUARTER, 1L).toEpochDay.toInt
daysToMillis(daysOfQuarter, timeZone)
case _ =>
// caller make sure that this should never be reached
sys.error(s"Invalid trunc level: $level")
}
truncated * MICROS_PER_MILLIS
}
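  // Illustrative sketch (not part of the original source, never called): truncating a date to
  // the first day of its year or month; truncTimestamp applies the same idea to timestamps.
  private def truncDateExample(): Unit = {
    val d = localDateToDays(LocalDate.of(2015, 3, 5))
    assert(truncDate(d, TRUNC_TO_YEAR) == localDateToDays(LocalDate.of(2015, 1, 1)))
    assert(truncDate(d, TRUNC_TO_MONTH) == localDateToDays(LocalDate.of(2015, 3, 1)))
  }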
/**
   * Returns the truncate level, which could be TRUNC_TO_YEAR, TRUNC_TO_MONTH, TRUNC_TO_DAY, TRUNC_TO_HOUR,
* TRUNC_TO_MINUTE, TRUNC_TO_SECOND, TRUNC_TO_WEEK, TRUNC_TO_QUARTER or TRUNC_INVALID,
* TRUNC_INVALID means unsupported truncate level.
*/
def parseTruncLevel(format: UTF8String): Int = {
if (format == null) {
TRUNC_INVALID
} else {
format.toString.toUpperCase(Locale.ROOT) match {
case "YEAR" | "YYYY" | "YY" => TRUNC_TO_YEAR
case "MON" | "MONTH" | "MM" => TRUNC_TO_MONTH
case "DAY" | "DD" => TRUNC_TO_DAY
case "HOUR" => TRUNC_TO_HOUR
case "MINUTE" => TRUNC_TO_MINUTE
case "SECOND" => TRUNC_TO_SECOND
case "WEEK" => TRUNC_TO_WEEK
case "QUARTER" => TRUNC_TO_QUARTER
case _ => TRUNC_INVALID
}
}
}
/**
   * Look up the offset for the given milliseconds since 1970-01-01 00:00:00 in the given timezone.
* TODO: Improve handling of normalization differences.
* TODO: Replace with JSR-310 or similar system - see SPARK-16788
*/
private[sql] def getOffsetFromLocalMillis(millisLocal: Long, tz: TimeZone): Long = {
var guess = tz.getRawOffset
// the actual offset should be calculated based on milliseconds in UTC
val offset = tz.getOffset(millisLocal - guess)
if (offset != guess) {
guess = tz.getOffset(millisLocal - offset)
if (guess != offset) {
// fallback to do the reverse lookup using java.time.LocalDateTime
// this should only happen near the start or end of DST
val localDate = LocalDate.ofEpochDay(MILLISECONDS.toDays(millisLocal))
val localTime = LocalTime.ofNanoOfDay(MILLISECONDS.toNanos(
Math.floorMod(millisLocal, MILLIS_PER_DAY)))
val localDateTime = LocalDateTime.of(localDate, localTime)
val millisEpoch = localDateTime.atZone(tz.toZoneId).toInstant.toEpochMilli
guess = (millisLocal - millisEpoch).toInt
}
}
guess
}
/**
* Convert the timestamp `ts` from one timezone to another.
*
   * TODO: Because of DST, the conversion between UTC and human time is not exactly a one-to-one
   * mapping, so the conversion here may return a wrong result; we should make the timestamp
   * timezone-aware.
*/
def convertTz(ts: SQLTimestamp, fromZone: TimeZone, toZone: TimeZone): SQLTimestamp = {
// We always use local timezone to parse or format a timestamp
val localZone = defaultTimeZone()
val utcTs = if (fromZone.getID == localZone.getID) {
ts
} else {
// get the human time using local time zone, that actually is in fromZone.
val localZoneOffsetMs = localZone.getOffset(MICROSECONDS.toMillis(ts))
val localTsUs = ts + MILLISECONDS.toMicros(localZoneOffsetMs) // in fromZone
val offsetFromLocalMs = getOffsetFromLocalMillis(MICROSECONDS.toMillis(localTsUs), fromZone)
localTsUs - MILLISECONDS.toMicros(offsetFromLocalMs)
}
if (toZone.getID == localZone.getID) {
utcTs
} else {
val toZoneOffsetMs = toZone.getOffset(MICROSECONDS.toMillis(utcTs))
val localTsUs = utcTs + MILLISECONDS.toMicros(toZoneOffsetMs) // in toZone
// treat it as local timezone, convert to UTC (we could get the expected human time back)
val offsetFromLocalMs = getOffsetFromLocalMillis(MICROSECONDS.toMillis(localTsUs), localZone)
localTsUs - MILLISECONDS.toMicros(offsetFromLocalMs)
}
}
/**
   * Returns a timestamp in the given timezone from a UTC timestamp, with the same string
   * representation in their respective timezones.
*/
def fromUTCTime(time: SQLTimestamp, timeZone: String): SQLTimestamp = {
convertTz(time, TimeZoneGMT, getTimeZone(timeZone))
}
/**
   * Returns a UTC timestamp from a timestamp in the given timezone, with the same
   * string representation in their respective timezones.
*/
def toUTCTime(time: SQLTimestamp, timeZone: String): SQLTimestamp = {
convertTz(time, getTimeZone(timeZone), TimeZoneGMT)
}
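  // Illustrative sketch (not part of the original source, never called): 00:00 UTC at the epoch
  // reads as 08:00 in a zone eight hours ahead of UTC, such as Asia/Shanghai.
  private def utcConversionExample(): Unit = {
    val eightHoursUs = HOURS.toMicros(8)
    assert(fromUTCTime(0L, "Asia/Shanghai") == eightHoursUs)
    assert(toUTCTime(eightHoursUs, "Asia/Shanghai") == 0L)
  }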
}
| Aegeaner/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateTimeUtils.scala | Scala | apache-2.0 | 29,068 |
/*
* OpenURP, Open University Resouce Planning
*
* Copyright (c) 2013-2014, OpenURP Software.
*
* OpenURP is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* OpenURP is distributed in the hope that it will be useful.
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Beangle. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.edu.attendance.ws.impl
import java.sql.Date
import org.beangle.commons.lang.Dates.{ now, toDate }
import org.beangle.commons.lang.Strings.{ isNotEmpty, replace, substring }
import org.beangle.commons.logging.Logging
import org.beangle.data.jdbc.query.JdbcExecutor
import org.openurp.edu.attendance.ws.domain.{ SigninData, SigninInfo }
import org.openurp.edu.attendance.ws.domain.AttendTypePolicy
import org.openurp.edu.attendance.ws.domain.ShardPolicy.{ activityTable, detailTable, logTable }
import org.openurp.edu.attendance.ws.model.AttendType
import org.openurp.edu.attendance.ws.domain.DateFormats.{ toCourseTime, toDateStr, toTimeStr }
import org.openurp.edu.attendance.ws.web.util.JsonBuilder
import com.google.gson.JsonObject
/**
 * Sign-in service (签到服务)
*
* @author chaostone
* @version 1.0, 2014/03/22
* @since 0.0.1
*/
class SigninService extends Logging {
var deviceRegistry: DeviceRegistry = _
var executor: JdbcExecutor = _
var attendTypePolicy: AttendTypePolicy = _
var baseDataService: BaseDataService = _
var daySigninCache: DaySigninCache = _
def signin(data: SigninData): JsonObject = {
var retcode, attendTypeId = 0
    // return message, student class name, student name
var retmsg, classname, custname = ""
val signinAt = data.signinAt
val signinOn = toDate(data.signinAt)
val signinTime = toCourseTime(signinAt)
deviceRegistry.get(data.devId) match {
case Some(device) =>
try {
val signinInfoOpt = getSigninInfo(signinOn, device.room.id, signinTime, data.cardId)
if (signinInfoOpt.isEmpty) {
retmsg = "非本课程学生"
custname = data.cardId
log("Wrong place or time {}", data)
} else {
val signinInfo = signinInfoOpt.get
val signId = signinInfo.signinId
custname = signinInfo.stdName
attendTypeId = attendTypePolicy.calcAttendType(signinTime, signinInfo)
if (attendTypeId == 0) {
retmsg = "考勤未开始"
log("Time unsuitable {}", data)
} else {
val operator = substring(data.cardId + "(" + custname + ")", 0, 30)
val updatedAt = now
val rscnt = executor.update("update " + detailTable(signinOn) +
" set attend_type_id=?, signin_at=?, dev_id=?, updated_at=?, operator=? where id=? and signin_at is null", attendTypeId, signinAt, device.id, updatedAt, operator, signId)
            retmsg = if (0 == rscnt) "已经签到" else AttendType.names(attendTypeId) // "已经签到" = "already signed in"
logDB(data, "ok")
}
}
} catch {
case e: Exception => {
retmsg = "未知错误" + e.getMessage()
logger.error("signin erorr:", e)
log("Error " + e.getMessage() + " {}", data)
}
}
case None => {
retmsg = "无法连接,没有对应的教室信息"
log("Invalid device {}", data)
}
}
val rs = new JsonBuilder
if (attendTypeId == 0 && isNotEmpty(retmsg)) retcode = -1
rs.add("retcode", retcode).add("retmsg", retmsg)
rs.add("classname", classname).add("stuempno", data.cardId)
rs.add("custname", custname).add("signindate", toDateStr(signinAt))
rs.add("signintime", toTimeStr(signinAt))
rs.mkJson
}
private def getSigninInfo(signinOn: Date, roomId: Int, signinTime: Int, cardId: String): Option[SigninInfo] = {
var rs = daySigninCache.get(signinOn, roomId, signinTime, cardId)
rs match {
case Some(d) => rs
case None => {
val datas = executor.query("select d.id,xs.xm,aa.attend_begin_time,aa.begin_time,aa.end_time from " + detailTable(signinOn) + " d,xsxx_t xs," + activityTable(signinOn) +
" aa where xs.id=d.std_id and aa.id=d.activity_id and aa.course_date = ? and ? between aa.attend_begin_time and aa.end_time and aa.room_id=? and xs.xh=?", signinOn, signinTime, roomId, cardId)
if (!datas.isEmpty) {
val data = datas.head
val attendBegin = data(2).asInstanceOf[Number].intValue
val begin = data(3).asInstanceOf[Number].intValue
val end = data(4).asInstanceOf[Number].intValue
rs = Some(new SigninInfo(data(0).asInstanceOf[Number].longValue(), data(1).toString, attendBegin, begin, end))
}
}
}
rs
}
private def logDB(data: SigninData, msg: String) {
executor.update("insert into " + logTable(toDate(data.signinAt)) +
"(dev_id,card_id,signin_at,created_at,params,remark) values(?,?,?,?,?,?)", data.devId, data.cardId, data.signinAt, now, data.params, msg)
}
private def log(msg: String, data: SigninData) {
logger.info(replace(msg, "{}", data.toString))
logDB(data, replace(msg, "{}", ""))
}
} | openurp/edu-core | attendance/ws/src/main/scala/org/openurp/edu/attendance/ws/impl/SigninService.scala | Scala | gpl-3.0 | 5,536 |
package atari.st.tools
import atari.st.disk.{Disk, Duplicates, DuplicateStatus}
import atari.st.settings.Settings
import atari.st.util.Util
import java.io.{BufferedOutputStream, ByteArrayInputStream, FileOutputStream}
import java.nio.file.{Files, Path}
import scala.collection.mutable
import suiryc.scala.io.{FilesEx, IOStream, PathsEx}
object Deduplicator {
import Core._
case class DeduplicationInfo(
/** Deduplication status */
status: DuplicateStatus.Value,
/** Whether this is an alternative disk */
alternative: Boolean = false,
/** Whether this is an alternative boot sector */
alternativeBootSector: Boolean = false,
/** Original disk, if known */
original: Option[Disk] = None,
/** By-name duplicates checksums that would be kept if present */
wouldKeep: Set[String] = Set.empty,
/** By-name duplicates that are actually kept */
keptChecksums: Set[String] = Set.empty,
/** By-name duplicates that are actually dropped */
droppedChecksums: Set[String] = Set.empty,
/** Unsure by-name duplicates */
unsure: List[Disk] = Nil
) {
/* Various cases:
* - !alternative && !alternativeBootSector: preferred disk
* => move to target
* - alternative && !alternativeBootSector: alternative disk
* => move to target 'alternatives' if not already in an 'alternative' folder
* - !alternative && alternativeBootSector: preferred (boot sector) disk
* => move to target
* - alternative && alternativeBootSector: alternative (boot sector) disk
* => move to target 'alternatives.boot-sector' if allowed and not already in an 'alternative' folder
* => save boot sector to target as 'diskname.alternative.bs' if requested
* Target is the 'preferred' folder if disk is kept, 'others' if dropped.
* If status is unsure, disk is not moved.
*/
def isUnsure: Boolean =
status == DuplicateStatus.unsure
}
/* Already decided duplicates (by checksum). */
val decided: mutable.Map[String, DeduplicationInfo] = mutable.Map()
def deduplicate(inspect: Boolean) {
findDuplicates()
diskChecksums.toList sortBy(_._2.preferred.info.normalizedName) foreach { tuple =>
//val checksum = tuple._1
val duplicates = tuple._2
val preferred = duplicates.preferred
val dedupInfo = decideDuplicates(duplicates)
/* If status is not 'unsure', then there is no unsure duplicates.
* If there are unsure duplicates, then the status is 'unsure'.
*/
checkFormat(preferred.info, dedupInfo.isUnsure)
for (dup <- duplicates.others ::: duplicates.excluded)
checkFormat(dup.info, dedupInfo.isUnsure)
if (!inspect || (dedupInfo.status != DuplicateStatus.keep) ||
(options.showDuplicates && (duplicates.others.nonEmpty || duplicates.excluded.nonEmpty)) ||
(options.showUnique && duplicates.others.isEmpty && duplicates.excluded.isEmpty))
{
if (duplicates.others.isEmpty && duplicates.excluded.isEmpty && (dedupInfo.status == DuplicateStatus.keep))
println(s"Name: ${preferred.info.normalizedName}; Image: ${preferred.info}")
else {
println(s"Name: ${preferred.info.normalizedName}")
println(s" Preferred: ${preferred.info}")
}
if (duplicates.others.nonEmpty)
println(s" Duplicates: ${duplicates.others.map(_.info.path)}")
if (duplicates.excluded.nonEmpty)
println(s" Excluded (for richer disk type): ${duplicates.excluded.map(_.info.path)}")
dedupInfo.status match {
case DuplicateStatus.keep =>
val swith = if (dedupInfo.keptChecksums.nonEmpty) s" with[${dedupInfo.keptChecksums}]" else ""
val sagainst = if (dedupInfo.droppedChecksums.nonEmpty) s" unlike[${dedupInfo.droppedChecksums}]" else ""
val salternative = if (dedupInfo.alternative) "alternative" else "preferred"
if (dedupInfo.alternative || dedupInfo.alternativeBootSector)
dedupInfo.original foreach { disk =>
if (!(disk eq preferred))
println(s" Original: ${disk.info}")
}
if (dedupInfo.keptChecksums.nonEmpty || dedupInfo.droppedChecksums.nonEmpty || dedupInfo.alternative)
println(s" Duplicate by name kept ($salternative)$swith$sagainst")
if (dedupInfo.alternativeBootSector) {
if (!dedupInfo.alternative)
println(" Duplicate with preferred boot sector kept")
else
println(" Duplicate with alternative boot sector kept")
}
if (!inspect)
moveDuplicates(duplicates, dedupInfo)
case DuplicateStatus.drop =>
/* Sanity check: if we are configured to be dropped, but there is no
* associated disk kept, then actually keep this disk.
*/
val actuallyKeep =
dedupInfo.keptChecksums.isEmpty && !dedupInfo.wouldKeep.exists(diskChecksums.contains)
if (actuallyKeep)
println(s" Duplicate by name should be dropped, but actually kept due to missing preferred checksums[${dedupInfo.wouldKeep}]")
else if (dedupInfo.keptChecksums.isEmpty)
println(s" Duplicate by name dropped in favor of other checksums (with different disk name): ${dedupInfo.wouldKeep.find(diskChecksums.contains)}")
else
println(s" Duplicate by name dropped in favor of other checksums: ${dedupInfo.keptChecksums}")
if (!inspect) {
val actualDedupInfo = if (actuallyKeep) dedupInfo.copy(status = DuplicateStatus.keep) else dedupInfo
moveDuplicates(duplicates, actualDedupInfo)
}
case DuplicateStatus.unsure =>
println(s" No de-duplication due to unsure duplicates by name (but not by checksum): ${dedupInfo.unsure.map(_.info)}")
}
}
}
}
def decideDuplicates(duplicates: Duplicates): DeduplicationInfo = {
val checksum = duplicates.preferred.info.checksum
/* For each decision step, if there are unsure duplicates, our status is
* actually unsure.
*/
/* Note: scala compiler does not seem to like 'foreach' inside 'map' etc. */
def decide() {
if (!decided.contains(checksum)) {
/* If duplicates by name are allowed, there is no unsure duplicate and
* we keep this disk. */
if (options.allowByName)
decided(checksum) = DeduplicationInfo(DuplicateStatus.keep)
else {
val disks = duplicates.disks
val names = disks.map(_.info.normalizedName).toSet
val checksums = disks.map(_.info.checksum).toSet
/* Get duplicates by name with different checksums. */
val unsureChecksums =
names.flatMap(name =>
diskNames(name).map(_.info.checksum)
) -- checksums
val unsure =
unsureChecksums.toList.map(diskChecksums(_).preferred)
if (unsure.nonEmpty)
decideByChecksum(DeduplicationInfo(DuplicateStatus.unsure, unsure = unsure))
else
decided(checksum) = DeduplicationInfo(DuplicateStatus.keep)
}
}
/* else: already decided */
}
def decideByChecksum(dedupInfo: DeduplicationInfo) {
/* Check if at least one checksum from the duplicates is known */
val checksums = checksum ::
(dedupInfo.unsure.map(_.info.checksum).toSet - checksum).toList
checksums.toStream.map(Settings.core.duplicatesByName.get).collect {
case Some(v) => v
}.headOption match {
case None =>
decideByChecksum2(dedupInfo)
case Some(dupsByName) =>
val disks = duplicates.preferred :: dedupInfo.unsure
val (knownChecksums, unknownChecksums) =
disks partition { disk =>
dupsByName.status(disk.info.checksum) != DuplicateStatus.unsure
}
val (sameImages, otherImages) =
unknownChecksums partition { disk =>
/* Check images that actually are duplicates (only serial number
* differs) of known disks. */
if (!options.duplicateBootSectorAllow) false
else dupsByName.known exists { knownChecksum =>
diskChecksums.get(knownChecksum).exists { knownDuplicates =>
(knownDuplicates.preferred.info.checksum2 == disk.info.checksum2) &&
(knownDuplicates.preferred.info.bootSector.checksum == disk.info.bootSector.checksum)
}
}
}
val (unsureAlternatives, unsure) =
if (options.duplicateAllowAlternatives)
otherImages.partition(folderIsAlternative)
else
(Nil, otherImages)
if (unsure.nonEmpty) {
val diskDedupInfo = dedupInfo.copy(unsure = unsure)
disks foreach { disk =>
val diskChecksum = disk.info.checksum
decided(diskChecksum) = diskDedupInfo
}
}
else {
val checksums = disks.map(_.info.checksum).toSet
val keptChecksums = dupsByName.kept ++ unsureAlternatives.map(_.info.checksum)
val droppedChecksums = dupsByName.dropped
val original = diskChecksums.get(dupsByName.preferred) map(_.preferred)
          /* We got 3 lists of disks:
* 1. Known checksums: decided by configuration
*/
knownChecksums foreach { disk =>
val diskChecksum = disk.info.checksum
val diskDedupInfo =
dedupInfo.copy(
status = dupsByName.status(diskChecksum),
alternative = dupsByName.alternative(diskChecksum),
original = original,
wouldKeep = dupsByName.kept,
keptChecksums = keptChecksums.filterNot(_ == diskChecksum) & checksums,
droppedChecksums = droppedChecksums.filterNot(_ == diskChecksum) & checksums,
unsure = unsure
)
decided(diskChecksum) = diskDedupInfo
}
/* 2. Images that actually have the same checksum than known ones: we
* drop those in favor of the configured ones.
*/
sameImages foreach { disk =>
val diskChecksum = disk.info.checksum
val diskDedupInfo =
dedupInfo.copy(
status = DuplicateStatus.drop,
wouldKeep = dupsByName.kept,
keptChecksums = keptChecksums.filterNot(_ == diskChecksum) & checksums,
droppedChecksums = droppedChecksums.filterNot(_ == diskChecksum) & checksums,
unsure = unsure
)
decided(diskChecksum) = diskDedupInfo
}
/* 3. Images that do not match but are in 'alternative' folders: we
* keep those as alternatives.
*/
unsureAlternatives foreach { disk =>
val diskChecksum = disk.info.checksum
val diskDedupInfo =
dedupInfo.copy(
status = DuplicateStatus.keep,
alternative = true,
original = original,
keptChecksums = keptChecksums.filterNot(_ == diskChecksum) & checksums,
unsure = unsure
)
decided(diskChecksum) = diskDedupInfo
}
}
}
}
def decideByChecksum2(dedupInfo: DeduplicationInfo) {
val disks = duplicates.preferred :: dedupInfo.unsure
val (sameChecksum, differentChecksum) =
if (options.duplicateBootSectorAllow)
dedupInfo.unsure.partition(_.info.checksum2 == duplicates.preferred.info.checksum2)
else
(Nil, dedupInfo.unsure)
val (unsureAlternatives, unsure) =
if (!options.duplicateBootSectorAllow)
(Nil, dedupInfo.unsure)
else if (options.duplicateAllowAlternatives)
differentChecksum.partition(folderIsAlternative)
else
(Nil, differentChecksum)
if (unsure.nonEmpty) {
if (options.duplicateAllowAlternatives && (unsure.size == 1) && folderIsAlternative(duplicates.preferred)) {
/* We are an 'alternative' and there is actually only one 'preferred'
* disk. */
val duplicatesAll = sortDuplicates(disks, exclude = false)
val keptChecksums = disks.map(_.info.checksum).toSet
val original = Some(duplicatesAll.preferred)
disks foreach { disk =>
val diskChecksum = disk.info.checksum
val diskDedupInfo =
dedupInfo.copy(
status = DuplicateStatus.keep,
alternative = diskChecksum != duplicatesAll.preferred.info.checksum,
original = original,
keptChecksums = keptChecksums.filterNot(_ == diskChecksum),
unsure = Nil
)
decided(diskChecksum) = diskDedupInfo
}
}
else {
val diskDedupInfo = dedupInfo.copy(unsure = unsure)
disks foreach { disk =>
val diskChecksum = disk.info.checksum
decided(diskChecksum) = diskDedupInfo
}
}
}
else {
/* We need to sort ourself against previously unsure disks to know
* whether we are the preferred one (to keep) or not.
*/
val duplicatesSame =
sortDuplicates(duplicates.preferred :: sameChecksum, exclude = false)
val duplicatesAll = sortDuplicates(disks, exclude = false)
/* Group boot sector alternatives by actual boot sector checksum.
* Only the first of each group is kept.
*/
val bootsectors = duplicatesSame.disks.groupBy(_.info.bootSector.checksum)
val keptChecksums = bootsectors.map(_._2.head.info.checksum).toSet ++ unsureAlternatives.map(_.info.checksum)
val droppedChecksums = bootsectors.flatMap(_._2.tail).map(_.info.checksum).toSet
val original = Some(duplicatesAll.preferred)
if (!(duplicatesSame.preferred eq duplicatesAll.preferred)) {
/* Special case: if one of the 'alternative' images is preferred,
* then it becomes the overall preferred one, and we become 'simple'
* alternatives.
*/
disks foreach { disk =>
val diskChecksum = disk.info.checksum
val diskDedupInfo =
dedupInfo.copy(
status = if (keptChecksums.contains(diskChecksum)) DuplicateStatus.keep else DuplicateStatus.drop,
alternative = diskChecksum != duplicatesAll.preferred.info.checksum,
original = original,
keptChecksums = keptChecksums.filterNot(_ == diskChecksum),
droppedChecksums = droppedChecksums.filterNot(_ == diskChecksum),
unsure = unsure
)
decided(diskChecksum) = diskDedupInfo
}
}
else {
/* We are an alternative boot sector (preferred or not) if there
* actually are more than one boot sector. */
val alternativeBootSector = bootsectors.size > 1
/* We got 2 lists of disks:
* 1. Images with same content checksum and only different boot
* sectors: we keep the first disk of each different boot sector
* group, and the preferred disk is the preferred boot sector
*/
duplicatesSame.disks foreach { disk =>
val diskChecksum = disk.info.checksum
val diskDedupInfo =
dedupInfo.copy(
status = if (keptChecksums.contains(diskChecksum)) DuplicateStatus.keep else DuplicateStatus.drop,
alternative = diskChecksum != duplicatesSame.preferred.info.checksum,
alternativeBootSector = alternativeBootSector,
original = original,
keptChecksums = keptChecksums.filterNot(_ == diskChecksum),
droppedChecksums = droppedChecksums.filterNot(_ == diskChecksum),
unsure = unsure
)
decided(diskChecksum) = diskDedupInfo
}
/* 2. Images that do not match but are in 'alternative' folders: we
* keep/drop those as alternatives.
*/
unsureAlternatives foreach { disk =>
val diskChecksum = disk.info.checksum
val diskDedupInfo =
dedupInfo.copy(
status = DuplicateStatus.keep,
alternative = true,
alternativeBootSector = false,
original = original,
keptChecksums = keptChecksums.filterNot(_ == diskChecksum),
droppedChecksums = droppedChecksums.filterNot(_ == diskChecksum),
unsure = unsure
)
decided(diskChecksum) = diskDedupInfo
}
}
}
}
decide()
/* We have now decided */
decided(checksum)
}
def moveDuplicates(duplicates: Duplicates, dedupInfo: DeduplicationInfo) {
def moveDisk(disk: Disk) {
val output = dedupInfo.status match {
case DuplicateStatus.drop =>
options.outputOthers
case DuplicateStatus.keep =>
val canKeep =
!dedupInfo.alternative ||
!dedupInfo.alternativeBootSector ||
options.duplicateBootSectorAlternativeImage.isDefined
if ((disk eq duplicates.preferred) && canKeep) options.outputPreferred
else options.outputOthers
}
def getNominalTarget(path: Path) =
if (!folderIsAlternative(path.getParent)) path
else path.getParent.resolveSibling(path.getFileName)
val targetRelative0 = disk.root.relativize(disk.info.path)
val originalDisk = dedupInfo.original.getOrElse(disk)
/* We may need to move this disk relatively to the original disk */
val originalTargetRelative =
getNominalTarget(originalDisk.root.relativize(originalDisk.info.path))
def getAlternativeTarget(alternativeName: String) =
if (folderIsAlternative(targetRelative0.getParent))
/* Keep 'alternative' folder relative to original disk */
Option(targetRelative0.getParent.getParent).map { grandparent =>
originalTargetRelative.resolveSibling(grandparent.relativize(targetRelative0))
}.getOrElse(originalTargetRelative.resolveSibling(targetRelative0))
else
/* Create 'alternative' boot sector folder relatively to original disk */
originalTargetRelative.resolveSibling(alternativeName).resolve(targetRelative0.getFileName)
val targetRelative =
if (dedupInfo.alternative) {
if (dedupInfo.alternativeBootSector && options.duplicateBootSectorAlternativeImage.isDefined)
getAlternativeTarget(s"${Settings.core.outputRelativeAlternatives}.${options.duplicateBootSectorAlternativeImage.get}")
else
getAlternativeTarget(Settings.core.outputRelativeAlternatives)
}
else getNominalTarget(targetRelative0)
val target = Util.findTarget(output.resolve(targetRelative))
if (!options.dryRun) {
target.getParent.toFile.mkdirs()
Files.move(disk.info.path, target)
}
if (options.verbose > 1)
println(s"Moved ${disk.info.path} to $target")
if (dedupInfo.alternativeBootSector && dedupInfo.alternative)
options.duplicateBootSectorAlternativeSector foreach { suffix =>
val bsName = s"${PathsEx.atomicName(originalDisk.info.path)}.$suffix"
val target = output.resolve(originalTargetRelative.resolveSibling(bsName))
if (!options.dryRun) {
target.getParent.toFile.mkdirs()
val input = new ByteArrayInputStream(disk.info.bootSector.data)
val output = new BufferedOutputStream(new FileOutputStream(target.toFile))
IOStream.transfer(input, output)
output.flush()
output.close()
FilesEx.setTimes(target, disk.info.times)
}
if (options.verbose > 1)
println(s"Saved ${disk.info.path} boot sector to $target")
}
if (!options.dryRun) {
def delete(path: Path) {
if ((path.compareTo(disk.root) != 0) && path.toFile.delete)
delete(path.getParent)
}
delete(disk.info.path.getParent)
}
}
duplicates.disks.foreach(moveDisk)
}
}
| suiryc/atari-st-tools | src/main/scala/atari/st/tools/Deduplicator.scala | Scala | gpl-3.0 | 20,768 |
/*
* Copyright (c) 2011-2015 EPFL DATA Laboratory
* Copyright (c) 2014-2015 The Squall Collaboration (see NOTICE)
*
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ch.epfl.data.squall.test
import java.util.ArrayList
import ch.epfl.data.squall.utilities.SquallContext
import ch.epfl.data.squall.examples.imperative.shj.HyracksPlan
class HyracksTest extends TestSuite {
val context = new SquallContext();
test("0_01G_hyracks") {
expect(List(3706, 3007, 2536, 2772, 2979)) {
Logging.beginLog("hyracks")
context.setLocal()
val plan = new HyracksPlan("test/data/tpch", ".tbl", context.getConfiguration()).getQueryPlan()
val result = context.submitLocalAndWait("hyracks", plan)
Logging.endLog()
List("BUILDING", "FURNITURE", "MACHINERY", "HOUSEHOLD", "AUTOMOBILE") map
{ result.access(_).get(0) }
}
}
// test("0_01G_hyracks") {
// val query = "0_01G_hyracks"
// val result = runQuery(query)
// assert(result.equals(expectedResultFor(result, query)))
// }
// test("0_01G_hyracks_l3_batch") {
// val query = "0_01G_hyracks_l3_batch"
// val result = runQuery(query)
// assert(result.equals(expectedResultFor(result, query)))
// }
// test("0_01G_hyracks_pre_agg") {
// val query = "0_01G_hyracks_pre_agg"
// val result = runQuery(query)
// assert(result.equals(expectedResultFor(result, query)))
// }
// 0_01G_scalahyracks
}
| epfldata/squall | squall-core/src/test/scala/HyracksTest.scala | Scala | apache-2.0 | 1,977 |
package com.thinkbiganalytics.spark.dataprofiler.function
import com.thinkbiganalytics.spark.dataprofiler.ProfilerConfiguration
import com.thinkbiganalytics.spark.dataprofiler.model.StandardStatisticsModel
import org.apache.spark.sql.types.StructField
/** Creates a statistics model from RDD values.
*
  * @param schemaMap             the schema map of column index to field
  * @param profilerConfiguration the profiler configuration
  */
class PartitionLevelModels(val schemaMap: Map[Int, StructField], val profilerConfiguration: ProfilerConfiguration) extends (Iterator[((Int, Any), Int)] => Iterator[StandardStatisticsModel])
with Serializable {
override def apply(iter: Iterator[((Int, Any), Int)]): Iterator[StandardStatisticsModel] = {
val statisticsModel = new StandardStatisticsModel(profilerConfiguration)
for ((k, v) <- iter) {
statisticsModel.add(k._1, k._2, v.toLong, schemaMap(k._1))
}
Iterator.apply(statisticsModel)
}
}
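
// Hedged usage sketch (not part of the original file): shows how this function object is
// typically wired into Spark via mapPartitions over ((columnIndex, value), count) pairs.
// The construction of the input RDD and of the configuration objects is assumed to happen
// in the calling job; only the wiring is illustrated here.
object PartitionLevelModelsUsageSketch {
  import org.apache.spark.rdd.RDD

  def buildModels(counts: RDD[((Int, Any), Int)],
                  schemaMap: Map[Int, StructField],
                  config: ProfilerConfiguration): RDD[StandardStatisticsModel] =
    // each partition folds its pairs into a single StandardStatisticsModel
    counts.mapPartitions(new PartitionLevelModels(schemaMap, config))
}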
| peter-gergely-horvath/kylo | integrations/spark/spark-job-profiler/spark-job-profiler-core/src/main/scala/com/thinkbiganalytics/spark/dataprofiler/function/PartitionLevelModels.scala | Scala | apache-2.0 | 902 |
package controllers.sitedata.modality.th
import javax.inject._
import play.api._
import play.api.mvc._
import play.api.data.Form
import play.api.data.Forms._
import play.api.data._
import models.sitedata.SiteInfo
//import models.sitedata.SiteInfoDetail
import models.sitedata.joined.SiteInfoAll
//import models.sitedata.joined.EquipmentInfo
//import models.sitedata.joined.ZoneInfo
import play.api.i18n.Messages
import play.api.i18n.I18nSupport
import play.api.i18n.MessagesApi
import services.sitedata.ISiteInfoService
import services.sitedata.joined.ISiteInfoAllService
//import services.sitedata.ISiteInfoDetailService
//import services.sitedata.joined.IEquipmentInfoService
//import services.sitedata.joined.IZoneInfoService
import play.Application
import utils.Awaits
import utils._
import play.api.libs.iteratee.Enumerator
import reports.ReportBuilder
import play.api.Configuration
@Singleton
class SiteInfoController @Inject() (
val messagesApi: MessagesApi,
val applicationconf: Configuration,
val service_siteinfoall: ISiteInfoAllService,
val service_siteinfo: ISiteInfoService
// val service_siteinfodetail: ISiteInfoDetailService,
// val service_zone: IZoneService,
// val service_subzone: ISubZoneService,
// val service_equipmentinfo: IEquipmentInfoService,
// val service_zoneinfo: IZoneInfoService
) extends Controller with I18nSupport {
val siteinfoForm: Form[SiteInfo] = Form(
mapping(
"siteid" -> text,
"sitename" -> text,
"subzoneid" -> longNumber,
"equipmentmodelid" -> longNumber,
"supplement" -> optional(text),
"spipaddress" -> optional(text),
"equipmentipaddress" -> optional(text),
"routeipaddress" -> optional(text),
"note" -> optional(text),
"axedasite" -> boolean
)(models.sitedata.SiteInfo.apply)(models.sitedata.SiteInfo.unapply))
def index = Action { implicit request =>
Logger.info("/th -> SiteInfoController index called.")
val im: IndexedModality = ITH
val siteinfo = Awaits.get(5, service_siteinfo.findByModalityId(im.index)).getOrElse(Seq())
// Logger.info("return th site info are " + siteinfo)
Ok(views.html.sitedata.siteinfo.th.index(siteinfo))
}
def blank = Action { implicit request =>
Logger.info("blank called. ")
Ok(views.html.sitedata.siteinfo.create("", siteinfoForm))
}
def details(siteid: String) = Action { implicit request =>
Logger.info("details called. siteid: " + siteid)
val siteinfo = Awaits.get(5, service_siteinfoall.findBySiteId(siteid)).get
Logger.info("return site info are " + siteinfo)
// val siteinfo = Awaits.get(5, service_siteinfo.findById(siteid)).get
// val siteinfodetail = Awaits.get(5, service_siteinfodetail.findById(siteid)).get
// val zoneinfo = Awaits.get(5, service_zoneinfo.findBySubZoneId(siteinfo.subzoneid)).get
// Logger.info("return zone info are " + zoneinfo)
// val equipmentinfo = Awaits.get(5, service_equipmentinfo.findByEquipmentModelId(siteinfo.equipmentmodelid)).get
// Logger.info("return equipment info are " + equipmentinfo)
Ok(views.html.sitedata.siteinfo.details(siteinfo))
// siteinfodetail,
// zoneinfo,
// equipmentinfo))
}
def insert()= Action { implicit request =>
Logger.info("insert called.")
siteinfoForm.bindFromRequest.fold(
form => {
BadRequest(views.html.sitedata.siteinfo.insert("", form))
},
siteinfo => {
service_siteinfo.insert(siteinfo)
Redirect(controllers.sitedata.modality.th.routes.SiteInfoController.index)
.flashing("success" -> Messages("success.insert", "new siteinfo created"))
})
}
def update(siteid: String) = Action { implicit request =>
Logger.info("updated called. id: " + siteid)
siteinfoForm.bindFromRequest.fold(
form => {
Ok(views.html.sitedata.siteinfo.update("", form))
.flashing("error" -> "Fix the errors!")
},
siteinfo => {
service_siteinfo.update(siteid, siteinfo)
Redirect(controllers.sitedata.modality.th.routes.SiteInfoController.index)
.flashing("success" -> Messages("success.update", siteinfo.sitename))
})
}
def remove(siteid: String)= Action {
import play.api.libs.concurrent.Execution.Implicits.defaultContext
val result = Awaits.get(5, service_siteinfo.findById(siteid))
result.map { siteinfo =>
service_siteinfo.remove(siteid)
Redirect(controllers.sitedata.modality.th.routes.SiteInfoController.index)
.flashing("success" -> Messages("success.delete", siteinfo.sitename))
}.getOrElse(NotFound)
}
def report() = Action {
import play.api.libs.concurrent.Execution.Implicits.defaultContext
val url = applicationconf.getString("slick.dbs.SiteData.db.url").getOrElse("None")
Ok.chunked( Enumerator.fromStream( ReportBuilder.toPdf("SiteInfo_TH.jrxml", url) ) )
.withHeaders(CONTENT_TYPE -> "application/octet-stream")
.withHeaders(CONTENT_DISPOSITION -> "attachment; filename=report-siteinfo_th.pdf"
)
}
}
| tnddn/iv-web | portal/rest-portal/app/controllers/sitedata/modality/th/SiteInfoController.scala | Scala | apache-2.0 | 5,287 |
//package n_queens
import scala.math.abs
object nQueens {
case class Board(size: Int, occupied: List[Int])
def checkDiag(board: Board, pos: Int): Boolean = {
val new_row = board.occupied.length
    def diagAttack(row: Int, col: Int): Boolean =
      abs(row - new_row) == abs(pos - col)
val f = ((x:(Int, Int)) => !diagAttack(x._2, x._1))
val positions = board.occupied.zipWithIndex
positions forall f
}
def getAllPos(board: Board):List[Int] = {
val pred = ((x: Int) => checkDiag(board, x) && !board.occupied.contains(x))
(0 until board.size).toList filter pred
}
def getSolutions(board: Board): Iterator[Board] = {
if(board.occupied.length == board.size) {
return Iterator(board)
}
else {
return getAllPos(board) match {
case Nil => Iterator[Board]()
case y => y.toIterator flatMap ((x: Int) => getSolutions(Board(board.size, board.occupied:::List(x))))
}
}
}
def pretty_print(board: Board) {
board.occupied foreach ((x: Int) => print(x.toString + " "))
print("\n")
}
def main(args: Array[String]) {
val size = args(0).toInt
val board = Board(size, List())
getSolutions(board).foreach(pretty_print)
}
}
| Bolt64/my_code | scala/n_queens.scala | Scala | mit | 1,301 |
package org.rz.fps.week2.lecture2
/**
* Currying exercise no 1.
*
* @author github.com/rozasdev
*/
object Exercise1 {
/**
   * Maps the function f over the integer range a to b and reduces the results with op.
   *
   * @param f The function applied to each value in the range.
   * @param op The operation used to combine the mapped values.
   * @param a The initial point of the range.
   * @param b The ending point of the range.
   * @param zero The base case returned for an empty range (the identity element of op).
   * @return The result of mapping f over the range and combining with op.
*/
def mapReduce(f: Int => Int, op: (Int, Int) => Int, zero: Int)(a: Int, b: Int): Int =
if (a > b) zero else op(f(a), mapReduce(f, op, zero)(a + 1, b))
/**
   * Factorial function expressed as a product via mapReduce.
*
* @param a The number whose factorial will be calculated.
* @return The factorial value for a.
*/
def factorial(a: Int): Int = mapReduce(x => x, (x, y) => x * y, 1)(1, a)
/**
* Main function.
* @param args Array of arguments.
*/
def main(args: Array[String]): Unit = {
println(mapReduce(x => x * x, (x, y) => x * y, 1)(3, 4))
println(mapReduce(x => x, (x, y) => x + y, 0)(3, 4))
println(factorial(5))
}
}
| rozasdev/FPS | code/Week2/src/org/rz/fps/week2/lecture2/Exercise1.scala | Scala | apache-2.0 | 1,185 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.storage.orc.utils
import java.sql.Timestamp
import org.locationtech.jts.geom.{Geometry, Point}
import org.apache.orc.storage.ql.io.sarg.{PredicateLeaf, SearchArgument, SearchArgumentFactory}
import org.apache.orc.TypeDescription
import org.locationtech.geomesa.filter.{Bounds, FilterHelper}
import org.locationtech.geomesa.fs.storage.orc.OrcFileSystemStorage
import org.opengis.feature.simple.SimpleFeatureType
import org.opengis.filter.Filter
object OrcSearchArguments {
/**
* Creates a push-down predicate for Orc files based on a CQL filter
*
* @param sft simple feature type
* @param description orc file description
* @param filter cql filter
* @return
*/
def apply(sft: SimpleFeatureType,
description: TypeDescription,
filter: Filter): Option[(SearchArgument, Array[String])] = {
import org.apache.orc.TypeDescription.Category._
val predicates = FilterHelper.propertyNames(filter, sft).distinct.flatMap { prop =>
val binding = sft.getDescriptor(prop).getType.getBinding
val preds = if (classOf[Geometry].isAssignableFrom(binding)) {
if (classOf[Point] == binding) {
FilterHelper.extractGeometries(filter, prop).values.map(addPoint(prop, _))
} else {
// orc doesn't support push-down predicates against complex fields
Seq.empty
}
} else {
val index = sft.indexOf(prop)
// count any geom fields before the property, they take up two columns
val offset = {
var i = 0
var geoms = 0
while (i < index) {
if (classOf[Geometry].isAssignableFrom(sft.getDescriptor(i).getType.getBinding)) {
geoms += 1
}
i += 1
}
geoms
}
val category = description.getChildren.get(index + offset).getCategory
val typeAndConversion = category match {
case BOOLEAN => Some(PredicateLeaf.Type.BOOLEAN, (v: Any) => v)
case INT => Some(PredicateLeaf.Type.LONG, (v: Any) => v.asInstanceOf[java.lang.Integer].longValue)
case LONG => Some(PredicateLeaf.Type.LONG, (v: Any) => v)
case FLOAT => Some(PredicateLeaf.Type.FLOAT, (v: Any) => v.asInstanceOf[java.lang.Float].doubleValue)
case DOUBLE => Some(PredicateLeaf.Type.FLOAT, (v: Any) => v)
case STRING => Some(PredicateLeaf.Type.STRING, (v: Any) => v)
case TIMESTAMP => Some(PredicateLeaf.Type.TIMESTAMP, (v: Any) => new Timestamp(v.asInstanceOf[java.util.Date].getTime))
case BINARY | LIST | MAP => None // orc doesn't support push-down predicates against complex fields
case BYTE | CHAR | SHORT | DATE | DECIMAL | VARCHAR | STRUCT =>
throw new IllegalArgumentException(s"SimpleFeature TypeDefinition should not have type '$category'")
}
typeAndConversion.toSeq.flatMap { case (typ, conversion) =>
FilterHelper.extractAttributeBounds(filter, prop, binding).values.flatMap(add(prop, _, typ, conversion))
}
}
if (preds.isEmpty) {
Seq.empty
} else {
Seq(preds)
}
}
if (predicates.isEmpty) { None } else {
val arg = SearchArgumentFactory.newBuilder
if (predicates.length > 1) {
arg.startAnd()
}
predicates.foreach { preds =>
if (preds.length == 1) {
preds.head.apply(arg)
} else {
arg.startOr()
preds.foreach(_.apply(arg))
arg.end()
}
}
if (predicates.length > 1) {
arg.end()
}
// note: column name array does not matter at all
Some((arg.build, Array.empty))
}
}
private def add(prop: String,
bounds: Bounds[_],
typ: PredicateLeaf.Type,
convert: (Any) => Any): Option[(SearchArgument.Builder) => Unit] = {
if (bounds.isRange) {
if (bounds.isBoundedBothSides) {
// between seems to be endpoint inclusive, so should not have any false negatives regardless of bounds inclusiveness
Some((arg) => arg.between(prop, typ, convert(bounds.lower.value.get), convert(bounds.upper.value.get)))
} else if (bounds.isBounded) {
if (bounds.upper.value.isDefined) {
if (bounds.upper.inclusive) {
Some((arg) => arg.lessThanEquals(prop, typ, convert(bounds.upper.value.get)))
} else {
Some((arg) => arg.lessThan(prop, typ, convert(bounds.upper.value.get)))
}
} else if (bounds.lower.inclusive) {
Some((arg) => arg.startNot().lessThan(prop, typ, convert(bounds.lower.value.get)).end())
} else {
Some((arg) => arg.startNot().lessThanEquals(prop, typ, convert(bounds.lower.value.get)).end())
}
} else {
None
}
} else {
Some((arg) => arg.equals(prop, typ, convert(bounds.lower.value.get)))
}
}
private def addPoint(prop: String, bounds: Geometry): (SearchArgument.Builder) => Unit = {
val x = OrcFileSystemStorage.geometryXField(prop)
val y = OrcFileSystemStorage.geometryYField(prop)
val envelope = bounds.getEnvelopeInternal
(arg) => {
arg.startAnd()
if (envelope.getMinX == envelope.getMaxX) {
arg.equals(x, PredicateLeaf.Type.FLOAT, envelope.getMinX)
} else {
arg.between(x, PredicateLeaf.Type.FLOAT, envelope.getMinX, envelope.getMaxX)
}
if (envelope.getMinY == envelope.getMaxY) {
arg.equals(y, PredicateLeaf.Type.FLOAT, envelope.getMinY)
} else {
arg.between(y, PredicateLeaf.Type.FLOAT, envelope.getMinY, envelope.getMaxY)
}
arg.end()
}
}
}
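
// Hedged usage note (not part of the original file): callers build the push-down predicate from
// a SimpleFeatureType, the ORC TypeDescription derived from it, and a CQL Filter, e.g.
//   OrcSearchArguments(sft, description, filter): Option[(SearchArgument, Array[String])]
// and, when defined, hand the resulting SearchArgument to the ORC reader options before scanning.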
| ccri/geomesa | geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-orc/src/main/scala/org/locationtech/geomesa/fs/storage/orc/utils/OrcSearchArguments.scala | Scala | apache-2.0 | 6,224 |
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package akka.actor.simple
import akka.actor.Actor.Receive
import akka.actor._
import akka.pattern.{InfiniteWaitingPromiseActorRef, _}
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.ref.WeakReference
abstract class SimpleActor extends SimpleActorImpl {
def receive: Receive
def ?(msg: Any, timeout: FiniteDuration): Future[Any] = this.ask(msg)(timeout)
def !?(msg: Any, timeout: FiniteDuration): Any = Await.result(this ? (msg, timeout), timeout)
def !?(msg: Any): Any = {
val a = InfiniteWaitingPromiseActorRef(provider)
this.tell(msg, a)
Await.result(a.result.future, Duration.Inf)
}
}
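
// Hedged usage sketch (not part of the original file): a minimal echo actor built on
// SimpleActor. `SimpleActorExample`, `EchoActor` and `demo` are illustrative names only;
// `!?` without a timeout blocks the calling thread until the reply arrives.
object SimpleActorExample {
  class EchoActor extends SimpleActor {
    override def receive: Receive = {
      case msg => sender() ! msg
    }
  }

  def demo(): Any = {
    val echo = new EchoActor
    echo !? "hello" // expected to come back as "hello"
  }
}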
protected[simple] abstract class SimpleActorImpl extends MinimalActorRef {
override def provider: ActorRefProvider = SimpleActorSystem.impl.provider
override def path: ActorPath = internalActor.path
private[this] var _sender: () => ActorRef = () => SimpleActorSystem.impl.deadLetters
final def sender(): ActorRef = _sender()
def receive: Receive
private def internalReceive: Receive = receive
private def internalUpdateSenderProvider(senderProvider: () => ActorRef): Unit = _sender = senderProvider
private[this] val internalActor = SimpleActorImpl.createInternalActor(WeakReference(this))
override def !(message: Any)(implicit sender: ActorRef): Unit = {
internalActor.tell(message, sender)
}
override def finalize(): Unit = {
// also cleanup real actor
internalActor ! PoisonPill
}
}
object SimpleActorImpl {
  // weak reference to avoid circular references and allow SimpleActorImpl to be GCed
private def createInternalActor(actorImpl: WeakReference[SimpleActorImpl]): ActorRef = {
SimpleActorSystem().actorOf(Props(new Actor {
actorImpl.get.map(_.internalUpdateSenderProvider(() => sender()))
override def receive: Receive = new PartialFunction[Any, Unit] {
      // actorImpl.get should in most situations return the real reference; the orElse handler
      // covers the case where SimpleActorImpl has been GCed but the PoisonPill hasn't been delivered yet
override def isDefinedAt(x: Any): Boolean =
actorImpl.get.map(_.internalReceive.isDefinedAt(x)).getOrElse(true)
override def apply(v: Any): Unit =
actorImpl.get.map(_.internalReceive(v)).getOrElse {
            // send to dead letters to signal that the message hasn't been delivered
SimpleActorSystem.impl.deadLetters ! v
            // SimpleActorImpl was GCed, so we also need a cleanup
context.stop(self)
}
}
}))
}
} | arkadius/akka-simple | src/main/scala/akka/actor/simple/SimpleActor.scala | Scala | apache-2.0 | 3,194 |
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2021 Lightbend Inc. <https://www.lightbend.com>
*/
package akka.persistence.jdbc
import akka.actor.ActorSystem
import akka.persistence.jdbc.config.{ JournalConfig, ReadJournalConfig, SlickConfiguration }
import akka.persistence.jdbc.query.javadsl.JdbcReadJournal
import akka.persistence.jdbc.util.DropCreate
import akka.persistence.jdbc.db.SlickDatabase
import akka.util.Timeout
import com.typesafe.config.{ Config, ConfigFactory, ConfigValue }
import org.scalatest.BeforeAndAfterEach
import slick.jdbc.JdbcBackend.Database
import scala.concurrent.duration._
abstract class SingleActorSystemPerTestSpec(val config: Config)
extends SimpleSpec
with DropCreate
with BeforeAndAfterEach {
def this(config: String = "postgres-application.conf", configOverrides: Map[String, ConfigValue] = Map.empty) =
this(configOverrides.foldLeft(ConfigFactory.load(config)) { case (conf, (path, configValue)) =>
conf.withValue(path, configValue)
})
implicit val pc: PatienceConfig = PatienceConfig(timeout = 1.minute)
implicit val timeout: Timeout = Timeout(1.minute)
val cfg = config.getConfig("jdbc-journal")
val journalConfig = new JournalConfig(cfg)
val journalTableName =
if (newDao) journalConfig.eventJournalTableConfiguration.tableName
else journalConfig.journalTableConfiguration.tableName
val tables =
if (newDao)
List(journalConfig.eventTagTableConfiguration.tableName, journalConfig.eventJournalTableConfiguration.tableName)
else List(journalConfig.journalTableConfiguration.tableName)
val profile = if (cfg.hasPath("slick.profile")) {
SlickDatabase.profile(cfg, "slick")
} else SlickDatabase.profile(config, "akka-persistence-jdbc.shared-databases.slick")
val readJournalConfig = new ReadJournalConfig(config.getConfig(JdbcReadJournal.Identifier))
  // The db is initialized lazily via `db` and closed in the afterEach and afterAll blocks
var dbOpt: Option[Database] = None
def db: Database = {
dbOpt.getOrElse {
val newDb = if (cfg.hasPath("slick.profile")) {
SlickDatabase.database(cfg, new SlickConfiguration(cfg.getConfig("slick")), "slick.db")
} else
SlickDatabase.database(
config,
new SlickConfiguration(config.getConfig("akka-persistence-jdbc.shared-databases.slick")),
"akka-persistence-jdbc.shared-databases.slick.db")
dbOpt = Some(newDb)
newDb
}
}
def closeDb(): Unit = {
dbOpt.foreach(_.close())
dbOpt = None
}
override protected def afterEach(): Unit = {
super.afterEach()
closeDb()
}
override protected def afterAll(): Unit = {
super.afterAll()
closeDb()
}
def withActorSystem(f: ActorSystem => Unit): Unit = {
implicit val system: ActorSystem = ActorSystem("test", config)
f(system)
system.terminate().futureValue
}
}
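
// Hedged usage note (not part of the original file): a concrete spec typically passes its own
// config file name to the constructor and runs its assertions inside `withActorSystem`,
// relying on the lazy `db` handle; the class and config names below are illustrative only.
//
//   class MyJournalSpec extends SingleActorSystemPerTestSpec("h2-application.conf") {
//     // test bodies call withActorSystem { implicit system => ... } and use `db`
//   }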
| dnvriend/akka-persistence-jdbc | core/src/test/scala/akka/persistence/jdbc/SingleActorSystemPerTestSpec.scala | Scala | apache-2.0 | 2,918 |
package ml.sparkling.graph.operators.measures.vertex.betweenness.flow.struct
import org.apache.spark.graphx.VertexId
/**
* Created by mth on 5/2/17.
*/
class CFBCNeighbourFlow(
val src: VertexId,
val dst: VertexId,
val sumOfPotential: Double,
val sumOfDifferences: Double,
val numberOfFlows: Int,
val allCompleted: Boolean,
val anyCompleted: Boolean) extends Serializable {
val key = (src, dst)
}
object CFBCNeighbourFlow extends Serializable {
def apply(src: VertexId,
dst: VertexId,
sumOfPotential: Double = .0,
sumOfDifferences: Double = .0,
numberOfFlows: Int = 0,
allCompleted: Boolean = true,
anyCompleted: Boolean = true
): CFBCNeighbourFlow = new CFBCNeighbourFlow(src, dst, sumOfPotential, sumOfDifferences, numberOfFlows, allCompleted, anyCompleted)
def apply(key: (VertexId, VertexId)): CFBCNeighbourFlow = key match { case (src, dst) => apply(src, dst) }
def apply(flows: Iterable[CFBCFlow], vertex: CFBCVertex): CFBCNeighbourFlow = {
def aggregatePotential(vertexFlow: CFBCFlow)(acc: NeighbourFlowStats, flow: CFBCFlow) =
NeighbourFlowStats.fromFlow(vertexFlow)(flow).merge(acc)
def mergePotential(acc1: NeighbourFlowStats, acc2: NeighbourFlowStats) = acc1.merge(acc2)
val (src, dst) = flows.headOption.map(_.key) match {
case Some(k) => k
case None => throw new RuntimeException("Empty flows!")
}
val aggregaeFunc = aggregatePotential(vertex.getFlow((src, dst))) _
val stats = flows.aggregate(NeighbourFlowStats.empty)(aggregaeFunc, mergePotential)
CFBCNeighbourFlow(src, dst, stats.potential, stats.sumPotentialDiff, flows.size, stats.allCompleted, stats.anyCompleted)
}
class NeighbourFlowStats( val potential: Double,
val sumPotentialDiff: Double,
val allCompleted: Boolean,
val anyCompleted: Boolean) extends Serializable {
def merge(other: NeighbourFlowStats): NeighbourFlowStats = {
NeighbourFlowStats(
potential + other.potential,
sumPotentialDiff + other.sumPotentialDiff,
allCompleted && other.allCompleted,
anyCompleted || other.anyCompleted)
}
}
object NeighbourFlowStats extends Serializable {
def apply(potential: Double, sumPotentialDiff: Double, allCompleted: Boolean, anyCompleted: Boolean): NeighbourFlowStats =
new NeighbourFlowStats(potential, sumPotentialDiff, allCompleted, anyCompleted)
def fromFlow(vertexFlow: CFBCFlow)(nbflow: CFBCFlow): NeighbourFlowStats =
apply(nbflow.potential, Math.abs(nbflow.potential - vertexFlow.potential), nbflow.completed, nbflow.completed)
def empty = apply(.0, .0, true, false)
}
}
| sparkling-graph/sparkling-graph | operators/src/main/scala/ml/sparkling/graph/operators/measures/vertex/betweenness/flow/struct/CFBCNeighbourFlow.scala | Scala | bsd-2-clause | 2,945 |
package name.abhijitsarkar.scala
import name.abhijitsarkar.scala.GitHubJsonProtocol._
import spray.json._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/**
* @author Abhijit Sarkar
*/
/**
* Q7.6: Write a function that reports recent GitHub commits for a project.
* GitHub provides an RSS feed of recent commits for a given user,
* repository, and branch, containing XML that you can parse out with regular expressions.
* Your function should take the user, repository, and branch, read and parse the RSS feed,
* and then print out the commit information.
* This should include the date, title, and author of each commit.
*
* Ans: Instead of parsing XML using regex, which is a bad idea, I used the `spray-json` library
* and GitHub REST API to get and parse JSON.
*/
trait GitHubClient {
def commitLogs(url: String) = {
val commits = concurrent.Future {
io.Source.fromURL(url)
}
commits.map(_.mkString.parseJson.convertTo[List[CommitLog]])
}
}
object GitHubStubClient extends GitHubClient {
def commitLogs(repos: List[Repo]) = {
val f = super.commitLogs(getClass.getResource("/commits.json").toString)
Future sequence List(f)
}
}
object GitHubLiveClient extends GitHubClient {
def commitLogs(repos: List[Repo]) = {
Future sequence repos.map { r =>
val url = s"https://api.github.com/repos/${r.owner}/${r.name}/commits?sha=${r.branch}"
super.commitLogs(url)
}
}
}
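
// Hedged usage sketch (not part of the original exercise): awaits the fetched commit logs and
// prints each entry. The construction of `Repo` values is left to the caller because only the
// fields owner/name/branch are known from the code above; `printCommits` is an illustrative name.
object GitHubClientDemo {
  import scala.concurrent.Await
  import scala.concurrent.duration._

  def printCommits(repos: List[Repo]): Unit = {
    // one inner list of CommitLog per repository
    val logs = Await.result(GitHubLiveClient.commitLogs(repos), 1.minute)
    logs.flatten.foreach(println)
  }
}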
| abhijitsarkar/learning-scala | src/main/scala/name/abhijitsarkar/scala/GitHubClient.scala | Scala | gpl-3.0 | 1,497 |
package org.tejo.model
case class MarkdownValue(str: String) extends AnyVal
sealed trait KontribuEnskribo {
def text: MarkdownValue
def autoro: Persono
}
case class Kontribuo(text: MarkdownValue, autoro: Persono) extends KontribuEnskribo
case class MankoDeKontribuo(autoro: Persono) extends KontribuEnskribo {
override def text = MarkdownValue("Ne kontribuis")
} | tomaszym/izabela | actor/src/main/scala/org/tejo/model/Kontribuo.scala | Scala | gpl-2.0 | 371 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.master
import org.apache.spark.util.{Utils, IntParam}
/**
* Command-line parser for the master.
*/
private[spark] class MasterArguments(args: Array[String]) {
var host = Utils.localHostName()
var port = 7077
var webUiPort = 8080
// Check for settings in environment variables
if (System.getenv("SPARK_MASTER_HOST") != null) {
host = System.getenv("SPARK_MASTER_HOST")
}
if (System.getenv("SPARK_MASTER_PORT") != null) {
port = System.getenv("SPARK_MASTER_PORT").toInt
}
if (System.getenv("SPARK_MASTER_WEBUI_PORT") != null) {
webUiPort = System.getenv("SPARK_MASTER_WEBUI_PORT").toInt
}
if (System.getProperty("master.ui.port") != null) {
webUiPort = System.getProperty("master.ui.port").toInt
}
parse(args.toList)
def parse(args: List[String]): Unit = args match {
case ("--ip" | "-i") :: value :: tail =>
Utils.checkHost(value, "ip no longer supported, please use hostname " + value)
host = value
parse(tail)
case ("--host" | "-h") :: value :: tail =>
Utils.checkHost(value, "Please use hostname " + value)
host = value
parse(tail)
case ("--port" | "-p") :: IntParam(value) :: tail =>
port = value
parse(tail)
case "--webui-port" :: IntParam(value) :: tail =>
webUiPort = value
parse(tail)
case ("--help" | "-h") :: tail =>
printUsageAndExit(0)
case Nil => {}
case _ =>
printUsageAndExit(1)
}
/**
* Print usage and exit JVM with the given exit code.
*/
def printUsageAndExit(exitCode: Int) {
    System.err.println(
      "Usage: Master [options]\n" +
      "\n" +
      "Options:\n" +
      "  -i HOST, --ip HOST     Hostname to listen on (deprecated, please use --host or -h)\n" +
      "  -h HOST, --host HOST   Hostname to listen on\n" +
      "  -p PORT, --port PORT   Port to listen on (default: 7077)\n" +
      "  --webui-port PORT      Port for web UI (default: 8080)")
System.exit(exitCode)
}
}
| mkolod/incubator-spark | core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala | Scala | apache-2.0 | 2,821 |
package ch18
object ex01 {
class Bug {
var pos = 0
var dir = +1
def move(d: Int) = { pos += d * dir; this }
def show() = { println(pos); this }
def turn() = { dir *= -1; this }
}
def main(args: Array[String]): Unit = {
val bugsy1 = new Bug()
bugsy1.move(4)
bugsy1.show
bugsy1.move(6)
bugsy1.show
bugsy1.turn
bugsy1.move(5)
bugsy1.show
val bugsy = new Bug()
bugsy.move(4).show.move(6).show.turn.move(5).show
}
}
| tuxdna/scala-for-the-impatient-exercises | src/main/scala/ch18/ex01.scala | Scala | apache-2.0 | 484 |
package org.yawni.wordnet
import javax.ws.rs._
import javax.ws.rs.core._
import javax.ws.rs.core.Response.Status._
import org.yawni.wordnet._
import org.yawni.util._
import org.yawni.wordnet.POS._
import org.yawni.wordnet.GlossAndExampleUtils._
import scala.xml._
import scala.collection.JavaConverters._
import java.util.TreeSet // don't want List
/**
* Functions to search WordNet and render results as XML NodeSeqs
*/
object Yawni {
def init() = {
// trigger preload
val wn = WordNet.getInstance
val query = "was";
//System.err.println("query: "+query+" results: "+wn.lookupBaseForms(query, POS.ALL));
//println("query: "+query+" results: "+wn.lookupBaseForms(query, POS.ALL));
}
// group by Word
def query(someString: String): NodeSeq = {
val wn = WordNet.getInstance
var results: NodeSeq = NodeSeq.Empty
for (pos <- List(NOUN, VERB, ADJ, ADV)) {
val noCaseForms = new TreeSet(String.CASE_INSENSITIVE_ORDER)
val forms = wn.lookupBaseForms(someString, pos)
for (form <- forms.asScala) {
if (! noCaseForms.contains(form)) {
// block no case duplicates ("hell"/"Hell", "villa"/"Villa")
noCaseForms.add(form)
val word = wn.lookupWord(form, pos)
if (word != null)
results ++= (wordSummary(word) ++ appendSenses(word) ++ <hr/>)
}
}
}
if (results == NodeSeq.Empty) {
if (someString.trim.length != 0)
results ++= <h4>No results found</h4>
}
//println(results)
results
}
private def wordSummary(word: Word) = {
val synsets = word.getSynsets
val taggedCount = word.getTaggedSenseCount
<span>The <span class="pos">{ word.getPOS.getLabel }</span>
<span class="summaryWord">{ WordCaseUtils.getDominantCasedLemma(word) }</span>
has { synsets.size } sense{ if (synsets.size == 1) "" else "s"} ({
if (taggedCount == 0)
"none"
else
if (taggedCount == synsets.size)
if (taggedCount == 2) "both"
else "all"
else
"first " + taggedCount
}
from tagged texts)</span>
}
private def appendSenses(word: Word) = {
<ol>{
for (synset <- word.getSynsets.asScala)
yield <li>{ render(word, synset.getWordSense(word)) }</li>
}</ol>
}
//private def render(word: Word, wordSense: WordSense) = {
// val verbose = false
// wordSense.getSynset.getLongDescription(verbose)
// <span>
// { "{ " + wordSense.getSynset.map(_.getLemma).mkString(" • ") + " } — " + wordSense.getSynset.getGloss }
// </span>
//}
//private def render(word: Word, wordSense: WordSense) = {
// <div class="synset"> { wordSense.getSynset.map(_.getLemma).mkString(" • ") } </div> ++
// <div class="gloss"> { wordSense.getSynset.getGloss } </div>
//}
private def focalWord(word: Word, wordSense: WordSense) = {
if (word.getLowercasedLemma.equalsIgnoreCase(wordSense.getLemma))
<span class="focalWord">{ wordSense.getLemma }</span>
else
Text(wordSense.getLemma)
}
private def render(word: Word, wordSense: WordSense) = {
val synset = wordSense.getSynset
// <div class="synset"> { synset.map(_.getLemma).mkString(" • ") } </div> ++
val wordSenses = synset.iterator()
val synsetXML = new NodeBuffer
if (wordSenses.hasNext) synsetXML.append(focalWord(word, wordSenses.next))
while (wordSenses.hasNext) {
synsetXML.append(Text(" • "))
synsetXML.append(focalWord(word, wordSenses.next))
}
<div class="synset"> { synsetXML } </div>
<div class="gloss">
<div class="definitions"> { getDefinitionsChunk(synset) } </div>
{ renderExamples(synset) }
</div>
}
private def renderExamples(synset: Synset) = {
val examples = getExamplesChunk(synset)
if (! examples.isEmpty)
<div class="examples"> { examples } </div>
else
NodeSeq.Empty
}
}
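
// Hedged usage sketch (not part of the original file): looks up a word from the command line
// and prints the rendered XML summary. `YawniDemo` and the default query "dog" are illustrative
// only; `init()` just forces the WordNet data to preload before the first lookup.
object YawniDemo {
  def main(args: Array[String]): Unit = {
    Yawni.init()
    println(Yawni.query(args.headOption.getOrElse("dog")))
  }
}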
| nezda/yawni | rest/src/main/scala/org/yawni/wordnet/Yawni.scala | Scala | apache-2.0 | 3,905 |
import org.scalatest.{Matchers, FunSpec}
/**
* Created by djdool on 7/10/15.
*/
class TemplateDoc_AT extends FunSpec with Matchers {
describe("com.monsanto.arch.cloudformation") {
// if this test fails, update it and the package documentation example
it("should verify that the template in the package documentation compiles"){
import com.monsanto.arch.cloudformation.model._
import com.monsanto.arch.cloudformation.model.resource._
import com.monsanto.arch.cloudformation.model.simple.Builders._
object SimpleVPC extends VPCWriter {
val ownerParameter = StringParameter(
name = "Owner",
Description = Some("Individual responsible for this template"),
MinLength = Some(StringBackedInt(1)),
MaxLength = Some(StringBackedInt(64)),
AllowedPattern = Some("[-_ a-zA-Z0-9]*"),
ConstraintDescription = Some("Can contain only alphanumeric characters, spaces, dashes and underscores.")
)
val keyNameParameter = `AWS::EC2::KeyPair::KeyName_Parameter`(
name = "KeyName",
Description = Some("Name of an existing EC2 KeyPair to enable SSH access to the instances"),
ConstraintDescription = Some("Value must be a valid AWS key pair name in your account.")
)
val allowSSHFromParameter = CidrBlockParameter(
name = "AllowSSHFrom",
Description = Some("The net block (CIDR) that SSH is available to.")
)
val simpleParameters = Seq(
ownerParameter,
keyNameParameter,
allowSSHFromParameter
)
val simpleConditions = Seq(
Condition(
name = "ShouldDisablePassword",
function = `Fn::Equals`(
a = ParameterRef(ownerParameter),
b = StringToken("rms")
)
)
)
val amazonLinuxAMIMapping = Mapping[AMIId](
"AmazonLinuxAMI",
Map(
"us-east-1" -> Map("AMI" -> AMIId("ami-1ecae776")),
"us-west-1" -> Map("AMI" -> AMIId("ami-d114f295")),
"us-west-2" -> Map("AMI" -> AMIId("ami-e7527ed7")),
"eu-west-1" -> Map("AMI" -> AMIId("ami-a10897d6"))
)
)
val simpleMappings = Seq(amazonLinuxAMIMapping)
val simpleResourceAndOutputs = withVpc(CidrBlock(10, 0, 0, 0, 16)) { implicit vpc =>
val (internetGatewayResource, gatewayToInternetResource) = withInternetGateway
val publicRouteTable = withRouteTable("Public", 1)
val publicRouteTableRoute = publicRouteTable.withRoute(
visibility = "Public",
routeTableOrdinal = 1,
routeOrdinal = 1,
internetGatewayResource
)
val gatewayStuff = Template.fromResource(internetGatewayResource) ++
gatewayToInternetResource ++
publicRouteTableRoute
val withinAZ = withAZ("us-east-1a") { implicit az =>
withSubnet("PubSubnet1", CidrBlock(10, 0, 0, 1, 24)) { implicit pubSubnet =>
val bastionName = "bastion"
val bastion = ec2(
name = bastionName,
InstanceType = "t2.micro",
KeyName = ParameterRef(keyNameParameter),
ImageId = `Fn::FindInMap`[AMIId](MappingRef(amazonLinuxAMIMapping), `AWS::Region`, "AMI"),
SecurityGroupIds = Seq(),
Tags = AmazonTag.fromName(bastionName),
UserData = Some(`Fn::Base64`(
`Fn::Join`("",
Seq[Token[String]](
"#!/bin/bash -v\n",
"yum update -y --security\n",
"# EOF\n"
)
)
))
)
val sshToBastion = ParameterRef(allowSSHFromParameter) ->- 22 ->- bastion
Template.fromSecurityGroupRoutable(bastion) ++
bastion.map(_.withEIPInVPC("BastionEIP").andOutput("BastionEIP", "Bastion Host EIP")) ++
Template.collapse(sshToBastion)
}
}
gatewayStuff ++
withinAZ
}
val simpleTemplate = simpleResourceAndOutputs ++
Template(
AWSTemplateFormatVersion = "2010-09-09",
Description = "Simple template",
Parameters = Some(simpleParameters),
Conditions = Some(simpleConditions),
Mappings = Some(simpleMappings),
Resources = None,
Outputs = None
)
writeStaxModule("vpc-simple.json", simpleTemplate)
}
SimpleVPC
}
}
}
| joewing/cloudformation-template-generator | src/test/scala/com/monsanto/arch/cloudformation/model/TemplateDoc_AT.scala | Scala | bsd-3-clause | 4,673 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
package symtab
import classfile.{ClassfileParser, ReusableDataReader}
import java.io.IOException
import scala.reflect.internal.MissingRequirementError
import scala.reflect.io.{AbstractFile, NoAbstractFile}
import scala.tools.nsc.util.{ClassPath, ClassRepresentation}
import scala.reflect.internal.util.ReusableInstance
import scala.tools.nsc.Reporting.WarningCategory
/** Defines the lazy completers ("symbol loaders") that populate top-level class,
 *  module and package symbols on demand from class files, source files and
 *  class path packages.
 *
 *  @author Martin Odersky
 */
abstract class SymbolLoaders {
val symbolTable: symtab.SymbolTable {
def settings: Settings
}
val platform: backend.Platform {
val symbolTable: SymbolLoaders.this.symbolTable.type
}
import symbolTable._
/**
* Required by ClassfileParser. Check documentation in that class for details.
*/
def lookupMemberAtTyperPhaseIfPossible(sym: Symbol, name: Name): Symbol
/**
* Should forward to `Run.compileLate`. The more principled fix would be to
* determine why this functionality is needed and extract it into a separate
* interface.
*/
protected def compileLate(srcfile: AbstractFile): Unit
// forwards to runReporting.warning, but we don't have global in scope here
def warning(pos: Position, msg: String, category: WarningCategory, site: String): Unit
protected def enterIfNew(owner: Symbol, member: Symbol, completer: SymbolLoader): Symbol = {
assert(owner.info.decls.lookup(member.name) == NoSymbol, owner.fullName + "." + member.name)
owner.info.decls enter member
member
}
protected def signalError(root: Symbol, ex: Throwable): Unit = {
if (settings.isDebug) ex.printStackTrace()
globalError(ex.getMessage() match {
case null => "i/o error while loading " + root.name
case msg => "error while loading " + root.name + ", " + msg
})
}
def newClass(owner: Symbol, name: String): ClassSymbol = owner.newClass(newTypeName(name))
/** Enter class with given `name` into scope of `root`
* and give them `completer` as type.
*/
def enterClass(owner: Symbol, name: String, completer: SymbolLoader): Symbol =
enterClass(owner, newClass(owner, name), completer)
def enterClass(owner: Symbol, clazz: ClassSymbol, completer: SymbolLoader): Symbol = {
clazz setInfo completer
enterIfNew(owner, clazz, completer)
}
def newModule(owner: Symbol, name: String): ModuleSymbol = owner.newModule(newTermName(name))
/** Enter module with given `name` into scope of `root`
* and give them `completer` as type.
*/
def enterModule(owner: Symbol, name: String, completer: SymbolLoader): Symbol =
enterModule(owner, newModule(owner, name), completer)
def enterModule(owner: Symbol, module: ModuleSymbol, completer: SymbolLoader): Symbol = {
module setInfo completer
module.moduleClass setInfo moduleClassLoader
enterIfNew(owner, module, completer)
}
/** Enter package with given `name` into scope of `root`
* and give them `completer` as type.
*/
def enterPackage(root: Symbol, name: String, completer: SymbolLoader): Symbol = {
val pname = newTermName(name)
val preExisting = root.info.decls lookup pname
if (preExisting != NoSymbol) {
// Some jars (often, obfuscated ones) include a package and
// object with the same name. Rather than render them unusable,
// offer a setting to resolve the conflict one way or the other.
// This was motivated by the desire to use YourKit probes, which
// require yjp.jar at runtime. See scala/bug#2089.
if (settings.termConflict.isDefault)
        throw new TypeError(
          s"$root contains object and package with same name: $name\none of them needs to be removed from classpath"
        )
else if (settings.termConflict.value == "package") {
warning(
NoPosition,
"Resolving package/object name conflict in favor of package " +
preExisting.fullName + ". The object will be inaccessible.",
WarningCategory.Other,
site = "")
root.info.decls.unlink(preExisting)
}
else {
warning(
NoPosition,
"Resolving package/object name conflict in favor of object " +
preExisting.fullName + ". The package will be inaccessible.",
WarningCategory.Other,
site = "")
return NoSymbol
}
}
    // todo: find out why the initialization sequence for pkg/pkg.moduleClass is different from enterModule
val pkg = root.newPackage(pname)
pkg.moduleClass setInfo completer
pkg setInfo pkg.moduleClass.tpe
root.info.decls enter pkg
pkg
}
/** Enter class and module with given `name` into scope of `root`
* and give them `completer` as type.
*/
def enterClassAndModule(root: Symbol, name: TermName, getCompleter: (ClassSymbol, ModuleSymbol) => SymbolLoader): Unit = {
val clazz0 = root.newClass(name.toTypeName)
val module0 = root.newModule(name)
val completer = getCompleter(clazz0, module0)
// enterClass/Module may return an existing symbol instead of the ones we created above
// this may happen when there's both sources and binaries on the classpath, but the class
// name is different from the file name, so the classpath can't match the binary and source
// representation. `companionModule/Class` prefers the source version, so we should be careful
// to reuse the symbols returned below.
val clazz = enterClass(root, clazz0, completer)
val module = enterModule(root, module0, completer)
if (!clazz.isAnonymousClass) {
// Diagnostic for scala/bug#7147
def msg: String = {
def symLocation(sym: Symbol) = if (sym == null) "null" else s"${clazz.fullLocationString} (from ${clazz.associatedFile})"
sm"""Inconsistent class/module symbol pair for `$name` loaded from ${symLocation(root)}.
|clazz = ${symLocation(clazz)}; clazz.companionModule = ${clazz.companionModule}
|module = ${symLocation(module)}; module.companionClass = ${module.companionClass}"""
}
assert(clazz.companionModule == module, msg)
assert(module.companionClass == clazz, msg)
}
}
/** In batch mode: Enter class and module with given `name` into scope of `root`
* and give them a source completer for given `src` as type.
* In IDE mode: Find all toplevel definitions in `src` and enter then into scope of `root`
* with source completer for given `src` as type.
* (overridden in interactive.Global).
*/
def enterToplevelsFromSource(root: Symbol, name: TermName, src: AbstractFile): Unit = {
enterClassAndModule(root, name, (_, _) => new SourcefileLoader(src))
}
/** The package objects of scala and scala.reflect should always
* be loaded in binary if classfiles are available, even if sourcefiles
* are newer. Late-compiling these objects from source leads to compilation
* order issues.
   * Note: We do a name-based comparison here because the method is called before we even
* have ReflectPackage defined.
*/
def binaryOnly(owner: Symbol, name: TermName): Boolean =
name == nme.PACKAGE &&
(owner.fullName == "scala" || owner.fullName == "scala.reflect")
/** Initialize toplevel class and module symbols in `owner` from class path representation `classRep`
*/
def initializeFromClassPath(owner: Symbol, classRep: ClassRepresentation): Unit = {
((classRep.binary, classRep.source) : @unchecked) match {
case (Some(bin), Some(src))
if platform.needCompile(bin, src) && !binaryOnly(owner, nameOf(classRep)) =>
if (settings.verbose) inform("[symloader] picked up newer source file for " + src.path)
enterToplevelsFromSource(owner, nameOf(classRep), src)
case (None, Some(src)) =>
if (settings.verbose) inform("[symloader] no class, picked up source file for " + src.path)
enterToplevelsFromSource(owner, nameOf(classRep), src)
case (Some(bin), _) =>
enterClassAndModule(owner, nameOf(classRep), new ClassfileLoader(bin, _, _))
}
}
private def nameOf(classRep: ClassRepresentation): TermName = {
val name = classRep.name
val nameLength = name.length
if (nameLength <= nameCharBuffer.length) {
name.getChars(0, nameLength, nameCharBuffer, 0)
newTermName(nameCharBuffer, 0, nameLength)
} else {
newTermName(name)
}
}
private val nameCharBuffer = new Array[Char](512)
/**
* A lazy type that completes itself by calling parameter doComplete.
* Any linked modules/classes or module classes are also initialized.
* Todo: consider factoring out behavior from TopClassCompleter/SymbolLoader into
* supertrait SymLoader
*/
abstract class SymbolLoader extends SymLoader {
    /** Load the source or class file for `root`. */
protected def doComplete(root: Symbol): Unit
def sourcefile: Option[AbstractFile] = None
def associatedFile(self: Symbol): AbstractFile = NoAbstractFile
/**
* Description of the resource (ClassPath, AbstractFile)
* being processed by this loader
*/
protected def description: String
private var ok = false
private def setSource(sym: Symbol): Unit = {
sourcefile foreach (sf => sym match {
case cls: ClassSymbol => cls.associatedFile = sf
case mod: ModuleSymbol => mod.moduleClass.associatedFile = sf
case _ => ()
})
}
override def complete(root: Symbol): Unit = {
val assocFile = associatedFile(root)
currentRunProfilerBeforeCompletion(root, assocFile)
try {
try {
informingProgress("loaded " + description) {
val currentphase = phase
try doComplete(root)
finally phase = currentphase
}
ok = true
setSource(root)
setSource(root.companionSymbol) // module -> class, class -> module
}
catch {
case ex@(_: IOException | _: MissingRequirementError) =>
ok = false
signalError(root, ex)
}
initRoot(root)
if (!root.isPackageClass) initRoot(root.companionSymbol)
} finally {
currentRunProfilerAfterCompletion(root, assocFile)
}
}
override def load(root: Symbol): Unit = { complete(root) }
private def markAbsent(sym: Symbol): Unit = {
val tpe: Type = if (ok) NoType else ErrorType
if (sym != NoSymbol)
sym setInfo tpe
}
private def initRoot(root: Symbol): Unit = {
if (root.rawInfo == this)
List(root, root.moduleClass) foreach markAbsent
else if (root.isClass && !root.isModuleClass)
root.rawInfo.load(root)
}
}
/**
* Loads contents of a package
*/
class PackageLoader(packageName: String, classPath: ClassPath) extends SymbolLoader with FlagAgnosticCompleter {
protected def description = {
val shownPackageName = if (packageName == ClassPath.RootPackage) "<root package>" else packageName
s"package loader $shownPackageName"
}
protected def doComplete(root: Symbol): Unit = {
assert(root.isPackageClass, root)
root.setInfo(new PackageClassInfoType(newScope, root))
val classPathEntries = classPath.list(packageName)
if (!root.isRoot)
for (entry <- classPathEntries.classesAndSources) initializeFromClassPath(root, entry)
if (!root.isEmptyPackageClass) {
for (pkg <- classPathEntries.packages) {
val fullName = pkg.name
val name =
if (packageName == ClassPath.RootPackage) fullName
else fullName.substring(packageName.length + 1)
val packageLoader = new PackageLoader(fullName, classPath)
enterPackage(root, name, packageLoader)
}
openPackageModule(root)
}
}
}
private lazy val classFileDataReader: ReusableInstance[ReusableDataReader] = ReusableInstance[ReusableDataReader](new ReusableDataReader(), initialSize = 1, enabled = isCompilerUniverse)
class ClassfileLoader(val classfile: AbstractFile, clazz: ClassSymbol, module: ModuleSymbol) extends SymbolLoader with FlagAssigningCompleter {
private object classfileParser extends {
val symbolTable: SymbolLoaders.this.symbolTable.type = SymbolLoaders.this.symbolTable
} with ClassfileParser(classFileDataReader) {
override protected def lookupMemberAtTyperPhaseIfPossible(sym: Symbol, name: Name): Symbol =
SymbolLoaders.this.lookupMemberAtTyperPhaseIfPossible(sym, name)
/*
* The type alias and the cast (where the alias is used) is needed due to problem described
* in scala/bug#7585. In this particular case, the problem is that we need to make sure that symbol
* table used by symbol loaders is exactly the same as they one used by classfileParser.
* If you look at the path-dependent types we have here everything should work out ok but
* due to issue described in scala/bug#7585 type-checker cannot tie the knot here.
*
*/
private type SymbolLoadersRefined = SymbolLoaders { val symbolTable: classfileParser.symbolTable.type }
val loaders = SymbolLoaders.this.asInstanceOf[SymbolLoadersRefined]
override def classPath: ClassPath = platform.classPath
}
protected def description = "class file "+ classfile.toString
protected def doComplete(root: Symbol): Unit = {
val start = if (settings.areStatisticsEnabled) statistics.startTimer(statistics.classReadNanos) else null
classfileParser.parse(classfile, clazz, module)
if (clazz.associatedFile eq NoAbstractFile) clazz.associatedFile = classfile
if (module.associatedFile eq NoAbstractFile) module.associatedFile = classfile
if (settings.areStatisticsEnabled) statistics.stopTimer(statistics.classReadNanos, start)
}
override def sourcefile: Option[AbstractFile] = classfileParser.srcfile
override def associatedFile(self: Symbol): AbstractFile = classfile
}
class SourcefileLoader(val srcfile: AbstractFile) extends SymbolLoader with FlagAssigningCompleter {
protected def description = "source file "+ srcfile.toString
override def fromSource = true
override def sourcefile = Some(srcfile)
override def associatedFile(self: Symbol): AbstractFile = srcfile
protected def doComplete(root: Symbol): Unit = compileLate(srcfile)
}
object moduleClassLoader extends SymbolLoader with FlagAssigningCompleter {
protected def description = "module class loader"
protected def doComplete(root: Symbol): Unit = { root.sourceModule.initialize }
override def associatedFile(self: Symbol): AbstractFile = {
val sourceModule = self.sourceModule
sourceModule.rawInfo match {
case loader: SymbolLoader => loader.associatedFile(sourceModule)
case _ => super.associatedFile(self)
}
}
}
/** used from classfile parser to avoid cycles */
var parentsLevel = 0
var pendingLoadActions: List[() => Unit] = Nil
}
| scala/scala | src/compiler/scala/tools/nsc/symtab/SymbolLoaders.scala | Scala | apache-2.0 | 15,249 |
package play.api.cache.redis.impl
import org.specs2.concurrent.ExecutionEnv
import org.specs2.mutable.Specification
import play.api.cache.redis._
import scala.reflect.ClassTag
class RedisSortedSetSpec(implicit ee: ExecutionEnv) extends Specification with ReducedMockito {
import Implicits._
import RedisCacheImplicits._
import org.mockito.ArgumentMatchers._
"Redis Set" should {
"add" in new MockedSortedSet {
connector.sortedSetAdd(anyString, anyVarArgs) returns 5L
set.add(scoreValue) must beEqualTo(set).await
set.add(scoreValue, otherScoreValue) must beEqualTo(set).await
there were one(connector).sortedSetAdd(key, scoreValue)
there were one(connector).sortedSetAdd(key, scoreValue, otherScoreValue)
}
"add (failing)" in new MockedSortedSet {
connector.sortedSetAdd(anyString, anyVarArgs) returns ex
set.add(scoreValue) must beEqualTo(set).await
there were one(connector).sortedSetAdd(key, scoreValue)
}
"contains (hit)" in new MockedSortedSet {
connector.sortedSetScore(beEq(key), beEq(other)) returns Some(1D)
set.contains(other) must beTrue.await
there was one(connector).sortedSetScore(key, other)
}
"contains (miss)" in new MockedSortedSet {
connector.sortedSetScore(beEq(key), beEq(other)) returns None
set.contains(other) must beFalse.await
there was one(connector).sortedSetScore(key, other)
}
"remove" in new MockedSortedSet {
connector.sortedSetRemove(anyString, anyVarArgs) returns 1L
set.remove(value) must beEqualTo(set).await
set.remove(other, value) must beEqualTo(set).await
there were one(connector).sortedSetRemove(key, value)
there were one(connector).sortedSetRemove(key, other, value)
}
"remove (failing)" in new MockedSortedSet {
connector.sortedSetRemove(anyString, anyVarArgs) returns ex
set.remove(value) must beEqualTo(set).await
there was one(connector).sortedSetRemove(key, value)
}
"range" in new MockedSortedSet {
val data = Seq(value, other)
connector.sortedSetRange[String](anyString, anyLong, anyLong)(anyClassTag) returns data
set.range(1, 5) must beEqualTo(data).await
there was one(connector).sortedSetRange(key, 1, 5)(implicitly[ClassTag[String]])
}
"range (reversed)" in new MockedSortedSet {
val data = Seq(value, other)
connector.sortedSetReverseRange[String](anyString, anyLong, anyLong)(anyClassTag) returns data
set.range(1, 5, isReverse = true) must beEqualTo(data).await
there was one(connector).sortedSetReverseRange(key, 1, 5)(implicitly[ClassTag[String]])
}
"size" in new MockedSortedSet {
connector.sortedSetSize(key) returns 2L
set.size must beEqualTo(2L).await
}
"size (failing)" in new MockedSortedSet {
connector.sortedSetSize(key) returns ex
set.size must beEqualTo(0L).await
}
"empty set" in new MockedSortedSet {
connector.sortedSetSize(beEq(key)) returns 0L
set.isEmpty must beTrue.await
set.nonEmpty must beFalse.await
}
"non-empty set" in new MockedSortedSet {
connector.sortedSetSize(beEq(key)) returns 1L
set.isEmpty must beFalse.await
set.nonEmpty must beTrue.await
}
"empty/non-empty set (failing)" in new MockedSortedSet {
connector.sortedSetSize(beEq(key)) returns ex
set.isEmpty must beTrue.await
set.nonEmpty must beFalse.await
}
}
}
| KarelCemus/play-redis | src/test/scala/play/api/cache/redis/impl/RedisSortedSetSpec.scala | Scala | mpl-2.0 | 3,495 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.component.spray.serialization
import org.json4s._
import org.json4s.JsonAST.JString
import org.json4s.jackson.Serialization
/**
* @author Michael Cuthbert on 1/13/15.
*/
class EnumerationSerializer(enums: Enumeration*) extends Serializer[Enumeration#Value] {
val EnumerationClass = classOf[Enumeration#Value]
val formats = Serialization.formats(NoTypeHints)
def deserialize(implicit format: Formats): PartialFunction[(TypeInfo, JValue), Enumeration#Value] = {
case (TypeInfo(EnumerationClass, _), json) => json match {
case JString(value) => enums.find(_.values.exists(_.toString == value)).get.withName(value)
case value => throw new MappingException("Can't convert " + value + " to " + EnumerationClass)
}
}
def serialize(implicit format: Formats): PartialFunction[Any, JValue] = {
case i: Enumeration#Value => JString(i.toString)
}
}
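
// Hedged usage sketch (not part of the original file): registering the serializer with json4s
// formats for a sample enumeration. `Weekday`, `exampleFormats` and the example output are
// illustrative only.
object EnumerationSerializerExample {
  object Weekday extends Enumeration {
    val Monday, Tuesday = Value
  }

  // formats that know how to (de)serialize Weekday values by name
  implicit val exampleFormats: Formats = DefaultFormats + new EnumerationSerializer(Weekday)

  // e.g. Serialization.write(Map("day" -> Weekday.Monday)) is expected to yield {"day":"Monday"}
}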
| Webtrends/wookiee-spray | src/main/scala/com/webtrends/harness/component/spray/serialization/EnumerationSerializer.scala | Scala | apache-2.0 | 1,657 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.concurrent.locks.ReentrantLock
import kafka.cluster.BrokerEndPoint
import kafka.consumer.PartitionTopicInfo
import kafka.utils.{DelayedItem, Pool, ShutdownableThread}
import kafka.common.{ClientIdAndBroker, KafkaException}
import kafka.metrics.KafkaMetricsGroup
import kafka.utils.CoreUtils.inLock
import org.apache.kafka.common.errors.CorruptRecordException
import org.apache.kafka.common.protocol.Errors
import AbstractFetcherThread._
import scala.collection.{Map, Set, mutable}
import scala.collection.JavaConverters._
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong
import com.yammer.metrics.core.Gauge
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.internals.{FatalExitError, PartitionStates}
import org.apache.kafka.common.record.MemoryRecords
/**
* Abstract class for fetching data from multiple partitions from the same broker.
*/
abstract class AbstractFetcherThread(name: String,
clientId: String,
val sourceBroker: BrokerEndPoint,
fetchBackOffMs: Int = 0,
isInterruptible: Boolean = true)
extends ShutdownableThread(name, isInterruptible) {
type REQ <: FetchRequest
type PD <: PartitionData
private val partitionStates = new PartitionStates[PartitionFetchState]
private val partitionMapLock = new ReentrantLock
private val partitionMapCond = partitionMapLock.newCondition()
private val metricId = new ClientIdAndBroker(clientId, sourceBroker.host, sourceBroker.port)
val fetcherStats = new FetcherStats(metricId)
val fetcherLagStats = new FetcherLagStats(metricId)
/* callbacks to be defined in subclass */
// process fetched data
def processPartitionData(topicPartition: TopicPartition, fetchOffset: Long, partitionData: PD)
// handle a partition whose offset is out of range and return a new fetch offset
def handleOffsetOutOfRange(topicPartition: TopicPartition): Long
// deal with partitions with errors, potentially due to leadership changes
def handlePartitionsWithErrors(partitions: Iterable[TopicPartition])
protected def buildFetchRequest(partitionMap: Seq[(TopicPartition, PartitionFetchState)]): REQ
protected def fetch(fetchRequest: REQ): Seq[(TopicPartition, PD)]
override def shutdown(){
initiateShutdown()
inLock(partitionMapLock) {
partitionMapCond.signalAll()
}
awaitShutdown()
// we don't need the lock since the thread has finished shutdown and metric removal is safe
fetcherStats.unregister()
fetcherLagStats.unregister()
}
override def doWork() {
val fetchRequest = inLock(partitionMapLock) {
val fetchRequest = buildFetchRequest(partitionStates.partitionStates.asScala.map { state =>
state.topicPartition -> state.value
})
if (fetchRequest.isEmpty) {
trace("There are no active partitions. Back off for %d ms before sending a fetch request".format(fetchBackOffMs))
partitionMapCond.await(fetchBackOffMs, TimeUnit.MILLISECONDS)
}
fetchRequest
}
if (!fetchRequest.isEmpty)
processFetchRequest(fetchRequest)
}
private def processFetchRequest(fetchRequest: REQ) {
val partitionsWithError = mutable.Set[TopicPartition]()
def updatePartitionsWithError(partition: TopicPartition): Unit = {
partitionsWithError += partition
partitionStates.moveToEnd(partition)
}
var responseData: Seq[(TopicPartition, PD)] = Seq.empty
try {
trace("Issuing to broker %d of fetch request %s".format(sourceBroker.id, fetchRequest))
responseData = fetch(fetchRequest)
} catch {
case t: Throwable =>
if (isRunning.get) {
warn(s"Error in fetch $fetchRequest", t)
inLock(partitionMapLock) {
partitionStates.partitionSet.asScala.foreach(updatePartitionsWithError)
            // an error occurred while fetching partitions, sleep a while
// note that `ReplicaFetcherThread.handlePartitionsWithError` will also introduce the same delay for every
// partition with error effectively doubling the delay. It would be good to improve this.
partitionMapCond.await(fetchBackOffMs, TimeUnit.MILLISECONDS)
}
}
}
fetcherStats.requestRate.mark()
if (responseData.nonEmpty) {
// process fetched data
inLock(partitionMapLock) {
responseData.foreach { case (topicPartition, partitionData) =>
val topic = topicPartition.topic
val partitionId = topicPartition.partition
Option(partitionStates.stateValue(topicPartition)).foreach(currentPartitionFetchState =>
// we append to the log if the current offset is defined and it is the same as the offset requested during fetch
if (fetchRequest.offset(topicPartition) == currentPartitionFetchState.offset) {
partitionData.error match {
case Errors.NONE =>
try {
val records = partitionData.toRecords
val newOffset = records.shallowEntries.asScala.lastOption.map(_.nextOffset).getOrElse(
currentPartitionFetchState.offset)
fetcherLagStats.getAndMaybePut(topic, partitionId).lag = Math.max(0L, partitionData.highWatermark - newOffset)
// Once we hand off the partition data to the subclass, we can't mess with it any more in this thread
processPartitionData(topicPartition, currentPartitionFetchState.offset, partitionData)
val validBytes = records.validBytes
if (validBytes > 0) {
// Update partitionStates only if there is no exception during processPartitionData
partitionStates.updateAndMoveToEnd(topicPartition, new PartitionFetchState(newOffset))
fetcherStats.byteRate.mark(validBytes)
}
} catch {
case ime: CorruptRecordException =>
// we log the error and continue. This ensures two things
                          // 1. If there is a corrupt message in a topic partition, it does not bring the fetcher thread down and cause other topic partitions to also lag
                          // 2. If the message is corrupt due to a transient state in the log (truncation, partial writes can cause this), we simply continue and
                          //    it should get fixed in subsequent fetches
logger.error("Found invalid messages during fetch for partition [" + topic + "," + partitionId + "] offset " + currentPartitionFetchState.offset + " error " + ime.getMessage)
                          updatePartitionsWithError(topicPartition)
case e: Throwable =>
throw new KafkaException("error processing data for partition [%s,%d] offset %d"
.format(topic, partitionId, currentPartitionFetchState.offset), e)
}
case Errors.OFFSET_OUT_OF_RANGE =>
try {
val newOffset = handleOffsetOutOfRange(topicPartition)
partitionStates.updateAndMoveToEnd(topicPartition, new PartitionFetchState(newOffset))
error("Current offset %d for partition [%s,%d] out of range; reset offset to %d"
.format(currentPartitionFetchState.offset, topic, partitionId, newOffset))
} catch {
case e: FatalExitError => throw e
case e: Throwable =>
error("Error getting offset for partition [%s,%d] to broker %d".format(topic, partitionId, sourceBroker.id), e)
updatePartitionsWithError(topicPartition)
}
case _ =>
if (isRunning.get) {
error("Error for partition [%s,%d] to broker %d:%s".format(topic, partitionId, sourceBroker.id,
partitionData.exception.get))
updatePartitionsWithError(topicPartition)
}
}
})
}
}
}
if (partitionsWithError.nonEmpty) {
debug("handling partitions with error for %s".format(partitionsWithError))
handlePartitionsWithErrors(partitionsWithError)
}
}
def addPartitions(partitionAndOffsets: Map[TopicPartition, Long]) {
partitionMapLock.lockInterruptibly()
try {
// If the partitionMap already has the topic/partition, then do not update the map with the old offset
val newPartitionToState = partitionAndOffsets.filter { case (tp, _) =>
!partitionStates.contains(tp)
}.map { case (tp, offset) =>
val fetchState =
if (PartitionTopicInfo.isOffsetInvalid(offset)) new PartitionFetchState(handleOffsetOutOfRange(tp))
else new PartitionFetchState(offset)
tp -> fetchState
}
val existingPartitionToState = partitionStates.partitionStates.asScala.map { state =>
state.topicPartition -> state.value
}.toMap
partitionStates.set((existingPartitionToState ++ newPartitionToState).asJava)
partitionMapCond.signalAll()
} finally partitionMapLock.unlock()
}
def delayPartitions(partitions: Iterable[TopicPartition], delay: Long) {
partitionMapLock.lockInterruptibly()
try {
for (partition <- partitions) {
Option(partitionStates.stateValue(partition)).foreach (currentPartitionFetchState =>
if (currentPartitionFetchState.isActive)
partitionStates.updateAndMoveToEnd(partition, new PartitionFetchState(currentPartitionFetchState.offset, new DelayedItem(delay)))
)
}
partitionMapCond.signalAll()
} finally partitionMapLock.unlock()
}
def removePartitions(topicPartitions: Set[TopicPartition]) {
partitionMapLock.lockInterruptibly()
try {
topicPartitions.foreach { topicPartition =>
partitionStates.remove(topicPartition)
fetcherLagStats.unregister(topicPartition.topic, topicPartition.partition)
}
} finally partitionMapLock.unlock()
}
def partitionCount() = {
partitionMapLock.lockInterruptibly()
try partitionStates.size
finally partitionMapLock.unlock()
}
}
object AbstractFetcherThread {
trait FetchRequest {
def isEmpty: Boolean
def offset(topicPartition: TopicPartition): Long
}
trait PartitionData {
def error: Errors
def exception: Option[Throwable]
def toRecords: MemoryRecords
def highWatermark: Long
}
}
object FetcherMetrics {
val ConsumerLag = "ConsumerLag"
val RequestsPerSec = "RequestsPerSec"
val BytesPerSec = "BytesPerSec"
}
class FetcherLagMetrics(metricId: ClientIdTopicPartition) extends KafkaMetricsGroup {
private[this] val lagVal = new AtomicLong(-1L)
private[this] val tags = Map(
"clientId" -> metricId.clientId,
"topic" -> metricId.topic,
"partition" -> metricId.partitionId.toString)
newGauge(FetcherMetrics.ConsumerLag,
new Gauge[Long] {
def value = lagVal.get
},
tags
)
def lag_=(newLag: Long) {
lagVal.set(newLag)
}
def lag = lagVal.get
def unregister() {
removeMetric(FetcherMetrics.ConsumerLag, tags)
}
}
class FetcherLagStats(metricId: ClientIdAndBroker) {
private val valueFactory = (k: ClientIdTopicPartition) => new FetcherLagMetrics(k)
val stats = new Pool[ClientIdTopicPartition, FetcherLagMetrics](Some(valueFactory))
def getAndMaybePut(topic: String, partitionId: Int): FetcherLagMetrics = {
stats.getAndMaybePut(new ClientIdTopicPartition(metricId.clientId, topic, partitionId))
}
def isReplicaInSync(topic: String, partitionId: Int): Boolean = {
val fetcherLagMetrics = stats.get(new ClientIdTopicPartition(metricId.clientId, topic, partitionId))
if (fetcherLagMetrics != null)
fetcherLagMetrics.lag <= 0
else
false
}
def unregister(topic: String, partitionId: Int) {
val lagMetrics = stats.remove(new ClientIdTopicPartition(metricId.clientId, topic, partitionId))
if (lagMetrics != null) lagMetrics.unregister()
}
def unregister() {
stats.keys.toBuffer.foreach { key: ClientIdTopicPartition =>
unregister(key.topic, key.partitionId)
}
}
}
class FetcherStats(metricId: ClientIdAndBroker) extends KafkaMetricsGroup {
val tags = Map("clientId" -> metricId.clientId,
"brokerHost" -> metricId.brokerHost,
"brokerPort" -> metricId.brokerPort.toString)
val requestRate = newMeter(FetcherMetrics.RequestsPerSec, "requests", TimeUnit.SECONDS, tags)
val byteRate = newMeter(FetcherMetrics.BytesPerSec, "bytes", TimeUnit.SECONDS, tags)
def unregister() {
removeMetric(FetcherMetrics.RequestsPerSec, tags)
removeMetric(FetcherMetrics.BytesPerSec, tags)
}
}
case class ClientIdTopicPartition(clientId: String, topic: String, partitionId: Int) {
override def toString = "%s-%s-%d".format(clientId, topic, partitionId)
}
/**
* case class to keep partition offset and its state(active, inactive)
*/
case class PartitionFetchState(offset: Long, delay: DelayedItem) {
def this(offset: Long) = this(offset, new DelayedItem(0))
def isActive: Boolean = delay.getDelay(TimeUnit.MILLISECONDS) == 0
override def toString = "%d-%b".format(offset, isActive)
}
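/**
 * Illustrative sketch (assumption: not part of the original file): the delay
 * controls `isActive`, so a state created with a positive delay is reported
 * inactive until that delay elapses.
 */
private object PartitionFetchStateSketch {
  val active = new PartitionFetchState(42L)                     // delay 0   => isActive == true
  val delayed = PartitionFetchState(42L, new DelayedItem(500L)) // ~500 ms   => isActive == false for now
}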
| ijuma/kafka | core/src/main/scala/kafka/server/AbstractFetcherThread.scala | Scala | apache-2.0 | 14,313 |
package drt.client.services.handlers
import diode.{ActionResult, ModelRW}
import drt.client.actions.Actions.{HideLoader, ShowLoader}
import drt.client.services.LoadingState
class LoaderHandler[M](modelRW: ModelRW[M, LoadingState]) extends LoggingActionHandler(modelRW) {
protected def handle: PartialFunction[Any, ActionResult[M]] = {
case ShowLoader() => updated(LoadingState(isLoading = true))
case HideLoader() => updated(LoadingState(isLoading = false))
}
}
| UKHomeOffice/drt-scalajs-spa-exploration | client/src/main/scala/drt/client/services/handlers/LoaderHandler.scala | Scala | apache-2.0 | 476 |
package ch43_topology_sort
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
class GraphTopology(vertex: Int) {
//define the graph
val adjacency = new Array[mutable.MutableList[Int]](vertex)
for (i <- Range(0, vertex)) {
adjacency(i) = new mutable.MutableList[Int]()
}
def addEdge(startIndex: Int, targetIndex: Int) = {
adjacency(startIndex) += targetIndex
}
def topologySortByKahn(): Array[Int] = {
val seq = new mutable.ArrayBuffer[Int]()
//inDegrees contains all the inDegree for a given node
val inDegrees = new Array[Int](vertex)
for (i <- Range(0, vertex)) {
for (j <- adjacency(i).indices) {
val index = adjacency(i).get(j).get
inDegrees(index) += 1
}
}
val queue = new mutable.Queue[Int]()
for (i <- inDegrees.indices) {
if (inDegrees(i) == 0) {
// means there is no inDegree for this node,
// this could be the starting point of the dependency graph
queue += i
}
}
//start to navigating the graph from the starting point
while (queue.nonEmpty) {
val index = queue.dequeue()
//push to the result
seq += index
for (i <- adjacency(index).indices) {
val inDegreeIndex = adjacency(index).get(i).get
inDegrees(inDegreeIndex) -= 1
if (inDegrees(inDegreeIndex) == 0) {
queue += inDegreeIndex
}
}
}
seq.toArray
}
def topologySortByDFS(): Array[Int] = {
val inverseAdj = new Array[mutable.MutableList[Int]](vertex)
for (i <- Range(0, vertex)) {
inverseAdj(i) = new mutable.MutableList[Int]()
}
//build the inverse adj
for (i <- Range(0, vertex)) {
for (j <- adjacency(i).indices) {
val index = adjacency(i).get(j).get
inverseAdj(index) += i
}
}
val visited = new Array[Boolean](vertex)
val seq = new ArrayBuffer[Int]()
for (i <- Range(0, vertex)) {
if (!visited(i)) {
visited(i) = true
//call dfs
seq ++= dfs(i, inverseAdj, visited)
}
}
seq.toArray
}
def dfs(index: Int, inverseAdj: Array[mutable.MutableList[Int]], visited: Array[Boolean]): ArrayBuffer[Int] = {
val seq = new ArrayBuffer[Int]()
for (i <- inverseAdj(index).indices) {
val sourceIndex = inverseAdj(index).get(i).get
if (!visited(sourceIndex)) {
visited(sourceIndex) = true
seq ++= dfs(sourceIndex, inverseAdj, visited)
}
}
seq += index
seq
}
}
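/**
 * Illustrative sketch (assumption: not part of the original source): builds
 * the dependency graph 0 -> 1 -> 2 and prints one valid topological order
 * produced by each algorithm.
 */
object GraphTopologySketch {
  def main(args: Array[String]): Unit = {
    val graph = new GraphTopology(3)
    graph.addEdge(0, 1) // vertex 0 must come before vertex 1
    graph.addEdge(1, 2) // vertex 1 must come before vertex 2
    println(graph.topologySortByKahn().mkString(",")) // 0,1,2
    println(graph.topologySortByDFS().mkString(","))  // 0,1,2
  }
}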
| wangzheng0822/algo | scala/src/main/scala/ch43_topology_sort/GraphTopology.scala | Scala | apache-2.0 | 2,538 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator.transaction
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.record.{CompressionType, SimpleRecord, MemoryRecords}
import org.junit.Assert.assertEquals
import org.junit.Test
import org.scalatest.junit.JUnitSuite
import scala.collection.JavaConverters._
class TransactionLogTest extends JUnitSuite {
val producerEpoch: Short = 0
val transactionTimeoutMs: Int = 1000
val topicPartitions: Set[TopicPartition] = Set[TopicPartition](new TopicPartition("topic1", 0),
new TopicPartition("topic1", 1),
new TopicPartition("topic2", 0),
new TopicPartition("topic2", 1),
new TopicPartition("topic2", 2))
@Test
def shouldThrowExceptionWriteInvalidTxn() {
val transactionalId = "transactionalId"
val producerId = 23423L
val txnMetadata = TransactionMetadata(transactionalId, producerId, producerEpoch, transactionTimeoutMs, 0)
txnMetadata.addPartitions(topicPartitions)
intercept[IllegalStateException] {
TransactionLog.valueToBytes(txnMetadata.prepareNoTransit())
}
}
@Test
def shouldReadWriteMessages() {
val pidMappings = Map[String, Long]("zero" -> 0L,
"one" -> 1L,
"two" -> 2L,
"three" -> 3L,
"four" -> 4L,
"five" -> 5L)
val transactionStates = Map[Long, TransactionState](0L -> Empty,
1L -> Ongoing,
2L -> PrepareCommit,
3L -> CompleteCommit,
4L -> PrepareAbort,
5L -> CompleteAbort)
// generate transaction log messages
val txnRecords = pidMappings.map { case (transactionalId, producerId) =>
val txnMetadata = TransactionMetadata(transactionalId, producerId, producerEpoch, transactionTimeoutMs,
transactionStates(producerId), 0)
if (!txnMetadata.state.equals(Empty))
txnMetadata.addPartitions(topicPartitions)
val keyBytes = TransactionLog.keyToBytes(transactionalId)
val valueBytes = TransactionLog.valueToBytes(txnMetadata.prepareNoTransit())
new SimpleRecord(keyBytes, valueBytes)
}.toSeq
val records = MemoryRecords.withRecords(0, CompressionType.NONE, txnRecords: _*)
var count = 0
for (record <- records.records.asScala) {
val txnKey = TransactionLog.readTxnRecordKey(record.key)
val transactionalId = txnKey.transactionalId
val txnMetadata = TransactionLog.readTxnRecordValue(transactionalId, record.value)
assertEquals(pidMappings(transactionalId), txnMetadata.producerId)
assertEquals(producerEpoch, txnMetadata.producerEpoch)
assertEquals(transactionTimeoutMs, txnMetadata.txnTimeoutMs)
assertEquals(transactionStates(txnMetadata.producerId), txnMetadata.state)
if (txnMetadata.state.equals(Empty))
assertEquals(Set.empty[TopicPartition], txnMetadata.topicPartitions)
else
assertEquals(topicPartitions, txnMetadata.topicPartitions)
count = count + 1
}
assertEquals(pidMappings.size, count)
}
}
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionLogTest.scala | Scala | apache-2.0 | 3,762 |
package com.lorandszakacs.util.time_wrappers
import java.{time => jt}
/**
*
* @author Lorand Szakacs, https://github.com/lorandszakacs
* @since 06 May 2018
*
*/
object LocalDateOrdering extends Ordering[jt.LocalDate] {
override def compare(x: jt.LocalDate, y: jt.LocalDate): Int = x.compareTo(y)
}
object InstantOrdering extends Ordering[jt.Instant] {
override def compare(x: jt.Instant, y: jt.Instant): Int = x.compareTo(y)
}
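/**
 * Illustrative sketch (assumption: not part of the original file): the
 * orderings above can be passed explicitly wherever an Ordering is required.
 */
object TimeOrderingsSketch {
  val sortedDays: List[jt.LocalDate] =
    List(jt.LocalDate.of(2018, 5, 6), jt.LocalDate.of(2018, 1, 1)).sorted(LocalDateOrdering)
  // sortedDays == List(2018-01-01, 2018-05-06)
}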
| lorandszakacs/sg-downloader | util/src/main/scala/com/lorandszakacs/util/time_wrappers/timeOrderings.scala | Scala | apache-2.0 | 444 |
/**
* Generated by Scrooge
* version: ?
* rev: ?
* built at: ?
*/
package com.twitter.scrooge.test.gold.thriftscala
import com.twitter.io.Buf
import com.twitter.scrooge.{
InvalidFieldsException,
StructBuilder,
StructBuilderFactory,
TFieldBlob,
ThriftStruct,
ThriftStructField,
ThriftStructFieldInfo,
ThriftStructMetaData,
ValidatingThriftStruct,
ValidatingThriftStructCodec3
}
import org.apache.thrift.protocol._
import org.apache.thrift.transport.TMemoryBuffer
import scala.collection.immutable.{Map => immutable$Map}
import scala.collection.mutable.Builder
import scala.reflect.{ClassTag, classTag}
object OverCapacityException extends ValidatingThriftStructCodec3[OverCapacityException] with StructBuilderFactory[OverCapacityException] {
val Struct: TStruct = new TStruct("OverCapacityException")
val ChillTimeSecondsField: TField = new TField("chillTimeSeconds", TType.I32, 1)
val ChillTimeSecondsFieldManifest: Manifest[Int] = manifest[Int]
/**
* Field information in declaration order.
*/
lazy val fieldInfos: scala.List[ThriftStructFieldInfo] = scala.List[ThriftStructFieldInfo](
new ThriftStructFieldInfo(
ChillTimeSecondsField,
false,
false,
ChillTimeSecondsFieldManifest,
_root_.scala.None,
_root_.scala.None,
immutable$Map.empty[String, String],
immutable$Map.apply[String, String](
("e.field.annotation", "false")
),
None,
_root_.scala.Option(0)
)
)
lazy val structAnnotations: immutable$Map[String, String] =
immutable$Map[String, String](
("e.annotation", "true")
)
private val fieldTypes: IndexedSeq[ClassTag[_]] = IndexedSeq[ClassTag[_]](
classTag[Int].asInstanceOf[ClassTag[_]]
)
private[this] val structFields: Seq[ThriftStructField[OverCapacityException]] = Seq[ThriftStructField[OverCapacityException]](
new ThriftStructField[OverCapacityException](
ChillTimeSecondsField,
_root_.scala.Some(ChillTimeSecondsFieldManifest),
classOf[OverCapacityException]) {
def getValue[R](struct: OverCapacityException): R = struct.chillTimeSeconds.asInstanceOf[R]
}
)
override lazy val metaData: ThriftStructMetaData[OverCapacityException] =
ThriftStructMetaData(this, structFields, fieldInfos, Nil, structAnnotations)
/**
* Checks that all required fields are non-null.
*/
def validate(_item: OverCapacityException): Unit = {
}
/**
* Checks that the struct is a valid as a new instance. If there are any missing required or
* construction required fields, return a non-empty list.
*/
def validateNewInstance(item: OverCapacityException): scala.Seq[com.twitter.scrooge.validation.Issue] = {
val buf = scala.collection.mutable.ListBuffer.empty[com.twitter.scrooge.validation.Issue]
buf ++= validateField(item.chillTimeSeconds)
buf.toList
}
/**
* Validate that all validation annotations on the struct meet the criteria defined in the
* corresponding [[com.twitter.scrooge.validation.ThriftConstraintValidator]].
*/
def validateInstanceValue(item: OverCapacityException): Set[com.twitter.scrooge.thrift_validation.ThriftValidationViolation] = {
val violations = scala.collection.mutable.Set.empty[com.twitter.scrooge.thrift_validation.ThriftValidationViolation]
violations ++= validateFieldValue("chillTimeSeconds", item.chillTimeSeconds, fieldInfos.apply(0).fieldAnnotations, scala.None)
violations.toSet
}
def withoutPassthroughFields(original: OverCapacityException): OverCapacityException =
new OverCapacityException(
chillTimeSeconds = original.chillTimeSeconds
)
lazy val unsafeEmpty: OverCapacityException = {
val chillTimeSeconds: Int = 0
new OverCapacityException(
chillTimeSeconds,
_root_.com.twitter.scrooge.internal.TProtocols.NoPassthroughFields
)
}
def newBuilder(): StructBuilder[OverCapacityException] = new OverCapacityExceptionStructBuilder(_root_.scala.None, fieldTypes)
override def encode(_item: OverCapacityException, _oproto: TProtocol): Unit = {
_item.write(_oproto)
}
override def decode(_iprot: TProtocol): OverCapacityException = {
decodeInternal(_iprot, false)
}
private[this] def decodeInternal(_iprot: TProtocol, lazily: Boolean): OverCapacityException = {
var chillTimeSeconds: Int = 0
var _passthroughFields: Builder[(Short, TFieldBlob), immutable$Map[Short, TFieldBlob]] = null
var _done = false
_iprot.readStructBegin()
do {
val _field = _iprot.readFieldBegin()
val _fieldType = _field.`type`
if (_fieldType == TType.STOP) {
_done = true
} else {
_field.id match {
case 1 =>
_root_.com.twitter.scrooge.internal.TProtocols.validateFieldType(TType.I32, _fieldType, "chillTimeSeconds")
chillTimeSeconds = _iprot.readI32()
case _ =>
_passthroughFields = _root_.com.twitter.scrooge.internal.TProtocols.readPassthroughField(_iprot, _field, _passthroughFields)
}
_iprot.readFieldEnd()
}
} while (!_done)
_iprot.readStructEnd()
val _passthroughFieldsResult =
if (_passthroughFields eq null) _root_.com.twitter.scrooge.internal.TProtocols.NoPassthroughFields
else _passthroughFields.result()
new OverCapacityException(
chillTimeSeconds,
_passthroughFieldsResult
)
}
def apply(
chillTimeSeconds: Int
): OverCapacityException =
new OverCapacityException(
chillTimeSeconds
)
def unapply(_item: OverCapacityException): _root_.scala.Option[Int] = _root_.scala.Some(_item.chillTimeSeconds)
}
/**
* Prefer the companion object's [[com.twitter.scrooge.test.gold.thriftscala.OverCapacityException.apply]]
* for construction if you don't need to specify passthrough or
* flags.
*/
class OverCapacityException(
val chillTimeSeconds: Int,
val _passthroughFields: immutable$Map[Short, TFieldBlob],
val flags: Long)
extends _root_.com.twitter.scrooge.ThriftException with _root_.com.twitter.finagle.SourcedException with ThriftStruct
with _root_.scala.Product1[Int]
with ValidatingThriftStruct[OverCapacityException]
with java.io.Serializable
with _root_.com.twitter.finagle.FailureFlags[OverCapacityException]
{
import OverCapacityException._
def this(
chillTimeSeconds: Int,
_passthroughFields: immutable$Map[Short, TFieldBlob]
) = this(
chillTimeSeconds,
_passthroughFields,
_root_.com.twitter.finagle.FailureFlags.Empty
)
def this(
chillTimeSeconds: Int
) = this(
chillTimeSeconds,
immutable$Map.empty
)
def _1: Int = chillTimeSeconds
/**
* Gets a field value encoded as a binary blob using TCompactProtocol. If the specified field
* is present in the passthrough map, that value is returned. Otherwise, if the specified field
* is known and not optional and set to None, then the field is serialized and returned.
*/
def getFieldBlob(_fieldId: Short): _root_.scala.Option[TFieldBlob] = {
val passedthroughValue = _passthroughFields.get(_fieldId)
if (passedthroughValue.isDefined) {
passedthroughValue
} else {
val _buff = new TMemoryBuffer(32)
val _oprot = new TCompactProtocol(_buff)
val _fieldOpt: _root_.scala.Option[TField] = _fieldId match {
case 1 =>
_oprot.writeI32(chillTimeSeconds)
_root_.scala.Some(OverCapacityException.ChillTimeSecondsField)
case _ => _root_.scala.None
}
if (_fieldOpt.isDefined) {
_root_.scala.Some(TFieldBlob(_fieldOpt.get, Buf.ByteArray.Owned(_buff.getArray)))
} else {
_root_.scala.None
}
}
}
/**
* Collects TCompactProtocol-encoded field values according to `getFieldBlob` into a map.
*/
def getFieldBlobs(ids: TraversableOnce[Short]): immutable$Map[Short, TFieldBlob] =
(ids.flatMap { id => getFieldBlob(id).map { fieldBlob => (id, fieldBlob) } }).toMap
/**
* Sets a field using a TCompactProtocol-encoded binary blob. If the field is a known
* field, the blob is decoded and the field is set to the decoded value. If the field
* is unknown and passthrough fields are enabled, then the blob will be stored in
* _passthroughFields.
*/
def setField(_blob: TFieldBlob): OverCapacityException = {
var chillTimeSeconds: Int = this.chillTimeSeconds
var _passthroughFields = this._passthroughFields
val _iprot = _blob.read
_blob.id match {
case 1 =>
chillTimeSeconds = _iprot.readI32()
case _ => _passthroughFields += _root_.scala.Tuple2(_blob.id, _blob)
}
new OverCapacityException(
chillTimeSeconds,
_passthroughFields
)
}
/**
* If the specified field is optional, it is set to None. Otherwise, if the field is
* known, it is reverted to its default value; if the field is unknown, it is removed
* from the passthroughFields map, if present.
*/
def unsetField(_fieldId: Short): OverCapacityException = {
var chillTimeSeconds: Int = this.chillTimeSeconds
_fieldId match {
case 1 =>
chillTimeSeconds = 0
case _ =>
}
new OverCapacityException(
chillTimeSeconds,
_passthroughFields - _fieldId
)
}
/**
* If the specified field is optional, it is set to None. Otherwise, if the field is
* known, it is reverted to its default value; if the field is unknown, it is removed
* from the passthroughFields map, if present.
*/
def unsetChillTimeSeconds: OverCapacityException = unsetField(1)
override def write(_oprot: TProtocol): Unit = {
OverCapacityException.validate(this)
_oprot.writeStructBegin(Struct)
_oprot.writeFieldBegin(ChillTimeSecondsField)
_oprot.writeI32(chillTimeSeconds)
_oprot.writeFieldEnd()
_root_.com.twitter.scrooge.internal.TProtocols.finishWritingStruct(_oprot, _passthroughFields)
}
def copy(
chillTimeSeconds: Int = this.chillTimeSeconds,
_passthroughFields: immutable$Map[Short, TFieldBlob] = this._passthroughFields
): OverCapacityException =
new OverCapacityException(
chillTimeSeconds,
_passthroughFields
)
override def canEqual(other: Any): Boolean = other.isInstanceOf[OverCapacityException]
private[this] def _equals(other: OverCapacityException): Boolean =
this.productArity == other.productArity &&
this.productIterator.sameElements(other.productIterator) &&
this.flags == other.flags &&
this._passthroughFields == other._passthroughFields
override def equals(other: Any): Boolean =
canEqual(other) && _equals(other.asInstanceOf[OverCapacityException])
override def hashCode: Int = {
31 * _root_.scala.runtime.ScalaRunTime._hashCode(this) +
_root_.java.lang.Long.hashCode(this.flags)
}
override def toString: String = _root_.scala.runtime.ScalaRunTime._toString(this)
override def productPrefix: String = "OverCapacityException"
def _codec: ValidatingThriftStructCodec3[OverCapacityException] = OverCapacityException
protected def copyWithFlags(flags: Long): OverCapacityException =
new OverCapacityException(
chillTimeSeconds,
_passthroughFields,
flags
)
def newBuilder(): StructBuilder[OverCapacityException] = new OverCapacityExceptionStructBuilder(_root_.scala.Some(this), fieldTypes)
}
private[thriftscala] class OverCapacityExceptionStructBuilder(instance: _root_.scala.Option[OverCapacityException], fieldTypes: IndexedSeq[ClassTag[_]])
extends StructBuilder[OverCapacityException](fieldTypes) {
def build(): OverCapacityException = {
val _fieldArray = fieldArray // shadow variable
if (instance.isDefined) {
val instanceValue = instance.get
OverCapacityException(
if (_fieldArray(0) == null) instanceValue.chillTimeSeconds else _fieldArray(0).asInstanceOf[Int]
)
} else {
if (genericArrayOps(_fieldArray).contains(null)) throw new InvalidFieldsException(structBuildError("OverCapacityException"))
OverCapacityException(
_fieldArray(0).asInstanceOf[Int]
)
}
}
}
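/**
 * Illustrative sketch (assumption: not part of the generated source):
 * constructs the exception via the companion `apply` and round-trips it
 * through a TCompactProtocol-backed in-memory transport.
 */
object OverCapacityExceptionSketch {
  val original: OverCapacityException = OverCapacityException(chillTimeSeconds = 30)
  private val buffer = new TMemoryBuffer(64)
  original.write(new TCompactProtocol(buffer))
  val decoded: OverCapacityException = OverCapacityException.decode(new TCompactProtocol(buffer))
  // decoded.chillTimeSeconds == 30
}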
| twitter/scrooge | scrooge-generator-tests/src/test/resources/gold_file_output_scala/com/twitter/scrooge/test/gold/thriftscala/OverCapacityException.scala | Scala | apache-2.0 | 12,159 |
package org.jetbrains.plugins.scala.lang.completion3
import com.intellij.codeInsight.completion.CompletionType
import org.junit.Assert
import com.intellij.openapi.vfs.VfsUtil
import com.intellij.codeInsight.lookup.LookupElementPresentation
import org.jetbrains.plugins.scala.codeInsight.ScalaCodeInsightTestBase
/**
* User: Alexander Podkhalyuzin
* Date: 28.10.11
*/
class ScalaSmartCompletionTest extends ScalaCodeInsightTestBase {
def testAfterPlaceholder() {
val fileText =
"""
|class A {
| class B {def concat: B = new B}
| val f: B => B = _.<caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|class A {
| class B {def concat: B = new B}
| val f: B => B = _.concat<caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
completeLookupItem(activeLookup.find(le => le.getLookupString == "concat").get)
checkResultByText(resultText)
}
def testTimeUnit1() {
val fileText =
"""
|class TimeUnit
|object TimeUnit {
| val HOURS = new TimeUnit
| val DAYS = new TimeUnit
|}
|
|def foo() = {
| bar(TimeUnit.<caret>HOURS)
|}
|
|def bar(unit: TimeUnit) {}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|class TimeUnit
|object TimeUnit {
| val HOURS = new TimeUnit
| val DAYS = new TimeUnit
|}
|
|def foo() = {
| bar(TimeUnit.DAYS<caret>)
|}
|
|def bar(unit: TimeUnit) {}
""".stripMargin.replaceAll("\\r", "").trim()
completeLookupItem(activeLookup.find(le => le.getLookupString.contains("DAYS")).get)
checkResultByText(resultText)
}
def testTimeUnit2() {
val fileText =
"""
|class TimeUnit
|object TimeUnit {
| val HOURS = new TimeUnit
| val DAYS = new TimeUnit
|}
|
|def foo() = {
| bar(Time<caret>Unit.HOURS)
|}
|
|def bar(unit: TimeUnit) {}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|class TimeUnit
|object TimeUnit {
| val HOURS = new TimeUnit
| val DAYS = new TimeUnit
|}
|
|def foo() = {
| bar(TimeUnit.DAYS<caret>)
|}
|
|def bar(unit: TimeUnit) {}
""".stripMargin.replaceAll("\\r", "").trim()
completeLookupItem(activeLookup.find(le => le.getLookupString.contains("DAYS")).get)
checkResultByText(resultText)
}
def testAfterNew() {
val fileText =
"""
|import scala.collection.mutable.HashSet
|class A {
| val f: HashSet[String] = new <caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|import scala.collection.mutable
|import scala.collection.mutable.HashSet
|class A {
| val f: HashSet[String] = new mutable.HashSet[String]()
|}
""".stripMargin.replaceAll("\\r", "").trim()
completeLookupItem(activeLookup.find(le => le.getLookupString == "HashSet").get, '[')
checkResultByText(resultText)
}
def testFilterPrivates() {
val fileText =
"""
|class Test {
| def foo(): String = ""
| private def bar(): String = ""
|}
|
|object O extends App {
| val s: String = new Test().bar<caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
Assert.assertNull(activeLookup)
}
def testFilterObjectDouble() {
val fileText =
"""
|class Test {
| val x: Double = <caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
Assert.assertTrue(activeLookup.find(_.getLookupString == "Double") == None)
}
def testFalse() {
val fileText =
"""
|class A {
| val f: Boolean = <caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|class A {
| val f: Boolean = false<caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
completeLookupItem(activeLookup.find(le => le.getLookupString == "false").get, '\\t')
checkResultByText(resultText)
}
def testClassOf() {
val fileText =
"""
|class A {
| val f: Class[_] = <caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|class A {
| val f: Class[_] = classOf[<caret>]
|}
""".stripMargin.replaceAll("\\r", "").trim()
completeLookupItem(activeLookup.find(le => le.getLookupString == "classOf").get, '\\t')
checkResultByText(resultText)
}
def testSmartRenamed() {
val fileText =
"""
|import java.util.{ArrayList => BLLLL}
|object Test extends App {
| val al: java.util.List[Int] = new BL<caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|import java.util.{ArrayList => BLLLL}
|object Test extends App {
| val al: java.util.List[Int] = new BLLLL[Int](<caret>)
|}
""".stripMargin.replaceAll("\\r", "").trim()
completeLookupItem(activeLookup.find(le => le.getLookupString == "BLLLL").get, '\\t')
checkResultByText(resultText)
}
def testThis() {
val fileText =
"""
|class TT {
| val al: TT = <caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|class TT {
| val al: TT = this<caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "this").get, '\\t')
checkResultByText(resultText)
}
def testInnerThis() {
val fileText =
"""
|class TT {
| class GG {
| val al: GG = <caret>
| }
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|class TT {
| class GG {
| val al: GG = this<caret>
| }
|}
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "this").get, '\\t')
checkResultByText(resultText)
}
def testOuterThis() {
val fileText =
"""
|class TT {
| class GG {
| val al: TT = <caret>
| }
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|class TT {
| class GG {
| val al: TT = TT.this<caret>
| }
|}
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "TT.this").get, '\\t')
checkResultByText(resultText)
}
def testWhile() {
val fileText =
"""
|while (<caret>) {}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|while (true<caret>) {}
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "true").get, '\\t')
checkResultByText(resultText)
}
def testDoWhile() {
val fileText =
"""
|do {} while (<caret>)
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|do {} while (true<caret>)
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "true").get, '\\t')
checkResultByText(resultText)
}
def testNewFunction() {
val fileText =
"""
|val x: Int => String = new <caret>
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|val x: Int => String = new Function1[Int, String] {
| def apply(v1: Int): String = <selection>???</selection>
|}
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "Function1").get, '\\t')
checkResultByText(resultText)
}
def testEtaExpansion() {
val fileText =
"""
|def foo(x: Int): String = x.toString
|val x: Int => String = <caret>
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|def foo(x: Int): String = x.toString
|val x: Int => String = foo _<caret>
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "foo").get, '\\t')
checkResultByText(resultText)
}
def testJavaEnum() {
val javaFileText =
"""
|package a;
|
|public enum Java {
| aaa, bbb, ccc
|}
""".stripMargin('|').replaceAll("\\r", "").trim()
val fileText =
"""
|import a.Java
|class A {
| val x: Java = a<caret>
|}
""".stripMargin('|').replaceAll("\\r", "").trim()
val myVFile = getSourceRootAdapter.createChildDirectory(null, "a").createChildData(null, "Java.java")
VfsUtil.saveText(myVFile, javaFileText)
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|import a.Java
|class A {
| val x: Java = Java.aaa<caret>
|}
""".stripMargin('|').replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "aaa").get, '\\t')
checkResultByText(resultText)
}
def testScalaEnum() {
val fileText =
"""
|object Scala extends Enumeration {type Scala = Value; val aaa, bbb, ccc = Value}
|class A {
| val x: Scala.Scala = a<caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|object Scala extends Enumeration {type Scala = Value; val aaa, bbb, ccc = Value}
|class A {
| val x: Scala.Scala = Scala.aaa<caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "aaa").get, '\\t')
checkResultByText(resultText)
}
def testScalaFactoryMethod() {
val fileText =
"""
|class Scala
|object Scala {
| def getInstance() = new Scala
|}
|class A {
| val x: Scala = get<caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|class Scala
|object Scala {
| def getInstance() = new Scala
|}
|class A {
| val x: Scala = Scala.getInstance()<caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "getInstance").get, '\\t')
checkResultByText(resultText)
}
def testScalaFactoryApply() {
val fileText =
"""
|case class Scala()
|class A {
| val x: Scala = <caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|case class Scala()
|class A {
| val x: Scala = Scala.apply()<caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "apply").get, '\\t')
checkResultByText(resultText)
}
def testScalaHashSetEmpty() {
val fileText =
"""
|import collection.mutable.HashSet
|class A {
| val x: HashSet[String] = <caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|import collection.mutable.HashSet
|class A {
| val x: HashSet[String] = HashSet.empty<caret>
|}
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "empty").get, '\\t')
checkResultByText(resultText)
}
def testTwoGenerics() {
val fileText =
"""
|class A[T, K](s: Int)
|
|class B[T, K](s: Int) extends A[T, K](s)
|
|val map: A[Int,Int] = new <caret>
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(1, CompletionType.SMART)
val resultText =
"""
|class A[T, K](s: Int)
|
|class B[T, K](s: Int) extends A[T, K](s)
|
|val map: A[Int,Int] = new B[Int, Int](<caret>)
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "B").get, '\\t')
checkResultByText(resultText)
}
def testChainedSecondCompletion() {
val fileText =
"""
|object YY {
| def foo(): YY = new YY
| val x: OP = <caret>
|}
|class YY {
| def goo(x: Int) = new OP
|}
|class OP
""".stripMargin.replaceAll("\\r", "").trim()
configureFromFileTextAdapter("dummy.scala", fileText)
val (activeLookup, _) = complete(2, CompletionType.SMART)
val resultText =
"""
|object YY {
| def foo(): YY = new YY
| val x: OP = foo().goo(<caret>)
|}
|class YY {
| def goo(x: Int) = new OP
|}
|class OP
""".stripMargin.replaceAll("\\r", "").trim()
if (activeLookup != null) completeLookupItem(activeLookup.find(le => le.getLookupString == "foo.goo").get, '\\t')
checkResultByText(resultText)
}
} | consulo/consulo-scala | test/org/jetbrains/plugins/scala/lang/completion3/ScalaSmartCompletionTest.scala | Scala | apache-2.0 | 16,018 |
/*
* Copyright (c) 2011 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless.examples
object NewtypeExamples extends App {
import shapeless._
import newtype._
// MyString is a new type with String as its underlying representation and with its operations
// provided by MyStringOps
type MyString = Newtype[String, MyStringOps]
// MyString constructor
def MyString(s : String) : MyString = newtype(s)
// Expose String#size as MyString#mySize. No other operations of String are accessible
case class MyStringOps(s : String) {
def mySize = s.size
}
implicit val mkOps = MyStringOps
val ms = MyString("foo")
//val s : String = ms // Does not compile
//val ms2 : MyString = "foo" // Does not compile
//ms.size // Does not compile
assert(ms.mySize == 3) // Compiles. Assertion satisfied.
val s2 = "bar"
val ms2 = MyString(s2)
// Verify that this is an unboxed representation
assert(ms2 eq (s2 : AnyRef))
}
| mandubian/shapeless | examples/src/main/scala/shapeless/examples/newtype.scala | Scala | apache-2.0 | 1,542 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io.File
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.util.{MutableURLClassLoader, Utils}
private[deploy] object DependencyUtils {
def resolveMavenDependencies(
packagesExclusions: String,
packages: String,
repositories: String,
ivyRepoPath: String): String = {
val exclusions: Seq[String] =
if (!StringUtils.isBlank(packagesExclusions)) {
packagesExclusions.split(",")
} else {
Nil
}
// Create the IvySettings, either load from file or build defaults
val ivySettings = sys.props.get("spark.jars.ivySettings").map { ivySettingsFile =>
SparkSubmitUtils.loadIvySettings(ivySettingsFile, Option(repositories), Option(ivyRepoPath))
}.getOrElse {
SparkSubmitUtils.buildIvySettings(Option(repositories), Option(ivyRepoPath))
}
SparkSubmitUtils.resolveMavenCoordinates(packages, ivySettings, exclusions = exclusions)
}
def resolveAndDownloadJars(
jars: String,
userJar: String,
sparkConf: SparkConf,
hadoopConf: Configuration,
secMgr: SecurityManager): String = {
val targetDir = Utils.createTempDir()
Option(jars)
.map {
resolveGlobPaths(_, hadoopConf)
.split(",")
.filterNot(_.contains(userJar.split("/").last))
.mkString(",")
}
.filterNot(_ == "")
.map(downloadFileList(_, targetDir, sparkConf, hadoopConf, secMgr))
.orNull
}
def addJarsToClassPath(jars: String, loader: MutableURLClassLoader): Unit = {
if (jars != null) {
for (jar <- jars.split(",")) {
SparkSubmit.addJarToClasspath(jar, loader)
}
}
}
/**
* Download a list of remote files to temp local files. If the file is local, the original file
* will be returned.
*
* @param fileList A comma separated file list.
* @param targetDir A temporary directory for which downloaded files.
* @param sparkConf Spark configuration.
* @param hadoopConf Hadoop configuration.
* @param secMgr Spark security manager.
* @return A comma separated local files list.
*/
def downloadFileList(
fileList: String,
targetDir: File,
sparkConf: SparkConf,
hadoopConf: Configuration,
secMgr: SecurityManager): String = {
require(fileList != null, "fileList cannot be null.")
fileList.split(",")
.map(downloadFile(_, targetDir, sparkConf, hadoopConf, secMgr))
.mkString(",")
}
/**
* Download a file from the remote to a local temporary directory. If the input path points to
* a local path, returns it with no operation.
*
* @param path A file path from where the files will be downloaded.
* @param targetDir A temporary directory for which downloaded files.
* @param sparkConf Spark configuration.
* @param hadoopConf Hadoop configuration.
* @param secMgr Spark security manager.
* @return Path to the local file.
*/
def downloadFile(
path: String,
targetDir: File,
sparkConf: SparkConf,
hadoopConf: Configuration,
secMgr: SecurityManager): String = {
require(path != null, "path cannot be null.")
val uri = Utils.resolveURI(path)
uri.getScheme match {
case "file" | "local" => path
case _ =>
val fname = new Path(uri).getName()
val localFile = Utils.doFetchFile(uri.toString(), targetDir, fname, sparkConf, secMgr,
hadoopConf)
localFile.toURI().toString()
}
}
def resolveGlobPaths(paths: String, hadoopConf: Configuration): String = {
require(paths != null, "paths cannot be null.")
paths.split(",").map(_.trim).filter(_.nonEmpty).flatMap { path =>
val uri = Utils.resolveURI(path)
uri.getScheme match {
case "local" | "http" | "https" | "ftp" => Array(path)
case _ =>
val fs = FileSystem.get(uri, hadoopConf)
Option(fs.globStatus(new Path(uri))).map { status =>
status.filter(_.isFile).map(_.getPath.toUri.toString)
}.getOrElse(Array(path))
}
}.mkString(",")
}
}
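/**
 * Illustrative sketch (assumption: not part of the original source): expands
 * glob patterns and downloads the matched files to a temporary directory,
 * returning the comma separated list of local paths.
 */
private[deploy] object DependencyUtilsSketch {
  def fetchAll(paths: String): String = {
    val sparkConf = new SparkConf(false)
    val hadoopConf = new Configuration()
    val secMgr = new SecurityManager(sparkConf)
    val resolved = DependencyUtils.resolveGlobPaths(paths, hadoopConf)
    DependencyUtils.downloadFileList(resolved, Utils.createTempDir(), sparkConf, hadoopConf, secMgr)
  }
}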
| narahari92/spark | core/src/main/scala/org/apache/spark/deploy/DependencyUtils.scala | Scala | apache-2.0 | 5,066 |
package com.eevolution.context.dictionary.infrastructure.service.impl
import java.util.UUID
import com.eevolution.context.dictionary.infrastructure.repository.ViewRepository
import com.eevolution.context.dictionary.infrastructure.service.ViewService
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.persistence.PersistentEntityRegistry
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 21/11/17.
*/
/**
* View Service Implementation
* @param registry
* @param viewRepository
*/
class ViewServiceImpl (registry: PersistentEntityRegistry, viewRepository: ViewRepository) extends ViewService {
private val DefaultPageSize = 10
override def getAll() = ServiceCall {_ => viewRepository.getAll()}
override def getAllByPage(page : Option[Int], pageSize : Option[Int]) = ServiceCall{_ => viewRepository.getAllByPage(page.getOrElse(0) , pageSize.getOrElse(DefaultPageSize))}
override def getById(id: Int) = ServiceCall { _ => viewRepository.getById(id)}
override def getByUUID(uuid: UUID) = ServiceCall { _ => viewRepository.getByUUID(uuid)}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/service/impl/ViewServiceImpl.scala | Scala | gpl-3.0 | 1,973 |
/*
* Copyright 2014 websudos ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.websudos.reactiveneo.client
import java.nio.charset.Charset
import org.jboss.netty.handler.codec.http.{HttpResponse, HttpResponseStatus}
import play.api.data.validation.ValidationError
import play.api.libs.json._
import scala.collection.immutable.Seq
import scala.util.Try
/**
 * Parser abstraction used to parse the JSON content of an HttpResponse. To use this base class, an implementation
 * of the `parseResult(js: JsValue)` method needs to be provided.
*/
abstract class JsonParser[R] extends ResultParser[R] {
/**
   * Implementation of the converter from JsValue to the target type.
* @return Returns converted value.
*/
def parseResult(js: JsValue): R
private def parseJson(s: String): Try[R] = {
val json = Json.parse(s)
Try {
parseResult(json)
}
}
private[this] def singleErrorMessage(error: (JsPath, scala.Seq[ValidationError])) = {
val (path: JsPath, errors: Seq[ValidationError]) = error
    val message = errors.tail.foldLeft(errors.head.message)((acc, err) => s"$acc,${err.message}")
s"Errors at $path: $message"
}
private[client] def buildErrorMessage(error: JsError) = {
    error.errors.tail.foldLeft(singleErrorMessage(error.errors.head))((acc, err) => s"$acc,${singleErrorMessage(err)}")
}
override def parseResult(response: HttpResponse): Try[R] = {
if(response.getStatus.getCode == HttpResponseStatus.OK.getCode) {
parseJson(response.getContent.toString(Charset.forName("UTF-8")))
} else {
throw new InvalidResponseException(s"Response status <${response.getStatus}> is not valid")
}
}
}
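/**
 * Illustrative sketch (assumption: not part of the original source): a minimal
 * concrete parser that extracts a single string field from the response body.
 * `NameResult` and `NameResultParser` are hypothetical names used only here.
 */
case class NameResult(name: String)
class NameResultParser extends JsonParser[NameResult] {
  // Throws if the "name" field is missing or is not a JSON string.
  override def parseResult(js: JsValue): NameResult = NameResult((js \ "name").as[String])
}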
/**
* Exception indicating a problem when decoding resulting object value from JSON tree.
* @param msg Error message.
*/
class JsonValidationException(msg: String) extends Exception(msg)
class InvalidResponseException(msg: String) extends Exception(msg) | zarthross/reactiveneo | reactiveneo-dsl/src/main/scala/com/websudos/reactiveneo/client/JsonParser.scala | Scala | gpl-2.0 | 2,416 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.graph
import org.scalatest.{ WordSpec, Matchers }
import org.trustedanalytics.sparktk.frame.{ DataTypes, Column, FrameSchema }
import org.trustedanalytics.sparktk.graph.internal.GraphSchema
class GraphSchemaTest extends WordSpec with Matchers {
"validateVerticesSchema accepts valid columns" in {
val good1 = FrameSchema(List(Column("id", DataTypes.str), Column("b", DataTypes.str)))
val good2 = FrameSchema(List(Column("c", DataTypes.str), Column("id", DataTypes.int)))
GraphSchema.validateSchemaForVerticesFrame(good1)
GraphSchema.validateSchemaForVerticesFrame(good2)
}
"validateVerticesSchema should throw exception on schemas missing requisite column names" in {
val bad1 = FrameSchema(List(Column("a", DataTypes.str), Column("b", DataTypes.str)))
intercept[IllegalArgumentException] {
GraphSchema.validateSchemaForVerticesFrame(bad1)
}
}
"validateEdgesSchema accepts valid columns" in {
val good1 = FrameSchema(List(Column("src", DataTypes.str), Column("dst", DataTypes.str)))
val good2 = FrameSchema(List(Column("dst", DataTypes.str), Column("id", DataTypes.int), Column("src", DataTypes.int)))
GraphSchema.validateSchemaForEdgesFrame(good1)
GraphSchema.validateSchemaForEdgesFrame(good2)
}
"validateEdgesSchema should throw exception on schemas missing requisite column names" in {
val bad1 = FrameSchema(List(Column("a", DataTypes.str), Column("b", DataTypes.str)))
val bad2 = FrameSchema(List(Column("src", DataTypes.str), Column("b", DataTypes.str)))
val bad3 = FrameSchema(List(Column("source", DataTypes.str), Column("dst", DataTypes.str)))
intercept[IllegalArgumentException] {
GraphSchema.validateSchemaForEdgesFrame(bad1)
}
intercept[IllegalArgumentException] {
GraphSchema.validateSchemaForEdgesFrame(bad2)
}
intercept[IllegalArgumentException] {
GraphSchema.validateSchemaForEdgesFrame(bad3)
}
}
}
| ashaarunkumar/spark-tk | sparktk-core/src/test/scala/org/trustedanalytics/sparktk/graph/GraphSchemaTest.scala | Scala | apache-2.0 | 2,675 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.fim
import org.apache.spark.Partitioner
import org.apache.spark.mllib.fim.PPCTree.PPCTreeNode
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import scala.util.control.Breaks
class PPCTree[Item: ClassTag](
val numFreqItems: Int,
val minCount: Long,
val rankToItem: Map[Int, Item]) extends Serializable {
val root: PPCTreeNode = new PPCTreeNode()
var itemsetCount = new Array[Int]((this.numFreqItems - 1) * this.numFreqItems / 2)
var PPCTreeNodeCount = 0
var nodesetRowIndex = 0
var firstNodesetBegin = 0
var resultLen = 0
  val result = new Array[Int](numFreqItems)
var headTable: Array[PPCTreeNode] = null
var headTableLen: Array[Int] = null
var bfCursor = 0
var bfRowIndex = 0
val bfSize = 100000
var bfCurrentSize = this.bfSize * 10
val bf = new Array[Array[Int]](100000)
  bf(0) = new Array[Int](bfCurrentSize)
var diffCursor = 0
var diffRowIndex = 0
var diffSize = 1000000
var diffCurrentSize = this.diffSize * 10
val diff = new Array[Array[Int]](100000)
diff(0) = new Array[Int](diffCurrentSize)
def add(t: Array[Int]): this.type = {
var curPos = 0
var curRoot = root
var rightSibling: PPCTreeNode = null
val outerLoop = new Breaks()
outerLoop.breakable(
while (curPos != t.size) {
var child = curRoot.firstChild
val innerLoop = new Breaks()
innerLoop.breakable(
while (child != null) {
if (child.label == t(curPos)) {
curPos += 1
child.count += 1
curRoot = child
innerLoop.break()
}
if (child.rightSibling == null) {
rightSibling = child
child = null
innerLoop.break()
}
child = child.rightSibling
}
)
if (child == null) {
outerLoop.break()
}
}
)
for (i <- curPos until t.size) {
val node = new PPCTreeNode()
node.label = t(i)
if (rightSibling != null) {
rightSibling.rightSibling = node
rightSibling = null
} else {
curRoot.firstChild = node
}
node.rightSibling = null
node.firstChild = null
node.parent = curRoot
node.count = 1
curRoot = node
PPCTreeNodeCount += 1
}
this
}
def genNodesets(id: Int, partitioner: Partitioner): this.type = {
this.headTable = new Array[PPCTreeNode](this.numFreqItems)
this.headTableLen = new Array[Int](this.numFreqItems)
val tempHead = new Array[PPCTreeNode](this.numFreqItems)
var curRoot = root.firstChild
var pre = 0
var last = 0
while (curRoot != null) {
curRoot.foreIndex = pre
pre += 1
if (headTable(curRoot.label) == null) {
headTable(curRoot.label) = curRoot
tempHead(curRoot.label) = curRoot
} else {
tempHead(curRoot.label).labelSibling = curRoot
tempHead(curRoot.label) = curRoot
}
headTableLen(curRoot.label) += 1
if (partitioner.getPartition(curRoot.label) == id) {
var temp: PPCTreeNode = curRoot.parent
while(temp.label != -1) {
this.itemsetCount(curRoot.label * (curRoot.label - 1) / 2 + temp.label) += curRoot.count
temp = temp.parent
}
}
if (curRoot.firstChild != null) {
curRoot = curRoot.firstChild
} else {
curRoot.backIndex = last
last += 1
if (curRoot.rightSibling != null) {
curRoot = curRoot.rightSibling
} else {
curRoot = curRoot.parent
val loop = new Breaks()
loop.breakable(
while(curRoot != null) {
curRoot.backIndex = last
last += 1
if(curRoot.rightSibling != null) {
curRoot = curRoot.rightSibling
loop.break()
}
curRoot = curRoot.parent
}
)
}
}
}
this
}
def mine(rankToCount: Array[(Int, Long)], id: Int, partitioner: Partitioner): ArrayBuffer[(Array[Item], Long)] = {
val patternTree = new PatternTree(this)
patternTree.initialize(rankToCount)
patternTree.mine(id, partitioner)
}
}
object PPCTree {
class PPCTreeNode extends Serializable {
var label: Int = -1
var firstChild: PPCTreeNode = null
var rightSibling: PPCTreeNode = null
var labelSibling: PPCTreeNode = null
var parent: PPCTreeNode = null
var count: Int = -1
var foreIndex: Int = -1
var backIndex: Int = -1
}
}
| hibayesian/spark-fim | src/main/scala/org/apache/spark/mllib/fim/PPCTree.scala | Scala | apache-2.0 | 5,591 |
package com.signalcollect.dcop.evaluation
import scala.collection.concurrent.TrieMap
import scala.collection.immutable
import scala.collection.mutable
import scala.util.Random
import com.signalcollect.dcop.graph.DcopEdge
import com.signalcollect.dcop.graph.RankedDcopEdge
import com.signalcollect.dcop.graph.RankedDcopVertex
import com.signalcollect.dcop.graph.SimpleDcopVertex
import com.signalcollect.dcop.modules.Configuration
import com.signalcollect.dcop.modules.RankedConfig
import com.signalcollect.dcop.modules.SimpleConfig
import com.signalcollect.dcop.modules.UtilityConfig
object Factories {
def simpleConfig[AgentId, Action, UtilityType, DefaultUtility](
defaultUtility: => DefaultUtility,
neighborhoodCache: NeighborhoodCache[AgentId, Action] = new NeighborhoodCache[AgentId, Action],
defaultUtilityCache: FunctionCache[UtilityType] = new FunctionCache[UtilityType])(
agentId: AgentId,
domain: Seq[Action],
domainNeighborhood: collection.Map[AgentId, Seq[Action]],
utilities: collection.Map[(AgentId, Action, Action), UtilityType])(implicit ev: DefaultUtility => UtilityType, utilEv: Ordering[UtilityType]) = {
val (a, b, c) = neighborhoodCache(domain, domainNeighborhood)
new EavSimpleConfig(agentId, domain(0), a, b, c, utilities, defaultUtilityCache(defaultUtility))
}
def simpleConfigRandom[AgentId, Action, UtilityType, DefaultUtility](
defaultUtility: => DefaultUtility,
neighborhoodCache: RandomNeighborhoodCache[AgentId, Action] = new RandomNeighborhoodCache[AgentId, Action],
defaultUtilityCache: FunctionCache[UtilityType] = new FunctionCache[UtilityType])(
agentId: AgentId,
domain: Seq[Action],
domainNeighborhood: collection.Map[AgentId, Seq[Action]],
utilities: collection.Map[(AgentId, Action, Action), UtilityType])(implicit ev: DefaultUtility => UtilityType, utilEv: Ordering[UtilityType]) = {
val (a, b, c, d) = neighborhoodCache(agentId, domain, domainNeighborhood)
new EavSimpleConfig(agentId, a, b, c, d, utilities, defaultUtilityCache(defaultUtility))
}
def rankedConfig[AgentId, Action, UtilityType, DefaultUtility](
defaultUtility: => DefaultUtility,
neighborhoodCache: NeighborhoodCache[AgentId, Action] = new NeighborhoodCache[AgentId, Action],
defaultUtilityCache: FunctionCache[UtilityType] = new FunctionCache[UtilityType])(
agentId: AgentId,
domain: Seq[Action],
domainNeighborhood: collection.Map[AgentId, Seq[Action]],
utilities: collection.Map[(AgentId, Action, Action), UtilityType])(implicit ev: DefaultUtility => UtilityType, utilEv: Numeric[UtilityType]) = {
val (a, b, c) = neighborhoodCache(domain, domainNeighborhood)
new EavRankedConfig(agentId, domain(0), a, b, c, utilities, defaultUtilityCache(defaultUtility))
}
def rankedConfigRandom[AgentId, Action, UtilityType, DefaultUtility](
defaultUtility: => DefaultUtility,
neighborhoodCache: RandomNeighborhoodCache[AgentId, Action] = new RandomNeighborhoodCache[AgentId, Action],
defaultUtilityCache: FunctionCache[UtilityType] = new FunctionCache[UtilityType])(
agentId: AgentId,
domain: Seq[Action],
domainNeighborhood: collection.Map[AgentId, Seq[Action]],
utilities: collection.Map[(AgentId, Action, Action), UtilityType])(implicit ev: DefaultUtility => UtilityType, utilEv: Numeric[UtilityType]) = {
val (a, b, c, d) = neighborhoodCache(agentId, domain, domainNeighborhood)
new EavRankedConfig(agentId, a, b, c, d, utilities, defaultUtilityCache(defaultUtility))
}
def adoptConfig[AgentId, Action, UtilityType, DefaultUtility](
defaultUtility: => DefaultUtility,
neighborhoodCache: NeighborhoodCache[AgentId, Action] = new NeighborhoodCache[AgentId, Action],
defaultUtilityCache: FunctionCache[UtilityType] = new FunctionCache[UtilityType])(
agentId: AgentId,
domain: Seq[Action],
domainNeighborhood: collection.Map[AgentId, Seq[Action]],
utilities: collection.Map[(AgentId, Action, Action), UtilityType])(implicit ev: DefaultUtility => UtilityType, utilEv: Numeric[UtilityType]) = {
val (a, _, c) = neighborhoodCache(domain, domainNeighborhood)
new EavAdoptConfig(agentId, domain(0), a, Map.empty, c, utilities, defaultUtilityCache(defaultUtility))
}
def simpleDsaAVertex[AgentId, Action, Config <: SimpleConfig[AgentId, Action, UtilityType, Config] with EavConfig[AgentId, Action, UtilityType, Config], UtilityType](
changeProbability: Double,
debug: Boolean = false)(
config: Config with SimpleConfig[AgentId, Action, UtilityType, Config] with EavConfig[AgentId, Action, UtilityType, Config])(implicit utilEv: Numeric[UtilityType]) =
new SimpleDcopVertex(config)(new EavSimpleDsaAOptimizer(changeProbability), debug = debug)
def simpleDsaBVertex[AgentId, Action, Config <: SimpleConfig[AgentId, Action, UtilityType, Config] with EavConfig[AgentId, Action, UtilityType, Config], UtilityType](
changeProbability: Double,
debug: Boolean = false)(
config: Config with SimpleConfig[AgentId, Action, UtilityType, Config] with EavConfig[AgentId, Action, UtilityType, Config])(implicit utilEv: Numeric[UtilityType]) =
new SimpleDcopVertex(config)(new EavSimpleDsaBOptimizer(changeProbability), debug = debug)
def simpleDsanVertex[AgentId, Action, Config <: SimpleConfig[AgentId, Action, UtilityType, Config] with EavConfig[AgentId, Action, UtilityType, Config], UtilityType](
changeProbability: Double,
constant: UtilityType,
kval: UtilityType,
debug: Boolean = false)(
config: Config with SimpleConfig[AgentId, Action, UtilityType, Config] with EavConfig[AgentId, Action, UtilityType, Config])(implicit utilEv: Numeric[UtilityType]) =
new SimpleDcopVertex(config)(new EavSimpleDsanOptimizer(changeProbability, constant, kval), debug = debug)
def rankedDsaAVertex[AgentId, Action, Config <: RankedConfig[AgentId, Action, UtilityType, Config] with EavConfig[AgentId, Action, UtilityType, Config], UtilityType](
changeProbability: Double,
baseRank: (Int, Int),
unchangedMoveRankFactor: (Int, Int) = (1, 1),
unchangedMoveRankAddend: (Int, Int) = (0, 1),
changedMoveRankFactor: (Int, Int) = (1, 1),
changedMoveRankAddend: (Int, Int) = (0, 1),
debug: Boolean = false)(
config: Config with RankedConfig[AgentId, Action, UtilityType, Config] with EavConfig[AgentId, Action, UtilityType, Config])(implicit utilEv: Fractional[UtilityType]) =
new RankedDcopVertex(config)(
new EavRankedDsaAOptimizer(changeProbability),
baseRank = baseRank,
unchangedMoveRankFactor = unchangedMoveRankFactor,
unchangedMoveRankAddend = unchangedMoveRankAddend,
changedMoveRankFactor = changedMoveRankFactor,
changedMoveRankAddend = changedMoveRankAddend,
debug = debug)
def rankedDsaBVertex[AgentId, Action, Config <: RankedConfig[AgentId, Action, UtilityType, Config] with EavConfig[AgentId, Action, UtilityType, Config], UtilityType](
changeProbability: Double,
baseRank: (Int, Int),
unchangedMoveRankFactor: (Int, Int) = (1, 1),
unchangedMoveRankAddend: (Int, Int) = (0, 1),
changedMoveRankFactor: (Int, Int) = (1, 1),
changedMoveRankAddend: (Int, Int) = (0, 1),
debug: Boolean = false)(
config: Config with RankedConfig[AgentId, Action, UtilityType, Config] with EavConfig[AgentId, Action, UtilityType, Config])(implicit utilEv: Fractional[UtilityType]) =
new RankedDcopVertex(config)(
new EavRankedDsaBOptimizer(changeProbability),
baseRank = baseRank,
unchangedMoveRankFactor = unchangedMoveRankFactor,
unchangedMoveRankAddend = unchangedMoveRankAddend,
changedMoveRankFactor = changedMoveRankFactor,
changedMoveRankAddend = changedMoveRankAddend,
debug = debug)
def rankedDsanVertex[AgentId, Action, Config <: RankedConfig[AgentId, Action, UtilityType, Config] with EavConfig[AgentId, Action, UtilityType, Config], UtilityType](
changeProbability: Double,
constant: UtilityType,
kval: UtilityType,
baseRank: (Int, Int),
unchangedMoveRankFactor: (Int, Int) = (1, 1),
unchangedMoveRankAddend: (Int, Int) = (0, 1),
changedMoveRankFactor: (Int, Int) = (1, 1),
changedMoveRankAddend: (Int, Int) = (0, 1),
debug: Boolean = false)(
config: Config with RankedConfig[AgentId, Action, UtilityType, Config] with EavConfig[AgentId, Action, UtilityType, Config])(implicit utilEv: Fractional[UtilityType]) =
new RankedDcopVertex(config)(
new EavRankedDsanOptimizer(changeProbability, constant, kval),
baseRank = baseRank,
unchangedMoveRankFactor = unchangedMoveRankFactor,
unchangedMoveRankAddend = unchangedMoveRankAddend,
changedMoveRankFactor = changedMoveRankFactor,
changedMoveRankAddend = changedMoveRankAddend,
debug = debug)
def adoptVertex[AgentId, Action, Config <: AdoptConfig[AgentId, Action, UtilityType, Config], UtilityType](
debug: Boolean = false)(
config: Config with AdoptConfig[AgentId, Action, UtilityType, Config])(implicit utilEv: Numeric[UtilityType]) =
new AdoptDcopVertex(config)(new AdoptOptimizer, debug = debug)
def dcopEdge[AgentId]()(config: Configuration[AgentId, _, _]) =
new DcopEdge(config.centralVariableAssignment._1)
def rankedEdge[AgentId, UtilityType]()(config: UtilityConfig[AgentId, _, UtilityType, _])(implicit utilEv: Fractional[UtilityType]) =
new RankedDcopEdge(config.centralVariableAssignment._1)
def adoptEdge[AgentId]()(config: Configuration[AgentId, _, _]) =
new AdoptDcopEdge(config.centralVariableAssignment._1)
protected class NeighborhoodCache[AgentId, Action] {
private[this] val domainCache = TrieMap.empty[Seq[Action], Set[Action]]
private[this] val neighborhoodCache = TrieMap.empty[collection.Map[AgentId, Seq[Action]], (Map[AgentId, Action], collection.Map[AgentId, Set[Action]])]
/**
* The first value of the given domain is assigned as initial action.
*/
def apply(
domain: Seq[Action],
domainNeighborhood: collection.Map[AgentId, Seq[Action]]) = {
val (x, y) =
getOrElseUpdate(neighborhoodCache, domainNeighborhood, (
domainNeighborhood.mapValues(_(0)).view.toMap,
mutable.LinkedHashMap(domainNeighborhood.mapValues(x =>
getOrElseUpdate(domainCache, x, immutable.ListSet(x.reverse: _*))).toSeq: _*)))
(getOrElseUpdate(domainCache, domain, immutable.ListSet(domain.reverse: _*)), x, y)
}
}
protected class RandomNeighborhoodCache[AgentId, Action](random: Random = Random) {
private[this] val actionCache = TrieMap.empty[AgentId, Action]
private[this] val domainCache = TrieMap.empty[Seq[Action], Set[Action]]
private[this] val neighborhoodCache = TrieMap.empty[collection.Map[AgentId, Seq[Action]], (Map[AgentId, Action], collection.Map[AgentId, Set[Action]])]
/**
* A random value of the given domain is assigned as initial action.
*/
def apply(
agentId: AgentId,
domain: Seq[Action],
domainNeighborhood: collection.Map[AgentId, Seq[Action]]) = {
val (x, y) =
getOrElseUpdate(neighborhoodCache, domainNeighborhood, (
domainNeighborhood.map(x => (x._1,
getOrElseUpdate(actionCache, x._1, x._2(random.nextInt(x._2.length))))).toMap,
mutable.LinkedHashMap(domainNeighborhood.mapValues(x =>
getOrElseUpdate(domainCache, x, immutable.ListSet(x.reverse: _*))).toSeq: _*)))
(getOrElseUpdate(actionCache, agentId, domain(random.nextInt(domain.length))),
getOrElseUpdate(domainCache, domain, immutable.ListSet(domain.reverse: _*)), x, y)
}
}
protected class FunctionCache[A] {
private[this] val cache = TrieMap.empty[Any, A]
def apply[B](x: B)(implicit f: B => A): A = getOrElseUpdate(cache, x, f(x))
}
/**
* If given key is already in given map, returns associated value.
*
* Otherwise, computes value from given expression `op`, stores with key
* in map and returns that value.
*
* This is an atomic operation.
* @param map the map to use
* @param key the key to test
* @param op the computation yielding the value to associate with `key`. It
* may be executed even if `key` is already in map.
* @return the value associated with key (either previously or as a result
* of executing the method).
*/
private def getOrElseUpdate[A, B](map: collection.concurrent.Map[A, B], key: A, op: => B): B =
map.get(key) match {
case Some(v) => v
case None =>
val v = op
map.putIfAbsent(key, v).getOrElse(v)
}
}
| flueckiger/dcop-algorithms-evaluation | src/main/scala/com/signalcollect/dcop/evaluation/Factories.scala | Scala | apache-2.0 | 12,927 |
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.metamx.tranquility.test.common
import org.slf4j.bridge.SLF4JBridgeHandler
object JulUtils
{
def routeJulThroughSlf4j() {
SLF4JBridgeHandler.removeHandlersForRootLogger()
SLF4JBridgeHandler.install()
}
}
| druid-io/tranquility | core/src/test/scala/com/metamx/tranquility/test/common/JulUtils.scala | Scala | apache-2.0 | 1,041 |
package name.abhijitsarkar.scala
/**
* @author Abhijit Sarkar
*/
object Ch10 {
/**
* Q10.1: How would you extend a function? What are some of the applications for a class or trait
* that extends `Function1[A,B]`? If you are writing such a class or trait,
* would you extend `Function1[A,B]` or choose to extend `A => B` ?
*
* Ans: There can be numerous applications for extending `Function1[A,B]`. Imagine a class that models
* complex numbers. The additive inverse of a complex number is one that produces a value of
* zero when it is added to the original complex number. We could use a `Function1[Complex, Complex]` for this.
   * As another example, a string utility object could contain several `Function1[String, A]` methods
* that safely trim a string or safely convert to the type `A`.
*
* Extending `A => B` is adequate when there are no additional parameters needed for the function to operate,
* and the arguments to the `apply` method are sufficient. Otherwise, we need to extend `Function1` with a
* constructor argument that will later be used by the `apply` method. As an example, consider a filter
* function `A => Boolean`. In order to make a determination in the `apply` method,
* we may need more information passed to the constructor.
*/
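  /**
   * Illustrative sketch only (the `Complex` type and both implementations below are
   * hypothetical, not part of the original exercise): the additive-inverse example from
   * the answer above written as a plain `A => B`, and a filter that extends
   * `Function1[A,B]` because it needs extra information passed to its constructor.
   */
  case class Complex(re: Double, im: Double)
  /** Additive inverse of a complex number, written as `Complex => Complex`. */
  object AdditiveInverse extends (Complex => Complex) {
    def apply(c: Complex): Complex = Complex(-c.re, -c.im)
  }
  /** A filter `String => Boolean` whose decision depends on a constructor argument. */
  class MinLengthFilter(minLength: Int) extends Function1[String, Boolean] {
    def apply(s: String): Boolean = s.length >= minLength
  }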
/**
* Q10.2: How would you write a function type for a function that has two parameter lists,
* each with a single integer, and returns a single integer?
* If you wrote it as a FunctionX class, what would the exact class and type parameters contain?
*
* Ans:
* {{{
* def f(a: Int)(b: Int): Int = ???
* Function2[Int, Int, Int]
* }}}
*/
/**
* Q10.3 :A popular use for implicit parameters is for a default setting that works most of the time
* but may be overridden in special cases. Assume you are writing a sorting function
* that takes lines of text, and the lines may start with a right-aligned number.
* If you want to sort using the numbers, which may be prefixed by spaces,
* how would you encode this ability in an implicit parameter?
* How would you allow users to override this behavior and ignore the numbers for sorting?
*
* Ans: Check out the test cases for usage.
* Good reads:
* [[http://docs.scala-lang.org/tutorials/FAQ/finding-implicits.html Where does Scala look for implicits?]]
* [[http://stackoverflow.com/questions/19345030/easy-idiomatic-way-to-define-ordering-for-a-simple-case-class easy idiomatic way to define Ordering for a simple case class]]
*/
def sortLines(l: List[String])(implicit o: Ordering[String]) = l.sorted
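  /**
   * Hedged usage sketch for Q10.3 (this ordering is illustrative; the actual test cases
   * are not shown here). Passing `numericLineOrdering` explicitly, or bringing it into
   * implicit scope, sorts by the right-aligned leading number; omitting it falls back to
   * the default lexicographic `Ordering[String]`, which is how callers ignore the numbers.
   */
  val numericLineOrdering: Ordering[String] = Ordering.by { line: String =>
    val digits = line.trim.takeWhile(_.isDigit)
    if (digits.isEmpty) Int.MaxValue else digits.toInt // lines without a leading number sort last
  }
  // e.g. sortLines(List("10 beta", "2 alpha"))(numericLineOrdering) == List("2 alpha", "10 beta")
  // whereas sortLines(List("10 beta", "2 alpha")) == List("10 beta", "2 alpha") lexicographically.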
/**
* Q10.6: How would you add a `sum` method on all tuples, which returns the sum of all numeric values in a tuple?
* For example, `('a', "hi", 2.5, 1, true).sum` should return 3.5
*
* Ans: Check out http://stackoverflow.com/questions/35148463/scala-implicit-conversion-of-any-to-numeric/
*/
implicit class PimpedProduct(val p: Product) {
def sum = p.productIterator.collect {
case x: java.lang.Number => x.doubleValue
}.sum
}
}
| abhijitsarkar/learning-scala | src/main/scala/name/abhijitsarkar/scala/Ch10.scala | Scala | gpl-3.0 | 3,141 |
import sbt._
import scala.util.Try
object Count {
private var count = 0
def get: Int = count
def increment(): Unit = count += 1
def reset(): Unit = count = 0
def reloadCount(file: File): Int = Try(IO.read(file).toInt).getOrElse(0)
}
| sbt/sbt | sbt-app/src/sbt-test/watch/on-start-watch/project/Count.scala | Scala | apache-2.0 | 244 |
package net.defoo
import java.io.File
/**
* Created by derek on 09/03/14.
*/
sealed case class CannotAccessBackupHomeException(backupHome: File)
extends Exception(s"Cannot access backupHome ${backupHome.getAbsolutePath}")
| kcderek/scala-time-machine | src/main/scala/net/defoo/BackupActorException.scala | Scala | apache-2.0 | 228 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.examples
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastorage.store.impl.FileFactory
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.examples.util.ExampleUtils
/**
 * Configure Alluxio:
 * 1. Start Alluxio.
 * 2. Upload the client jar: "/alluxio_path/core/client/target/
 *    alluxio-core-client-YOUR-VERSION-jar-with-dependencies.jar"
 * 3. More details at: http://www.alluxio.org/docs/master/en/Running-Spark-on-Alluxio.html
*/
object AlluxioExample {
def main(args: Array[String]) {
val cc = ExampleUtils.createCarbonContext("AlluxioExample")
cc.sparkContext.hadoopConfiguration.set("fs.alluxio.impl", "alluxio.hadoop.FileSystem")
FileFactory.getConfiguration.set("fs.alluxio.impl", "alluxio.hadoop.FileSystem")
// Specify timestamp format based on raw data
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
cc.sql("DROP TABLE IF EXISTS t3")
cc.sql("""
CREATE TABLE IF NOT EXISTS t3
(ID Int, date Timestamp, country String,
name String, phonetype String, serialname String, salary Int)
STORED BY 'carbondata'
""")
cc.sql(s"""
LOAD DATA LOCAL INPATH 'alluxio://localhost:19998/data.csv' into table t3
""")
cc.sql("""
SELECT country, count(salary) AS amount
FROM t3
WHERE country IN ('china','france')
GROUP BY country
""").show()
cc.sql("DROP TABLE IF EXISTS t3")
}
}
| ashokblend/incubator-carbondata | examples/spark/src/main/scala/org/apache/carbondata/examples/AlluxioExample.scala | Scala | apache-2.0 | 2,434 |
import com.typesafe.sbt.pgp.PgpKeys._
import org.scalajs.sbtplugin.ScalaJSPlugin
import org.scalajs.sbtplugin.ScalaJSPlugin.autoImport._
import sbt.Keys._
import sbt._
object ScalajsReactComponents extends Build {
val Scala211 = "2.11.7"
val scalajsReactVersion = "0.9.2"
val scalaCSSVersion = "0.3.0"
type PE = Project => Project
def commonSettings: PE =
_.enablePlugins(ScalaJSPlugin)
.settings(
organization := "com.github.chandu0101.scalajs-react-components",
version := "0.2.0-SNAPSHOT",
homepage := Some(url("https://github.com/chandu0101/scalajs-react-components")),
licenses += ("Apache-2.0", url("http://opensource.org/licenses/Apache-2.0")),
scalaVersion := Scala211,
scalacOptions ++= Seq("-deprecation", "-unchecked", "-feature",
"-language:postfixOps", "-language:implicitConversions",
"-language:higherKinds", "-language:existentials"),
updateOptions := updateOptions.value.withCachedResolution(true),
dependencyOverrides ++= Set(
"org.scala-lang" % "scala-reflect" % scalaVersion.value,
"org.scala-js" %% "scalajs-test-interface" % "0.6.5"
)
)
def preventPublication: PE =
_.settings(
publishArtifact := false,
publishLocalSigned := (), // doesn't work
publishSigned := (), // doesn't work
packagedArtifacts := Map.empty) // doesn't work - https://github.com/sbt/sbt-pgp/issues/42
def publicationSettings: PE =
_.settings(
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (isSnapshot.value)
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
},
pomExtra :=
<scm>
<connection>scm:git:github.com:chandu0101/scalajs-react-components</connection>
<developerConnection>scm:git:[email protected]:chandu0101/scalajs-react-components.git</developerConnection>
<url>github.com:chandu0101/scalajs-react-components.git</url>
</scm>
<developers>
<developer>
<id>chandu0101</id>
<name>Chandra Sekhar Kode</name>
</developer>
</developers>)
.configure(sourceMapsToGithub)
def sourceMapsToGithub: PE =
p => p.settings(
scalacOptions ++= (if (isSnapshot.value) Seq.empty else Seq({
val a = p.base.toURI.toString.replaceFirst("[^/]+/?$", "")
val g = "https://raw.githubusercontent.com/chandu0101/scalajs-react-components"
s"-P:scalajs:mapSourceURI:$a->$g/v${version.value}/"
}))
)
def utestSettings: PE =
_.configure(useReact("test"))
.settings(
libraryDependencies += "com.lihaoyi" %%% "utest" % "0.3.0",
testFrameworks += new TestFramework("utest.runner.Framework"),
scalaJSStage in Test := FastOptStage,
requiresDOM := true,
jsEnv in Test := PhantomJSEnv().value)
def useReact(scope: String = "compile"): PE =
_.settings(
libraryDependencies += "com.github.japgolly.scalajs-react" %%% "extra" % scalajsReactVersion
)
val jsDir = "demo/assets"
def createLauncher(scope: String = "compile"): PE =
_.settings(persistLauncher := true,
persistLauncher in Test := false,
crossTarget in (Compile, fullOptJS) := file(jsDir),
crossTarget in (Compile, fastOptJS) := file(jsDir),
// crossTarget in (Compile, packageLauncher) := file(jsDir),
artifactPath in (Compile, fastOptJS) := ((crossTarget in (Compile, fastOptJS)).value /
((moduleName in fastOptJS).value + "-opt.js"))
)
def addCommandAliases(m: (String, String)*) = {
val s = m.map(p => addCommandAlias(p._1, p._2)).reduce(_ ++ _)
(_: Project).settings(s: _*)
}
// ==============================================================================================
lazy val root = Project("root", file("."))
.aggregate(macros, core, demo)
.configure(commonSettings, preventPublication, addCommandAliases(
"t" -> "; test:compile ; test/test",
"tt" -> ";+test:compile ;+test/test",
"T" -> "; clean ;t",
"TT" -> ";+clean ;tt"))
// ==============================================================================================
lazy val macros = project
.configure(commonSettings, utestSettings, preventPublication)
.settings(
name := "macros",
libraryDependencies ++= Seq(
"org.scalatest" %%% "scalatest" % "3.0.0-M6" % Test
)
)
// ==============================================================================================
lazy val core = project
.configure(commonSettings, publicationSettings)
.dependsOn(macros)
.settings(
name := "core",
libraryDependencies ++= Seq(
"com.github.japgolly.scalajs-react" %%% "core" % scalajsReactVersion,
"com.github.japgolly.scalajs-react" %%% "extra" % scalajsReactVersion,
"com.github.japgolly.scalacss" %%% "core" % scalaCSSVersion,
"com.github.japgolly.scalacss" %%% "ext-react" % scalaCSSVersion),
target in Compile in doc := baseDirectory.value / "docs"
)
// ==============================================================================================
lazy val demo = project
.dependsOn(core)
    .configure(commonSettings, createLauncher(), useReact(), preventPublication)
}
| tpdi/scalajs-react-components | project/Build.scala | Scala | apache-2.0 | 5,612 |
package com.sksamuel.elastic4s.requests.searches.aggs.builders
import com.sksamuel.elastic4s.handlers.searches.queries.QueryBuilderFn
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}
import com.sksamuel.elastic4s.requests.searches.aggs.{AggMetaDataFn, FiltersAggregation, SubAggsBuilderFn}
object FiltersAggregationBuilder {
def apply(agg: FiltersAggregation): XContentBuilder = {
val builder = XContentFactory.jsonBuilder()
val filters = {
builder.startArray("filters")
val filters = agg.filters.map(QueryBuilderFn.apply).map(_.string).mkString(",")
builder.rawValue(filters)
builder.endArray()
}
builder.rawField("filters", filters)
SubAggsBuilderFn(agg, builder)
AggMetaDataFn(agg, builder)
builder
}
}
| sksamuel/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/aggs/builders/FiltersAggregationBuilder.scala | Scala | apache-2.0 | 789 |
package nest.sparkle.util
import java.nio.file.Paths
import java.io.BufferedInputStream
import java.io.BufferedReader
import java.io.InputStreamReader
import scala.io.Source
import java.util.jar.JarFile
import java.net.URLDecoder
import scala.collection.JavaConverters._
import java.io.File
import java.net.URL
import java.nio.file.Path
case class ResourceNotFound(msg: String) extends RuntimeException(msg)
/** utilities for working with resources on the classpath */
object Resources extends Log {
/** return a list of the resources within a resource folder
* (works whether the resource is mapped to the file system or to .jar file
*/
def byDirectory(resourcePath: String,
classLoader: ClassLoader = Thread.currentThread().getContextClassLoader()): Iterable[String] = {
val url = Option(classLoader.getResource(resourcePath)).getOrElse { throw new ResourceNotFound(resourcePath) }
url.getProtocol match {
case "file" => childrenFromFile(url)
case "jar" => childrenFromJar(url, resourcePath)
}
}
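  /**
   * Hedged usage sketch (the folder name "web" is made up): lists the immediate children of
   * a classpath folder, whether the folder lives on the filesystem or inside a jar.
   */
  def exampleChildren(): Iterable[String] = byDirectory("web")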
/** (for testing) return a string for the fileystem path to a given resource. Only works
* on resources in the filesystem (jar resources will throw an exception) */
def filePathString(resourcePath:String): String = {
val possibleResource = Thread.currentThread.getContextClassLoader.getResource(resourcePath)
val resource = Option(possibleResource).getOrElse { throw new ResourceNotFound(resourcePath) }
val file = new File(resource.toURI)
file.getPath()
}
/** load children of a path from a file resource */
protected def childrenFromFile(fileUrl: URL): Iterable[String] = {
val file = new File(fileUrl.toURI())
file.list()
}
/** return children of a path from the .jar file. Since the .jar file records full paths of all
* files, we need to scan the jar table of contents to extract the child relationships for the
* path.
*/
protected[util] def childrenFromJar(jarUrl: URL, resourcePath: String): Iterable[String] = {
object Child {
val TakeToSlash = """([^/]*)""".r
/** given a full path, extract the children (not including the grandchildren) of the
* resourcePath folder.
*
* e.g. given foo/bar/bah/bee and a resourcePath of foo/bar, extract bah
*/
def unapply(path: String): Option[String] = {
if (path.startsWith(resourcePath)) {
val suffix = path.stripPrefix(resourcePath + "/")
val toSlash = TakeToSlash.findFirstIn(suffix).get
Some(toSlash)
} else {
None
}
}
}
// url is e.g. "file:/home/me/foo/bah.jar!/resourcePath
// we want: /home/me/foo/bah.jar
val pathToJar = jarUrl.getPath.stripPrefix("file:").stripSuffix(s"!/$resourcePath")
val decodedPath = URLDecoder.decode(pathToJar, "UTF-8")
val jar = new JarFile(decodedPath)
val children = jar.entries().asScala.map(_.getName).collect {
case Child(child) => child
}
children.toSet
}
}
| mighdoll/sparkle | util/src/main/scala/nest/sparkle/util/Resources.scala | Scala | apache-2.0 | 3,022 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.charts.template
import io.gatling.charts.component.Component
private[charts] class ResponsesPageTemplate(chartComponent: Component) extends PageTemplate("Responses / sec", false, None, None, chartComponent)
| gatling/gatling | gatling-charts/src/main/scala/io/gatling/charts/template/ResponsesPageTemplate.scala | Scala | apache-2.0 | 847 |
package com.aesireanempire.eplus
import com.aesireanempire.eplus.blocks.entities.TileEntityAdvEnchantmentTable
import com.aesireanempire.eplus.gui.elements.{DataProviderEnchantmentData, DataProviderInformation, ListItem, listItemEnchantments}
import com.aesireanempire.eplus.inventory.{SlotArmor, SlotEnchantment, TableInventory}
import net.minecraft.enchantment.{Enchantment, EnchantmentData, EnchantmentHelper}
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.inventory.{Container, IInventory, Slot}
import net.minecraft.item.ItemStack
import net.minecraft.util.BlockPos
import net.minecraft.world.World
import net.minecraftforge.common.ForgeHooks
import scala.collection.JavaConversions._
class ContainerAdvEnchantment(player: EntityPlayer, tile: TileEntityAdvEnchantmentTable) extends Container {
val tableInventory: IInventory = new TableInventory(this, "enchant", true, 1)
var dataProvider = new DataProviderEnchantmentData
var infoProvider = new DataProviderInformation
addSlotToContainer(new SlotEnchantment(this, tableInventory, 0, 64, 17))
bindPlayerInventory()
setInformationPlayerLever(player.experienceLevel)
setInformationBookCase()
setInformationCost(Array.empty[ListItem[EnchantmentData]])
def bindPlayerInventory() = {
val xStart = 47
for (i <- 0 until 3) {
for (j <- 0 until 9) {
addSlotToContainer(new Slot(player.inventory, j + i * 9 + 9, 17 + j * 18 + xStart, 91 + i * 18))
}
}
for (i <- 0 until 9) {
addSlotToContainer(new Slot(player.inventory, i, 17 + i * 18 + xStart, 149))
}
for (i <- 0 until 4) {
addSlotToContainer(new SlotArmor(i, player, 39 - i, 7, 24 + i * 19))
}
}
def setInformationPlayerLever(level: Int) {
infoProvider.setInfoAt(0, "P:" + level.toString)
}
def setInformationBookCase() {
infoProvider.setInfoAt(1, "B:" + getNumberOfBookcases.toString)
}
def setInformationCost(items: Array[ListItem[EnchantmentData]]) {
infoProvider.setInfoAt(2, "C:" + getEnchantmentCost(items))
}
def getEnchantmentCost(enchantments: Array[ListItem[EnchantmentData]]): Int = {
var cost = 0
for (item: ListItem[EnchantmentData] <- enchantments) {
val enchant = item.asInstanceOf[listItemEnchantments]
val newLevel = enchant.getLevel
val oldLevel = enchant.oldLevel
cost += calculateCost(enchant.getEnchantment, newLevel, oldLevel)
}
cost
}
private def calculateCost(enchantment: Enchantment, newLevel: Int, oldLevel: Int): Int = {
val itemStack = tableInventory.getStackInSlot(0)
if (itemStack == null) return 0
var enchantability = itemStack.getItem.getItemEnchantability
if (enchantability == 0) return 0
if(enchantability <= 5) {
enchantability = 5
}
val maxLevel = enchantment.getMaxLevel
val deltaLevel = newLevel - oldLevel
val averageEnchantability = (enchantment.getMaxEnchantability(maxLevel) + enchantment.getMinEnchantability(maxLevel)) / 2
var cost = 0
def costForLevel(level: Int): Int = {
(level + Math.pow(level, 2)).toInt
}
if (deltaLevel >= 0) {
cost = costForLevel(newLevel) - costForLevel(oldLevel)
} else {
cost = (-.80 * (costForLevel(oldLevel) - costForLevel(newLevel))).toInt
}
(cost * averageEnchantability) / (enchantability * 3)
}
private def getNumberOfBookcases: Float = {
var temp: Float = 0
val world: World = tile.getWorld
val xCoord: Int = tile.getPos.getX
val yCoord: Int = tile.getPos.getY
val zCoord: Int = tile.getPos.getZ
for (
x <- -1 to 1;
z <- -1 to 1
) {
if ((x != 0 || z != 0) &&
world.isAirBlock(new BlockPos(xCoord + x, yCoord, zCoord + z)) &&
world.isAirBlock(new BlockPos(xCoord + x, yCoord + 1, zCoord + z))) {
temp += ForgeHooks.getEnchantPower(world, new BlockPos(xCoord + x * 2, yCoord, zCoord + z * 2))
temp += ForgeHooks.getEnchantPower(world, new BlockPos(xCoord + x * 2, yCoord + 1, zCoord + z * 2))
if (x != 0 && z != 0) {
temp += ForgeHooks.getEnchantPower(world, new BlockPos(xCoord + x * 2, yCoord, zCoord + z))
temp += ForgeHooks.getEnchantPower(world, new BlockPos(xCoord + x * 2, yCoord + 1, zCoord + z))
temp += ForgeHooks.getEnchantPower(world, new BlockPos(xCoord + x, yCoord, zCoord + z * 2))
temp += ForgeHooks.getEnchantPower(world, new BlockPos(xCoord + x, yCoord + 1, zCoord + z * 2))
}
}
}
temp
}
override def canInteractWith(player: EntityPlayer): Boolean = true
override def onCraftMatrixChanged(par1IInventory: IInventory): Unit = {
super.onCraftMatrixChanged(par1IInventory)
val itemStack: ItemStack = par1IInventory.getStackInSlot(0)
var newEnchantmentList = Array.empty[EnchantmentData]
if (itemStack != null) {
newEnchantmentList = AdvEnchantmentHelper.buildEnchantmentList(itemStack)
}
dataProvider.setData(newEnchantmentList)
}
def tryEnchantItem(player: EntityPlayer, enchants: collection.mutable.Map[Int, Int], cost: Int): Boolean = {
val itemStack = tableInventory.getStackInSlot(0)
if (itemStack == null) return false
if (!player.capabilities.isCreativeMode) {
if (cost > player.experienceLevel) {
return false
}
if (cost >= 0 && cost > getNumberOfBookcases) {
return false
}
}
player.addExperienceLevel(-cost)
enchantItem(player, enchants, itemStack)
true
}
def enchantItem(player: EntityPlayer, enchants: collection.mutable.Map[Int, Int], itemStack: ItemStack) = {
EnchantmentHelper.setEnchantments(enchants, itemStack)
if (enchants.isEmpty && AdvEnchantmentHelper.isBook(itemStack)) {
itemStack.setTagCompound(null)
}
}
override def transferStackInSlot(player: EntityPlayer, slot: Int): ItemStack = {
null
}
override def onContainerClosed(player: EntityPlayer): Unit = {
super.onContainerClosed(player)
for (i <- 0 until tableInventory.getSizeInventory) {
val stack = tableInventory.getStackInSlot(i)
if (stack != null) {
if (!player.inventory.addItemStackToInventory(stack))
player.entityDropItem(stack, 0.2f)
}
}
}
}
| darkhax/EnchantingPlus-Scala | src/main/scala/com/aesireanempire/eplus/ContainerAdvEnchantment.scala | Scala | lgpl-3.0 | 6,813 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.mandar2812.dynaml.examples
import com.cibo.scalastan._
import spire.implicits._
import io.github.mandar2812.dynaml.probability._
import io.github.mandar2812.dynaml.pipes._
object StanLinearModel {
def apply(): StanResults = {
val x = GaussianRV(0d, 1d)
val y = DataPipe((x: Double) => (GaussianRV(0d, 0.5d) + 2.5*x) - 1.5*x*x)
val xs = x.iid(500).draw.toSeq
val ys: Seq[Double] = xs.map(x => y(x).draw)
object MyModel extends StanModel {
val n = data(int(lower = 0))
val x = data(vector(n))
val y = data(vector(n))
val b = parameter(real())
val m = parameter(real())
val sigma = parameter(real(lower = 0.0))
sigma ~ stan.cauchy(0, 1)
y ~ stan.normal(m * x + b*x*:*x, sigma)
}
MyModel
.withData(MyModel.x, xs)
.withData(MyModel.y, ys)
.run(chains = 5)
}
} | transcendent-ai-labs/DynaML | dynaml-examples/src/main/scala/io/github/mandar2812/dynaml/examples/StanLinearModel.scala | Scala | apache-2.0 | 1,763 |
import common._
package object scalashop {
/** The value of every pixel is represented as a 32 bit integer. */
type RGBA = Int
/** Returns the red component. */
def red(c: RGBA): Int = (0xff000000 & c) >>> 24
/** Returns the green component. */
def green(c: RGBA): Int = (0x00ff0000 & c) >>> 16
/** Returns the blue component. */
def blue(c: RGBA): Int = (0x0000ff00 & c) >>> 8
/** Returns the alpha component. */
def alpha(c: RGBA): Int = (0x000000ff & c) >>> 0
/** Used to create an RGBA value from separate components. */
def rgba(r: Int, g: Int, b: Int, a: Int): RGBA = {
(r << 24) | (g << 16) | (b << 8) | (a << 0)
}
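  /** Illustrative only (not part of the original assignment): packing components with `rgba`
    * and reading them back with the accessors above round-trips for values in the 0-255 range.
    */
  def rgbaRoundTripHolds(r: Int, g: Int, b: Int, a: Int): Boolean = {
    val c = rgba(r, g, b, a)
    red(c) == r && green(c) == g && blue(c) == b && alpha(c) == a
  }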
/** Restricts the integer into the specified range. */
def clamp(v: Int, min: Int, max: Int): Int = {
if (v < min) min
else if (v > max) max
else v
}
/** Image is a two-dimensional matrix of pixel values. */
class Img(val width: Int, val height: Int, private val data: Array[RGBA]) {
def this(w: Int, h: Int) = this(w, h, new Array(w * h))
def apply(x: Int, y: Int): RGBA = data(y * width + x)
def update(x: Int, y: Int, c: RGBA): Unit = data(y * width + x) = c
}
/** Computes the blurred RGBA value of a single pixel of the input image. */
def boxBlurKernel(src: Img, x: Int, y: Int, radius: Int): RGBA = {
    if (radius > 0) {
      var xIdx = clamp(x - radius, 0, src.width - 1)
      var yIdx = clamp(y - radius, 0, src.height - 1)
      val xEndIdx = clamp(x + radius, 0, src.width - 1)
      val yEndIdx = clamp(y + radius, 0, src.height - 1)
      var opCounter = 0
      var red = 0
      var green = 0
      var blue = 0
      var alpha = 0
      while (yIdx <= yEndIdx) {
        while (xIdx <= xEndIdx) {
          red = red + this.red(src(xIdx, yIdx))
          green = green + this.green(src(xIdx, yIdx))
          blue = blue + this.blue(src(xIdx, yIdx))
          alpha = alpha + this.alpha(src(xIdx, yIdx))
          opCounter = opCounter + 1
          xIdx = xIdx + 1
        }
        xIdx = clamp(x - radius, 0, src.width - 1)
        yIdx = yIdx + 1
      }
      this.rgba(red / opCounter, green / opCounter, blue / opCounter, alpha / opCounter)
    } else {
      src(x, y)
    }
}
}
| alicanalbayrak/ScalaExperiments | scalashop/src/main/scala/scalashop/package.scala | Scala | mit | 2,218 |
package http
package response
import scalaz._
import atto._, Atto._
object HttpResponseParser {
lazy val cr: Parser[Char] =
char(0x0D)
lazy val lf: Parser[Char] =
char(0x0A)
lazy val eol: Parser[Unit] =
(cr ~ lf | cr | lf) map (_ => ())
lazy val dot =
char('.')
lazy val zeroD: Parser[Option[Digit]] =
char('0') map (Digit.digitFromChar(_))
lazy val oneD: Parser[Option[Digit]] =
char('1') map (Digit.digitFromChar(_))
import scalaz.syntax.apply._
import scalaz.syntax.std.list._
import scalaz.std.option._
import scalaz.std.list._
import scalaz.syntax.std.string._
lazy val version: Parser[Option[Version]] =
for {
_ <- string("HTTP/")
major <- oneD <~ dot
minor <- (oneD | zeroD)
} yield (major |@| minor)(Version.version(_, _))
lazy val httpCode =
int.map(Status.fromInt(_)) as "httpCode"
lazy val alphaText: Parser[List[Char]] =
many(letterOrDigit | spaceChar | char('-'))
lazy val statusLine =
for {
v <- version <~ spaceChar
c <- httpCode <~ spaceChar
t <- alphaText <~ eol
} yield (v |@| c)(StatusLine.statusLine(_, _, t))
lazy val printableChar: Parser[Char] =
elem(c => c >= 32 && c < 127)
lazy val header: Parser[Option[(ResponseHeader, NonEmptyList[Char])]] =
for {
key <- alphaText <~ string(": ")
value <- many1(printableChar) <~ eol
} yield (ResponseHeader.fromString(key.mkString) |@| value.mkString.charsNel)((_, _))
lazy val httpHeader =
for {
line <- statusLine
headers <- many1(header)
} yield {
import Scalaz._
(line |@| headers.sequence)(HttpResponse.response[Stream](_, _, Stream.empty))
}
lazy val response =
for {
h <- httpHeader
body <- opt(many(printableChar) <~ eol)
} yield h.map(_ >> body.map(l => l.map(_.toByte).toStream).getOrElse(Stream.empty))
}
| gpampara/scalaz-http-client | src/main/scala/http/response/HttpResponseParser.scala | Scala | bsd-3-clause | 1,899 |
package com.etsy.sahale
import org.apache.commons.httpclient.{HttpClient, HttpStatus}
import com.google.api.client.json.jackson2.JacksonFactory
import com.google.api.client.http.apache.ApacheHttpTransport
import org.apache.commons.httpclient.methods.{GetMethod, PostMethod}
import org.apache.commons.httpclient.NameValuePair
import com.google.api.client.googleapis.auth.oauth2.{GoogleIdTokenVerifier, GoogleCredential, GooglePublicKeysManager}
import com.google.api.client.json.webtoken.{JsonWebToken, JsonWebSignature}
import java.util.concurrent.atomic.AtomicBoolean
import java.net.URI
import cascading.flow.Flow
import scala.util.{Try, Success}
import java.io.FileInputStream
import java.util.Collections
import java.net.URLEncoder
case class IdToken(audience: String, transport: HttpClient, serviceAccountJsonFile: Option[String] = None) {
private var _token: Option[String] = None
private var _expiresAtSeconds: Option[Long] = None
private def updateToken {
val token = serviceAccountJsonFile.map { file =>
IdToken.getTokenFromServiceAccountFlow(audience, transport, file)
}.getOrElse(IdToken.getTokenFromMetadata(audience, transport))
// the expiry checker also validates the token and raises an exception if
// the token is invalid. So compute both before storing either.
val expiry = IdToken.getExpiresAtSeconds(token, audience)
_token = Some(token)
_expiresAtSeconds = Some(expiry)
}
def isExpired: Boolean = {
// Returns true if the token has not yet been retrieved, or if the token
// has expired
_expiresAtSeconds.forall { expSeconds =>
// Indicate expiry 1 minute before the token has actually expired,
// to prevent us from using a token that will expire by the time it is
// processed by the server
expSeconds <= 60 + System.currentTimeMillis / 1000
}
}
def token: String = {
if(isExpired) {
updateToken
}
// updateToken raises an exception if it fails, so if we're here then we
// know that _token is populated
_token.get
}
}
object IdToken {
val IDENTITY_TOKEN_METADATA_URI = "http://metadata/computeMetadata/v1/instance/service-accounts/default/identity"
val GOOGLE_TOKEN_ENDPOINT = "https://www.googleapis.com/oauth2/v4/token"
def getTokenFromMetadata(audience: String, transport: HttpClient): String = {
// see: https://cloud.google.com/compute/docs/instances/verifying-instance-identity
val request = new GetMethod(IDENTITY_TOKEN_METADATA_URI)
request.addRequestHeader("Metadata-Flavor", "Google")
request.setQueryString(Array(new NameValuePair("audience", audience)))
var token: Option[String] = None
try {
val code = transport.executeMethod(request)
if(code != HttpStatus.SC_OK) {
FlowTracker.LOG.warn(s"Metadata server returned failure code on identity-token request: $code")
}
token = Some(new String(request.getResponseBody, "UTF-8"))
} catch { case e: Throwable =>
FlowTracker.LOG.warn(s"Failed to refresh identity token from metadata server: $e")
} finally {
request.releaseConnection
}
token.getOrElse {
sys.error("Failed to refresh identity token")
}
}
def getTokenFromServiceAccountFlow(audience: String, transport: HttpClient, filename: String): String ={
// This is a 2-step flow to get a Google-signed ID token starting from a
// service account.
// see: https://cloud.google.com/endpoints/docs/openapi/service-account-authentication#using_a_google_id_token
// Step 1: Construct a token that we self-sign using the private key for
// the service account. We set the audience to the google token
// API's URL, and we set the target_audience assertion to the
// audience for which we want the Google ID token
val credentials = getServiceAccountCredentials(filename)
val selfSignedToken = getServiceAccountSignedToken(audience, credentials)
// Step 2: Send the self-signed token to the Google token endpoint, and it
// will send us back a Google-signed ID token that we can send to
// the upstream service
exchangeToken(selfSignedToken, transport)
}
val keyManager = new GooglePublicKeysManager(new ApacheHttpTransport, new JacksonFactory)
def getExpiresAtSeconds(token: String, audience: String): Long = {
val verifier = new GoogleIdTokenVerifier.Builder(keyManager)
.setAudience(Collections.singletonList(audience))
.setIssuer("https://accounts.google.com")
.build
val parsedToken = Option(verifier.verify(token)).getOrElse {
sys.error("Failed to parse id token!")
}
parsedToken.getPayload.getExpirationTimeSeconds
}
private def getServiceAccountSignedToken(audience: String, creds: GoogleCredential): String = {
val header = new JsonWebSignature.Header
header.setType("JWT")
header.setAlgorithm("RS256")
val nowSeconds = System.currentTimeMillis / 1000
val payload = new JsonWebToken.Payload
payload.setIssuedAtTimeSeconds(nowSeconds)
payload.setExpirationTimeSeconds(nowSeconds + 3600)
payload.setIssuer(creds.getServiceAccountId)
payload.set("target_audience", audience)
payload.setAudience(GOOGLE_TOKEN_ENDPOINT)
JsonWebSignature.signUsingRsaSha256(
creds.getServiceAccountPrivateKey,
new JacksonFactory,
header,
payload)
}
private def getServiceAccountCredentials(filename: String): GoogleCredential = {
val stream = new FileInputStream(filename)
val creds = GoogleCredential.fromStream(stream)
stream.close
creds
}
private def exchangeToken(token: String, transport: HttpClient): String = {
// see: https://cloud.google.com/compute/docs/instances/verifying-instance-identity
val request = new PostMethod(GOOGLE_TOKEN_ENDPOINT)
val params = Array(
new NameValuePair("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
new NameValuePair("assertion", token)
)
request.setRequestBody(params)
var idToken: Option[String] = None
try {
val code = transport.executeMethod(request)
if(code != HttpStatus.SC_OK) {
sys.error(s"Token endpoint failed with code $code: ${request.getResponseBody}")
}
val idTokenJson = new String(request.getResponseBody, "UTF-8")
val parser = (new JacksonFactory).createJsonParser(idTokenJson)
parser.skipToKey("id_token")
idToken = Some(parser.getText)
} catch { case e: Throwable =>
FlowTracker.LOG.warn(s"Failed to refresh identity token from metadata server: $e")
} finally {
request.releaseConnection
}
idToken.getOrElse {
sys.error("Failed to retrieve google-signed identity token")
}
}
def getAudience(hostPort: String) = {
val uri = new URI(hostPort)
// Do not send the port as part of the audience, only the scheme and host
new URI(uri.getScheme, uri.getHost, null, null).toString
}
}
class GoogleAuthFlowTracker(
flow: Flow[_],
runCompleted: AtomicBoolean,
hostPort: String,
disableProgressBar: Boolean,
serviceAccountJsonFilename: String,
httpConnectionTimeout: Int = FlowTracker.HTTP_CONNECTION_TIMEOUT,
httpSocketTimeout: Int = FlowTracker.HTTP_SOCKET_TIMEOUT) extends FlowTracker(
flow, runCompleted, hostPort, disableProgressBar, httpConnectionTimeout, httpSocketTimeout) {
// More java-compatibility constructors
def this(
flow: Flow[_],
runCompleted: AtomicBoolean,
hostPort: String,
disableProgressBar: java.lang.Boolean,
httpConnectionTimeout: Int,
httpSocketTimeout: Int) = {
this(flow, runCompleted, hostPort, disableProgressBar, null, httpConnectionTimeout, httpSocketTimeout)
}
def this(flow: Flow[_], runCompleted: AtomicBoolean, hostPort: String, disableProgressBar: java.lang.Boolean) = {
this(flow, runCompleted, hostPort, disableProgressBar, null,
FlowTracker.HTTP_CONNECTION_TIMEOUT, FlowTracker.HTTP_SOCKET_TIMEOUT)
}
def this(flow: Flow[_], runCompleted: AtomicBoolean, hostPort: String) = {
this(flow, runCompleted, hostPort, false, null,
FlowTracker.HTTP_CONNECTION_TIMEOUT, FlowTracker.HTTP_SOCKET_TIMEOUT)
}
def this(flow: Flow[_], runCompleted: AtomicBoolean) = {
this(flow, runCompleted, "", false, null,
FlowTracker.HTTP_CONNECTION_TIMEOUT, FlowTracker.HTTP_SOCKET_TIMEOUT)
}
// Refuse to run if the server host is not using HTTPS
Try(new URI(this.serverHostPort).getScheme) match {
case Success("https") => // OK
case _ =>
sys.error(s"Invalid host ${this.serverHostPort}: Google Auth is only valid over https!")
}
@transient // should not generally happen, but do not allow credentials to be serialized
private val idToken: IdToken = IdToken(
audience = IdToken.getAudience(this.serverHostPort),
transport = FlowTracker.getHttpClient(httpConnectionTimeout, httpSocketTimeout),
serviceAccountJsonFile = Option(serviceAccountJsonFilename))
override def setAdditionalHeaders = Map( "Authorization" -> s"Bearer ${idToken.token}" )
}
| etsy/Sahale | flowtracker-gcp/src/main/scala/GoogleAuthFlowTracker.scala | Scala | mit | 9,172 |
package kidstravel.client.components
import diode.data.Pot
import diode.react.ModelProxy
import diode.react.ReactPot._
import japgolly.scalajs.react.extra.router.RouterCtl
import japgolly.scalajs.react.vdom.prefix_<^._
import japgolly.scalajs.react.{BackendScope, ReactComponentB}
import kidstravel.client.KidsTravelMain.{CityLoc, Loc}
import kidstravel.client.services.FlickrImage
import kidstravel.shared.geo.City
object CityTile {
case class Props(router: RouterCtl[Loc], proxy: ModelProxy[(City, Pot[FlickrImage])])
class Backend($: BackendScope[Props, Unit]) {
def render(props: Props) = {
val city = props.proxy()._1
val imgPot = props.proxy()._2
println(s"Rendering ${city.name} ($imgPot)")
<.div(
^.`class` := "col-lg-3",
imgPot.renderEmpty(<.p("Loading …")),
imgPot.renderPending(_ > 10, _ => <.p("Loading …")),
imgPot.renderReady(img =>
<.div(
^.backgroundImage := s"url(${img.url})",
^.backgroundSize := "cover",
^.height := 200.px,
^.marginBottom := 15.px,
<.h3(
^.padding := 5.px + " " + 10.px,
^.margin := 0.px,
^.color := "white",
^.backgroundColor := "rgba(0, 0, 0, 0.5)",
props.router.link(CityLoc(city.id))(city.name)(^.color := "white")
)
)
)
)
}
}
private def component = ReactComponentB[Props]("CityTile").
renderBackend[Backend].
build
def apply(router: RouterCtl[Loc], proxy: ModelProxy[(City, Pot[FlickrImage])]) =
component(Props(router, proxy))
}
| devkat/kidstravel | client/src/main/scala/kidstravel/client/components/CityTile.scala | Scala | apache-2.0 | 1,647 |
import info.fotm.util.MathVector
object ComparerTestHelpers {
implicit val comparer = new org.scalactic.Equality[MathVector] {
override def areEqual(a: MathVector, b: Any): Boolean =
b.isInstanceOf[MathVector] && a.coords == b.asInstanceOf[MathVector].coords
}
implicit val seqComparer = new org.scalactic.Equality[Seq[MathVector]] {
override def areEqual(as: Seq[MathVector], b: Any): Boolean = {
val bs = b.asInstanceOf[Seq[MathVector]]
as.size == bs.size &&
as.forall(a => bs.exists(b => comparer.areEqual(a, b)))
}
}
}
| Groz/fotm-info | core/src/test/scala/ComparerTestHelpers.scala | Scala | mit | 572 |
//package org.sireum.pilarform.astselect
//
//import org.sireum.pilarform.lexer._
//import org.sireum.pilarform.util.Range
//import org.sireum.pilarform.util.Utils._
//import scala.util.control.Exception._
//import org.sireum.pilarform.PilarVersions
//
//object AstSelector {
//
// /**
// * Expands the given selection in the source to the range of the closest appropriate
// * enclosing AST element. Returns None if the source does not parse correctly, or if
// * there is no strictly larger containing AST element.
// */
// def expandSelection(source: String, initialSelection: Range, pilarVersion: String = PilarVersions.DEFAULT_VERSION): Option[Range] =
// catching(classOf[PilarParserException]).toOption {
// new AstSelector(source, pilarVersion).expandSelection(initialSelection)
// }
//
// import Tokens._
//
// private val nonSelectableAstNodes: Set[Class[_ <: AstNode]] =
// Set(
// classOf[AccessQualifier],
// classOf[CasePattern],
// classOf[CatchClause],
// classOf[CondExpr],
// classOf[ElseClause],
// classOf[Enumerators],
// classOf[ExprFunBody],
// classOf[FunDefOrDcl],
// classOf[ParenArgumentExprs],
// classOf[GeneralTokens],
// classOf[Guard],
// classOf[ParamClause],
// classOf[ParamClauses],
// classOf[PatDefOrDcl],
// classOf[ProcFunBody],
// classOf[Template],
// classOf[TemplateBody],
// classOf[TemplateParents],
// classOf[TmplDef],
// classOf[TypeDefOrDcl],
// classOf[TypeExprElement],
// classOf[TypeParamClause])
//
//}
//
//class AstSelector(source: String, pilarVersion: String = PilarVersions.DEFAULT_VERSION) {
//
// import AstSelector._
//
// private val tokens = PilarLexer.tokenise(source, pilarVersion = pilarVersion)
//
// private val compilationUnitOpt: Option[CompilationUnit] = {
// val parser = new PilarParser(tokens.toArray)
// parser.safeParse(parser.compilationUnitOrScript)
// }
//
// private val allTokens: List[Token] = tokens.flatMap { token ⇒
// if (token.isNewline)
// token.associatedWhitespaceAndComments.rawTokens
// else
// token.associatedWhitespaceAndComments.rawTokens :+ token
// }
//
// private def previousToken(token: Token): Option[Token] =
// tokens.indexOf(token) match {
// case 0 | -1 ⇒ None
// case n ⇒ Some(tokens(n - 1))
// }
//
// def expandSelection(initialSelection: Range): Option[Range] =
// expandToToken(initialSelection) orElse
// (compilationUnitOpt flatMap { expandToEnclosingAst(_, initialSelection, enclosingNodes = Nil) })
//
// /**
// * If the selection is a strict subrange of some token, expand to the entire token.
// */
// private def expandToToken(initialSelection: Range): Option[Range] =
// allTokens.find { token ⇒
// isSelectableToken(token) && (token.range contains initialSelection) && initialSelection.length < token.length
// }.map(_.range)
//
// private def findAssociatedAstNode(pilardocCommentToken: Token): Option[AstNode] =
// compilationUnitOpt.flatMap { cu ⇒ findAssociatedAstNode(cu, pilardocCommentToken) }
//
// private def findAssociatedAstNode(nodeToSearch: AstNode, pilardocCommentToken: Token): Option[AstNode] =
// nodeToSearch.firstTokenOption flatMap { firstToken ⇒
// val hiddenTokens = getPriorHiddenTokens(firstToken)
// if (hiddenTokens.rawTokens.contains(pilardocCommentToken) && !nodeToSearch.isInstanceOf[CompilationUnit])
// Some(nodeToSearch)
// else {
// for {
// childNode ← nodeToSearch.immediateChildren
// result ← findAssociatedAstNode(childNode, pilardocCommentToken)
// } return Some(result)
// None
// }
// }
//
// private def isSelectableToken(token: Token) = {
// val tokenType = token.tokenType
// import tokenType._
// isLiteral || isKeyword || isComment || isId
// }
//
// /**
// * @return range of the node and any Pilardoc immediately before it
// */
// private def adjustedNodeRange(node: AstNode): Option[Range] =
// node.rangeOpt map { nodeRange ⇒
// nodeRange
// }
//
// /**
// * Attempt to find a suitable AST node to expand to which contains the given selection.
// *
// * @param enclosingNodes -- stack of nodes recording path to root compilation unit (useful for more context-aware
// * decisions about whether to expand to a node or not).
// */
// private def expandToEnclosingAst(node: AstNode, initialSelection: Range, enclosingNodes: List[AstNode]): Option[Range] = {
//
// val nodeRange = adjustedNodeRange(node).getOrElse { return None }
//
// if (!nodeRange.contains(initialSelection)) { return None }
//
// for {
// childNode ← node.immediateChildren
// descendantRange ← expandToEnclosingAst(childNode, initialSelection, enclosingNodes = node :: enclosingNodes)
// } return Some(descendantRange)
//
// if (nodeRange.strictlyContains(initialSelection) && isSelectableAst(node :: enclosingNodes))
// Some(nodeRange)
// else
// None
//
// }
//
// private def getPredecessorNewline(token: Token): Option[HiddenTokens] =
// tokens.indexOf(token) match {
// case 0 ⇒ None
// case n ⇒
// val previousToken = tokens(n - 1)
// if (previousToken.isNewline)
// Some(previousToken.associatedWhitespaceAndComments)
// else
// None
// }
//
// private def getPriorHiddenTokens(token: Token) = getPredecessorNewline(token) getOrElse token.associatedWhitespaceAndComments
//
// private def isSelectableAst(nodeStack: List[AstNode]) =
// nodeStack match {
// case List(_: BlockExpr, _: MatchExpr, _*) ⇒ false
// case List(_: BlockExpr, _: ProcFunBody, _*) ⇒ false
// case List(node, _*) ⇒ !(nonSelectableAstNodes contains node.getClass.asInstanceOf[Class[_ <: AstNode]])
// case Nil ⇒ false
// }
//
//} | fgwei/pilarform | pilarform/src/main/scala/org/sireum/pilarform/astselect/AstSelector.scala | Scala | epl-1.0 | 5,999 |
package ch.uzh.ifi.pdeboer.pplib.patterns
import ch.uzh.ifi.pdeboer.pplib.hcomp.{HCompPortalAdapter, MultipleChoiceAnswer, MultipleChoiceQuery}
import scala.concurrent.duration._
import scala.util.Random
/**
* Created by pdeboer on 24/10/14.
*/
class ContestExecutor[T](val driver: ContestDriver[T], val showsPerElement: Int = 3, val maxElementsPerGo: Int = 100) extends Serializable {
lazy val winner: T = {
do {
step()
} while (alternatives.minBy(_._2.numberOfShows)._2.numberOfShows < showsPerElement)
val candidates = winnerCandidates
if (candidates.size == 1)
candidates.head._2.alternative
else
voteOnTargetsAndReturnWinner(candidates.map(_._2)).alternative
}
protected val alternatives = driver.alternatives.zipWithIndex.map(a => a._2 -> new AlternativeDetails(a._1)).toMap
protected def winnerCandidates = {
val maxSelects = alternatives.maxBy(_._2.numberOfSelects)._2.numberOfSelects
alternatives.filter(_._2.numberOfSelects == maxSelects).toList
}
protected def step(): Unit = {
val target = alternatives.values.view.filter(_.numberOfShows < showsPerElement).map((_, new Random().nextDouble())).toList.sortBy(_._2).take(maxElementsPerGo)
target.foreach(e => {
e._1.numberOfShows += 1
})
val selectedAlt = voteOnTargetsAndReturnWinner(target.map(_._1))
selectedAlt.numberOfSelects += 1
}
protected def voteOnTargetsAndReturnWinner(target: List[AlternativeDetails]) = {
val selected = driver.castSingleVote(target.map(_.alternative))
val selectedAlt = target.find(_.alternative == selected).get //crash if not found is ok
selectedAlt
}
protected case class AlternativeDetails(alternative: T, var numberOfShows: Int = 0, var numberOfSelects: Int = 0)
}
trait ContestDriver[T] {
def alternatives: List[T]
def castSingleVote(options: List[T]): T
}
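// A hypothetical usage sketch (illustration only, not part of the PPLib API): it shows how a
// ContestDriver implementation plugs into ContestExecutor. The trivial driver below always
// votes for the first option and exists only to illustrate the contract.
object ContestExecutorSketch {
	private class FirstOptionDriver(val alternatives: List[String]) extends ContestDriver[String] {
		override def castSingleVote(options: List[String]): String = options.head
	}
	def bestOf(candidates: List[String]): String =
		new ContestExecutor(new FirstOptionDriver(candidates), showsPerElement = 3).winner
}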
class DefaultContestHCompDriver(
val alternatives: List[String],
hcompPortal: HCompPortalAdapter,
val question: String = "Please select the best element from this list",
val maxWait: Duration = 14 days) extends ContestDriver[String] {
override def castSingleVote(options: List[String]): String = {
hcompPortal.sendQueryAndAwaitResult(
MultipleChoiceQuery(question, options, 1, 1),
maxWaitTime = maxWait
).get.asInstanceOf[MultipleChoiceAnswer].selectedAnswer
}
} | uzh/PPLib | src/main/scala/ch/uzh/ifi/pdeboer/pplib/patterns/Contest.scala | Scala | mit | 2,340 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.io.File
import java.util.regex.Matcher
import java.util.regex.Pattern
import scala.collection.mutable.HashMap
import scala.util.Try
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapred.{Master, JobConf}
import org.apache.hadoop.security.Credentials
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.api.ApplicationConstants
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment
import org.apache.hadoop.yarn.api.records.{ApplicationAccessType, ContainerId, Priority}
import org.apache.hadoop.yarn.util.ConverterUtils
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.util.Utils
/**
* Contains util methods to interact with Hadoop from spark.
*/
class YarnSparkHadoopUtil extends SparkHadoopUtil {
private var tokenRenewer: Option[ExecutorDelegationTokenUpdater] = None
override def transferCredentials(source: UserGroupInformation, dest: UserGroupInformation) {
dest.addCredentials(source.getCredentials())
}
// Note that all params which start with SPARK are propagated all the way through, so if in yarn
// mode, this MUST be set to true.
override def isYarnMode(): Boolean = { true }
// Return an appropriate (subclass) of Configuration. Creating a config initializes some Hadoop
  // subsystems. Always create a new config, don't reuse yarnConf.
override def newConfiguration(conf: SparkConf): Configuration =
new YarnConfiguration(super.newConfiguration(conf))
// Add any user credentials to the job conf which are necessary for running on a secure Hadoop
// cluster
override def addCredentials(conf: JobConf) {
val jobCreds = conf.getCredentials()
jobCreds.mergeAll(UserGroupInformation.getCurrentUser().getCredentials())
}
override def getCurrentUserCredentials(): Credentials = {
UserGroupInformation.getCurrentUser().getCredentials()
}
override def addCurrentUserCredentials(creds: Credentials) {
UserGroupInformation.getCurrentUser().addCredentials(creds)
}
override def addSecretKeyToUserCredentials(key: String, secret: String) {
val creds = new Credentials()
creds.addSecretKey(new Text(key), secret.getBytes("utf-8"))
addCurrentUserCredentials(creds)
}
override def getSecretKeyFromUserCredentials(key: String): Array[Byte] = {
val credentials = getCurrentUserCredentials()
if (credentials != null) credentials.getSecretKey(new Text(key)) else null
}
/**
* Get the list of namenodes the user may access.
*/
def getNameNodesToAccess(sparkConf: SparkConf): Set[Path] = {
sparkConf.get("spark.yarn.access.namenodes", "")
.split(",")
.map(_.trim())
.filter(!_.isEmpty)
.map(new Path(_))
.toSet
}
def getTokenRenewer(conf: Configuration): String = {
val delegTokenRenewer = Master.getMasterPrincipal(conf)
logDebug("delegation token renewer is: " + delegTokenRenewer)
if (delegTokenRenewer == null || delegTokenRenewer.length() == 0) {
val errorMessage = "Can't get Master Kerberos principal for use as renewer"
logError(errorMessage)
throw new SparkException(errorMessage)
}
delegTokenRenewer
}
/**
* Obtains tokens for the namenodes passed in and adds them to the credentials.
*/
def obtainTokensForNamenodes(
paths: Set[Path],
conf: Configuration,
creds: Credentials,
renewer: Option[String] = None
): Unit = {
if (UserGroupInformation.isSecurityEnabled()) {
val delegTokenRenewer = renewer.getOrElse(getTokenRenewer(conf))
paths.foreach { dst =>
val dstFs = dst.getFileSystem(conf)
logInfo("getting token for namenode: " + dst)
dstFs.addDelegationTokens(delegTokenRenewer, creds)
}
}
}
private[spark] override def startExecutorDelegationTokenRenewer(sparkConf: SparkConf): Unit = {
tokenRenewer = Some(new ExecutorDelegationTokenUpdater(sparkConf, conf))
tokenRenewer.get.updateCredentialsIfRequired()
}
private[spark] override def stopExecutorDelegationTokenRenewer(): Unit = {
tokenRenewer.foreach(_.stop())
}
private[spark] def getContainerId: ContainerId = {
val containerIdString = System.getenv(ApplicationConstants.Environment.CONTAINER_ID.name())
ConverterUtils.toContainerId(containerIdString)
}
}
object YarnSparkHadoopUtil {
// Additional memory overhead
  // 10% was arrived at experimentally, in the interest of minimizing memory waste while covering
  // the common cases. Memory overhead tends to grow with container size.
val MEMORY_OVERHEAD_FACTOR = 0.10
val MEMORY_OVERHEAD_MIN = 384
val ANY_HOST = "*"
val DEFAULT_NUMBER_EXECUTORS = 2
// All RM requests are issued with same priority : we do not (yet) have any distinction between
// request types (like map/reduce in hadoop for example)
val RM_REQUEST_PRIORITY = Priority.newInstance(1)
def get: YarnSparkHadoopUtil = {
val yarnMode = java.lang.Boolean.valueOf(
System.getProperty("SPARK_YARN_MODE", System.getenv("SPARK_YARN_MODE")))
if (!yarnMode) {
throw new SparkException("YarnSparkHadoopUtil is not available in non-YARN mode!")
}
SparkHadoopUtil.get.asInstanceOf[YarnSparkHadoopUtil]
}
/**
* Add a path variable to the given environment map.
* If the map already contains this key, append the value to the existing value instead.
*/
def addPathToEnvironment(env: HashMap[String, String], key: String, value: String): Unit = {
val newValue = if (env.contains(key)) { env(key) + getClassPathSeparator + value } else value
env.put(key, newValue)
}
/**
* Set zero or more environment variables specified by the given input string.
* The input string is expected to take the form "KEY1=VAL1,KEY2=VAL2,KEY3=VAL3".
*/
def setEnvFromInputString(env: HashMap[String, String], inputString: String): Unit = {
if (inputString != null && inputString.length() > 0) {
val childEnvs = inputString.split(",")
val p = Pattern.compile(environmentVariableRegex)
for (cEnv <- childEnvs) {
val parts = cEnv.split("=") // split on '='
val m = p.matcher(parts(1))
val sb = new StringBuffer
while (m.find()) {
val variable = m.group(1)
var replace = ""
if (env.get(variable) != None) {
replace = env.get(variable).get
} else {
// if this key is not configured for the child .. get it from the env
replace = System.getenv(variable)
if (replace == null) {
            // the env key is not present anywhere .. simply set it
replace = ""
}
}
m.appendReplacement(sb, Matcher.quoteReplacement(replace))
}
m.appendTail(sb)
// This treats the environment variable as path variable delimited by `File.pathSeparator`
// This is kept for backward compatibility and consistency with Hadoop's behavior
addPathToEnvironment(env, parts(0), sb.toString)
}
}
}
private val environmentVariableRegex: String = {
if (Utils.isWindows) {
"%([A-Za-z_][A-Za-z0-9_]*?)%"
} else {
"\\\\$([A-Za-z_][A-Za-z0-9_]*)"
}
}
/**
* Escapes a string for inclusion in a command line executed by Yarn. Yarn executes commands
* using `bash -c "command arg1 arg2"` and that means plain quoting doesn't really work. The
* argument is enclosed in single quotes and some key characters are escaped.
*
* @param arg A single argument.
* @return Argument quoted for execution via Yarn's generated shell script.
*/
def escapeForShell(arg: String): String = {
if (arg != null) {
val escaped = new StringBuilder("'")
for (i <- 0 to arg.length() - 1) {
arg.charAt(i) match {
          case '$' => escaped.append("\\$")
          case '"' => escaped.append("\\\"")
          case '\'' => escaped.append("'\\''")
case c => escaped.append(c)
}
}
escaped.append("'").toString()
} else {
arg
}
}
def getApplicationAclsForYarn(securityMgr: SecurityManager)
: Map[ApplicationAccessType, String] = {
Map[ApplicationAccessType, String] (
ApplicationAccessType.VIEW_APP -> securityMgr.getViewAcls,
ApplicationAccessType.MODIFY_APP -> securityMgr.getModifyAcls
)
}
/**
* Expand environment variable using Yarn API.
* If environment.$$() is implemented, return the result of it.
* Otherwise, return the result of environment.$()
* Note: $$() is added in Hadoop 2.4.
*/
private lazy val expandMethod =
Try(classOf[Environment].getMethod("$$"))
.getOrElse(classOf[Environment].getMethod("$"))
def expandEnvironment(environment: Environment): String =
expandMethod.invoke(environment).asInstanceOf[String]
/**
* Get class path separator using Yarn API.
* If ApplicationConstants.CLASS_PATH_SEPARATOR is implemented, return it.
* Otherwise, return File.pathSeparator
* Note: CLASS_PATH_SEPARATOR is added in Hadoop 2.4.
*/
private lazy val classPathSeparatorField =
Try(classOf[ApplicationConstants].getField("CLASS_PATH_SEPARATOR"))
.getOrElse(classOf[File].getField("pathSeparator"))
def getClassPathSeparator(): String = {
classPathSeparatorField.get(null).asInstanceOf[String]
}
}
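// A hypothetical usage sketch (illustration only, not part of the Spark API): it exercises the
// two string helpers above. `setEnvFromInputString` parses "KEY1=VAL1,KEY2=VAL2" pairs and
// expands $VAR references from the map (or the system environment); `escapeForShell`
// single-quotes an argument for the `bash -c` command line that YARN generates.
private[yarn] object YarnSparkHadoopUtilUsageSketch {
  def example(): (HashMap[String, String], String) = {
    val env = new HashMap[String, String]()
    YarnSparkHadoopUtil.setEnvFromInputString(env, "JAVA_HOME=/usr/java,SPARK_LOG_DIR=$LOG_DIR/spark")
    val quoted = YarnSparkHadoopUtil.escapeForShell("""price is "only" $5""")
    (env, quoted)
  }
}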
| ArvinDevel/onlineAggregationOnSparkV2 | yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala | Scala | apache-2.0 | 10,385 |
package documentation
import org.qirx.littlespec.Specification
object Test extends Specification {
"#Title" - {
"test" - {
1 + 1 is 2
}
"example" - {
example {
1 + 1 is 2
}
}
}
} | EECOLOR/little-spec | extra/documentation/src/sbt-test/test-dependencies/sbt-extra-documentation/testClasses/documentation/Test.scala | Scala | mit | 227 |
package org.pblue.asyncpools
class AsyncPoolsException(msg: String, cause: Throwable = null) extends Exception(msg, cause) | privateblue/asyncpools | src/main/scala/org/pblue/asyncpools/AsyncPoolsException.scala | Scala | mit | 123 |
package dotty.tools.dotc
package transform
import core._
import TreeTransforms._
import Contexts.Context
import Flags._
import SymUtils._
import Symbols._
import SymDenotations._
import Types._
import Decorators._
import DenotTransformers._
import StdNames._
import NameOps._
import Phases._
import ast.untpd
import ast.Trees._
import collection.mutable
/** Rewrite calls
*
* super[M].f(args)
*
* where M is a Scala2 trait implemented by the current class to
*
* M$class.f(this, args)
*
* provided the implementation class M$class defines a corresponding function `f`.
*/
class LinkScala2ImplClasses extends MiniPhaseTransform with IdentityDenotTransformer { thisTransform =>
import ast.tpd._
override def phaseName: String = "linkScala2ImplClasses"
override def runsAfter: Set[Class[_ <: Phase]] = Set(classOf[Mixin])
override def transformApply(app: Apply)(implicit ctx: Context, info: TransformerInfo) = {
def currentClass = ctx.owner.enclosingClass.asClass
app match {
case Apply(sel @ Select(Super(_, _), _), args)
if sel.symbol.owner.is(Scala2xTrait) && currentClass.mixins.contains(sel.symbol.owner) =>
val impl = implMethod(sel.symbol)
if (impl.exists) Apply(ref(impl), This(currentClass) :: args).withPos(app.pos)
else app // could have been an abstract method in a trait linked to from a super constructor
case _ =>
app
}
}
private def implMethod(meth: Symbol)(implicit ctx: Context): Symbol =
meth.owner.implClass.info
.decl(if (meth.isConstructor) nme.TRAIT_CONSTRUCTOR else meth.name)
.suchThat(c => FullParameterization.memberSignature(c.info) == meth.signature)
.symbol
private val Scala2xTrait = allOf(Scala2x, Trait)
} | yusuke2255/dotty | src/dotty/tools/dotc/transform/LinkScala2ImplClasses.scala | Scala | bsd-3-clause | 1,757 |
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.blaze.channel.nio1
import java.nio.channels.Selector
import java.util.concurrent.ThreadFactory
import java.util.concurrent.atomic.AtomicLong
import scala.annotation.tailrec
/** Provides a fixed size pool of [[SelectorLoop]]s, distributing work in a round robin fashion */
final class FixedSelectorPool(
poolSize: Int,
bufferSize: Int,
threadFactory: ThreadFactory
) extends SelectorLoopPool {
require(poolSize > 0, s"Invalid pool size: $poolSize")
private[this] val next = new AtomicLong(0L)
private[this] val loops = Array.fill(poolSize) {
new SelectorLoop(Selector.open(), bufferSize, threadFactory)
}
@tailrec
def nextLoop(): SelectorLoop = {
val i = next.get
if (next.compareAndSet(i, (i + 1L) % poolSize)) loops(i.toInt)
else nextLoop()
}
def close(): Unit = loops.foreach(_.close())
}
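// A hypothetical wiring sketch (illustration only, not part of the blaze API): pool size,
// buffer size and the JDK default thread factory are arbitrary illustrative choices.
private[nio1] object FixedSelectorPoolSketch {
  def default(): FixedSelectorPool =
    new FixedSelectorPool(
      poolSize = Runtime.getRuntime.availableProcessors(),
      bufferSize = 64 * 1024,
      threadFactory = java.util.concurrent.Executors.defaultThreadFactory()
    )
}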
| http4s/blaze | core/src/main/scala/org/http4s/blaze/channel/nio1/FixedSelectorPool.scala | Scala | apache-2.0 | 1,455 |
package akka.persistence.hbase.snapshot
import akka.testkit.{ TestKit, TestProbe }
import akka.actor.{ ActorLogging, Props, ActorRef, ActorSystem }
import org.scalatest.{ BeforeAndAfterAll, FlatSpecLike }
import akka.persistence._
import akka.persistence.hbase.journal.{ HBaseClientFactory, HBaseJournalInit }
import org.apache.hadoop.hbase.client.HBaseAdmin
import concurrent.duration._
import akka.persistence.SaveSnapshotFailure
import akka.persistence.SaveSnapshotSuccess
import akka.persistence.SnapshotMetadata
import com.typesafe.config.{ ConfigFactory, Config }
import org.apache.hadoop.fs.{ Path, FileSystem }
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.permission.{ FsPermission, FsAction }
object HadoopSnapshotStoreSpec {
class SnapshottingActor(probe: ActorRef, override val processorId: String) extends Processor with ActorLogging {
var data = List[String]()
def receive = {
// snapshot making ------------------------------------------------------
case x: String =>
log.info("Prepending: " + x)
data ::= x
case ShowData =>
log.info("Show data : " + data)
probe ! "show"
case MakeSnapshot =>
log.info("Starting snapshot creation: " + data)
saveSnapshot(data)
probe ! "making"
case SaveSnapshotSuccess(meta) =>
log.info("save success, desc = " + meta)
probe ! SnapshotOk(meta)
case SaveSnapshotFailure(meta, reason) =>
log.info("failure: " + meta)
probe ! SnapshotFail(meta, reason)
// end of snapshot making -----------------------------------------------
// snapshot offers ------------------------------------------------------
case SnapshotOffer(metadata, offeredSnapshot) =>
log.info("Offer: " + metadata + ", data: " + offeredSnapshot)
data = offeredSnapshot.asInstanceOf[List[String]]
log.info("Offered data : " + data.toString())
probe ! WasOfferedSnapshot(data)
case DeleteSnapshot(toSeqNr) =>
log.info("Delete, to: " + toSeqNr)
deleteSnapshot(toSeqNr, System.currentTimeMillis())
// end of snapshot offers ------------------------------------------------
}
}
case object MakeSnapshot
case object ShowData
case class DeleteSnapshot(toSeqNr: Long)
case class WasOfferedSnapshot(data: List[String])
case class SnapshotOk(meta: SnapshotMetadata)
case class SnapshotFail(meta: SnapshotMetadata, reason: Throwable)
}
trait HadoopSnapshotBehavior {
self: TestKit with FlatSpecLike with BeforeAndAfterAll =>
def config: Config
val timeout = 5.seconds
import HadoopSnapshotStoreSpec._
val hadoopSnapshotStore = {
it should "store a snapshot" in {
// given
val probe = TestProbe()
val actor = system.actorOf(Props(classOf[SnapshottingActor], probe.ref, "snap1"))
// when
actor ! "a"
actor ! "b"
actor ! "c"
actor ! MakeSnapshot
// then
probe.expectMsg(max = 30.seconds, "making")
val ok = probe.expectMsgType[SnapshotOk](max = 15.seconds)
info(s"Snapshot successful: $ok")
}
it should "be offered a snapshot from the previous test (a, b, c)" in {
// given
val probe = TestProbe()
val actor = system.actorOf(Props(classOf[SnapshottingActor], probe.ref, "snap1"))
// then
probe.expectMsg(max = 20.seconds, WasOfferedSnapshot(List("c", "b", "a")))
actor ! ShowData
probe.expectMsg(max = 20.seconds, "show")
}
it should "be able to delete a snapshot, so it won't be replayed again" in {
// given
val probe = TestProbe()
val actor = system.actorOf(Props(classOf[SnapshottingActor], probe.ref, "snap1"))
Thread.sleep(1000)
// when
actor ! DeleteSnapshot(3)
Thread.sleep(1000)
// then
val actor2 = system.actorOf(Props(classOf[SnapshottingActor], probe.ref, "snap1"))
Thread.sleep(1000)
expectNoMsg(2.seconds) // we deleted the snapshot, nothing there to replay
actor2 ! "d"
expectNoMsg(max = 5.seconds)
val actor3 = system.actorOf(Props(classOf[SnapshottingActor], probe.ref, "snap1"))
expectNoMsg(max = 5.seconds) // we didn't snapshot, and it's not persistent
}
}
}
/*
 * The following two specs can't be executed concurrently. Comment out one and run the other;
 * the spec being run must configure the corresponding "hadoop-snapshot-store.impl" value.
*/
class HdfsSnapshotStoreSpec extends TestKit(ActorSystem("hdfs-test")) with FlatSpecLike with BeforeAndAfterAll
with HadoopSnapshotBehavior {
behavior of "HdfsSnapshotStore"
  // This operation does not work
def config: Config = ConfigFactory.parseString(
s"""hadoop-snapshot-store.impl = "${classOf[HdfsSnapshotter].getCanonicalName}" """
).withFallback(system.settings.config)
override protected def afterAll() {
super.afterAll()
system.shutdown()
}
override protected def beforeAll() {
val conf = new Configuration
conf.set("fs.default.name", config.getString("hadoop-snapshot-store.hdfs-default-name"))
    // Sleep to wait for HBaseAsyncJournalSpec to finish, or the FileSystem may be closed by HBaseAsyncJournalSpec's teardown
Thread.sleep(3000)
val fs = FileSystem.get(conf)
val path = new Path(config.getString("hadoop-snapshot-store.snapshot-dir"))
fs.delete(path, true)
fs.mkdirs(path)
fs.setPermission(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL))
fs.close()
}
it should behave like hadoopSnapshotStore
}
// Commented out to test HdfsSnapshotStoreSpec; the two specs can't be tested together
//class HBaseSnapshotStoreSpec extends TestKit(ActorSystem("hbase-test")) with FlatSpecLike with BeforeAndAfterAll
// with HadoopSnapshotBehavior {
//
// behavior of "HBaseSnapshotStore"
//
// override protected def beforeAll() {
// val tableName = config.getString("hadoop-snapshot-store.table")
// val admin = new HBaseAdmin(HBaseJournalInit.getHBaseConfig(config, "hadoop-snapshot-store"))
// if (admin.tableExists(tableName)) {
// admin.disableTable(tableName)
// admin.deleteTable(tableName)
// admin.close()
// }
// HBaseJournalInit.createTable(config, "hadoop-snapshot-store")
// }
//
// override protected def afterAll() {
// HBaseClientFactory.reset()
// system.shutdown()
// }
//
//  // This operation does not work
// def config: Config = ConfigFactory.parseString(
// s"""hadoop-snapshot-store.impl = "${classOf[HBaseSnapshotter].getCanonicalName}" """
// ).withFallback(system.settings.config)
//
// it should behave like hadoopSnapshotStore
//
//}
| hossx/akka-persistence-hbase | src/test/scala/akka/persistence/hbase/snapshot/HadoopSnapshotStoreSpec.scala | Scala | apache-2.0 | 6,662 |
package com.truecar.mleap.runtime.transformer
import com.truecar.mleap.core.feature.StandardScaler
import com.truecar.mleap.runtime.attribute.AttributeSchema
import com.truecar.mleap.runtime.transformer.builder.TransformBuilder
import com.truecar.mleap.runtime.types.VectorType
import com.truecar.mleap.runtime.transformer.builder.TransformBuilder.Ops
import scala.util.Try
/**
* Created by hwilkins on 10/23/15.
*/
case class StandardScalerModel(inputCol: String,
outputCol: String,
scaler: StandardScaler) extends Transformer {
override def build[TB: TransformBuilder](builder: TB): Try[TB] = {
builder.withInput(inputCol, VectorType).flatMap {
case (b, inputIndex) =>
b.withOutput(outputCol, VectorType)(row => scaler(row.getVector(inputIndex)))
}
}
override def transformAttributeSchema(schema: AttributeSchema): AttributeSchema = {
schema.withField(outputCol, schema(inputCol))
}
}
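// A hypothetical usage sketch (illustration only, not part of the MLeap API): typical
// construction of the transformer, assuming a fitted core StandardScaler; column names are
// illustrative.
object StandardScalerModelSketch {
  def forFeatures(scaler: StandardScaler): StandardScalerModel =
    StandardScalerModel(inputCol = "features", outputCol = "scaled_features", scaler = scaler)
}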
| TrueCar/mleap | mleap-runtime/src/main/scala/com/truecar/mleap/runtime/transformer/StandardScalerModel.scala | Scala | apache-2.0 | 991 |