code (string, 5-1M) | repo_name (string, 5-109) | path (string, 6-208) | language (1 class) | license (15 classes) | size (int64, 5-1M)
---|---|---|---|---|---|
package com.arcusys.valamis.persistence.common
import slick.driver.JdbcProfile
import slick.jdbc.JdbcBackend
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
/**
* Created by pkornilov on 20.04.16.
*/
trait DatabaseLayer { self: SlickProfile =>
import driver.api._
def execSync[T](action: DBIO[T]): T
def execAsync[T](action: DBIO[T]): Future[T]
def execSyncInTransaction[T](action: DBIO[T]): T
}
object DatabaseLayer {
import slick.dbio.DBIO
val dbTimeout = 90 seconds
def sequence[T](actions: Seq[DBIO[T]]): DBIO[Seq[T]] =
DBIO.sequence(actions)
implicit class DBActionOpts[A](val action: DBIO[Option[A]]) extends AnyVal {
def ifSomeThen[B](g: A => DBIO[B])(implicit cxt: ExecutionContext): DBIO[Option[B]] =
action flatMap (_ map g moveOption) // Option[A] => Option[DBIO[B]] => DBIO[Option[B]] via moveOption below
}
implicit class OptionDBActionOpts[A](val a: Option[DBIO[A]]) extends AnyVal {
def moveOption(implicit cxt: ExecutionContext): DBIO[Option[A]] = a match {
case Some(f) => f map (Some(_))
case None => DBIO.successful(None)
}
}
}
class Slick3DatabaseLayer(val db: JdbcBackend#DatabaseDef,
val driver: JdbcProfile)
extends DatabaseLayer
with SlickProfile {
import DatabaseLayer.dbTimeout
import driver.api._
override def execAsync[T](action: DBIO[T]): Future[T] = {
db.run(action)
}
override def execSync[T](action: DBIO[T]): T = {
Await.result(db.run(action), dbTimeout)
}
override def execSyncInTransaction[T](action: DBIO[T]): T = {
Await.result(db.run(action.transactionally), dbTimeout)
}
}
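// Illustrative usage sketch (not part of the original file). `db`, `planeId`,
// `planesTable` and `passengersTable` are hypothetical names; the point is how the
// helpers above compose: ifSomeThen chains a dependent action onto an optional result,
// and execSync blocks for at most dbTimeout.
//
//   val layer = new Slick3DatabaseLayer(db, slick.driver.H2Driver)
//   import layer.driver.api._
//   import DatabaseLayer._
//
//   val action: DBIO[Option[Int]] =
//     planesTable.filter(_.id === planeId).result.headOption
//       .ifSomeThen(plane => passengersTable.filter(_.planeId === plane.id).length.result)
//
//   val passengerCount: Option[Int] = layer.execSync(action)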
| igor-borisov/valamis | valamis-slick-support/src/main/scala/com/arcusys/valamis/persistence/common/DatabaseLayer.scala | Scala | gpl-3.0 | 1,634 |
package zzb.domain
import spray.http.StatusCodes._
import spray.json.{JsString, JsonParser}
import spray.routing.MalformedRequestContentRejection
import zzb.datatype._
import zzb.domain.plane._
/**
* Created by Simon on 2014/5/15
*/
class DirectAlterTest extends PlaneHttpTestBase {
var revFirstSave = VersionRevise(0, 0)
"Plane Http Api " - {
"可以创建一个新的领域对象 " in {
user(Post("/api/planes", entity(TString("simon").json))) ~> check {
status mustBe OK
pid = JsonParser(body.asString).asInstanceOf[JsString].value
pid.length mustBe 5
}
user(Get(s"/api/planes/$pid/latest/state")) ~> check {
status mustBe OK
PlaneState.fromJsValue(JsonParser(body.asString)).idx mustBe 1 //Stop
}
}
"直接Alter 动作无权限时会被拒绝 " in {
val tempPlane = Plane(Plane.id := pid, Plane.owner := "Simon")
// directly replace the entire document
user(Put(s"/api/planes/$pid/alter",tempPlane.json)) ~> check {
status mustBe Forbidden
val alter = ActionResult.format.read(JsonParser(body.asString))
alter.msg mustBe "用户任何时候都无权修改乘客数据"
alter.param mustBe -1
}
val tempFoods = Foods(Foods.water := 2,Foods.bread := 2)
// directly modify part of the data
user(Put(s"/api/planes/$pid/alter/foods",tempFoods.json)) ~> check {
status mustBe Forbidden
val alter = ActionResult.format.read(JsonParser(body.asString))
alter.msg mustBe "乘客不能修改食物数量"
alter.param mustBe -1
}
}
"可以请求修改数据结构中某一个节点以下的内容" in {
manager(Put(s"/api/planes/$pid/alter/foods/water", TInt(5).json)) ~> check {
status mustBe OK
}
user(Get(s"/api/planes/$pid/latest/foods/water")) ~> check {
status mustBe OK
TInt.fromJsValue(JsonParser(body.asString)).value mustBe 5
}
}
"可以在修改数据的同时要求对当前版本打标记" in {
manager(Put(s"/api/planes/$pid/alter/foods/water?tag=t1", TInt(666).json)) ~> check {
status mustBe OK
}
user(Get(s"/api/planes/$pid/latest/foods/water")) ~> check {
status mustBe OK
TInt.fromJsValue(JsonParser(body.asString)).value mustBe 666
}
manager(Put(s"/api/planes/$pid/alter/foods/water", TInt(5).json)) ~> check {
status mustBe OK
}
user(Get(s"/api/planes/$pid/latest/foods/water")) ~> check {
status mustBe OK
TInt.fromJsValue(JsonParser(body.asString)).value mustBe 5
}
user(Get(s"/api/planes/$pid/tag/t1/foods/water")) ~> check {
status mustBe OK
TInt.fromJsValue(JsonParser(body.asString)).value mustBe 666
}
}
"有其他变更会话在进行时,如果修改的数据有重叠路径会报冲突(409)" in {
//管理员请求创建一个新的Alter会话,要求修改部分数据
manager(Post(s"/api/planes/$pid/alter/foods")) ~> check {
status mustBe OK
val alter = ActionResult.format.read(JsonParser(body.asString))
alterSeq = alter.param
alterSeq must be > 0
}
manager(Put(s"/api/planes/$pid/alter/foods/water", TInt(5).json)) ~> check {
status mustBe Conflict
}
manager(Post(s"/api/planes/$pid/alter/$alterSeq")) ~> check {
status mustBe OK // commit the changes
}
}
"可以删除数据结构中的某一个节点的内容" in {
//删除数据
manager(Delete(s"/api/planes/$pid/alter/foods/water")) ~> check {
status mustBe OK
}
user(Get(s"/api/planes/$pid/latest/foods/water")) ~> check {
status mustBe NotFound
}
}
"无法用变更API删除整个文档" in {
manager(Delete(s"/api/planes/$pid/alter")) ~> check {
status mustBe Forbidden
}
}
"不能删除必填字段" in {
manager(Delete(s"/api/planes/$pid/alter/id")) ~> check {
status mustBe BadRequest
}
}
val p1 = Passenger(Passenger.name := "Simon")
val p2 = Passenger(Passenger.name := "Jack")
val p3 = Passenger(Passenger.name := "Mike")
import Plane.{Passengers,Cargos,Vips}
"可以控制特定状态下拒绝数据修改" in {
manager(Put(s"/api/planes/$pid/alter/passengers", Passengers(List(p1,p2)).json)) ~> check {
status mustBe OK // while the plane is stopped the manager may modify passenger data
}
manager(Post(s"/api/planes/$pid/action/slide")) ~> check {
status mustBe OK // the stop state accepts the slide command and transitions to the Slide state
ActionResult.format.read(JsonParser(body.asString)).param mustBe 2 //Slide
}
manager(Put(s"/api/planes/$pid/alter/passengers", Passengers(List(p1,p2)).json)) ~> check {
status mustBe Forbidden // while the plane is sliding, even the manager may not modify passenger data
}
manager(Post(s"/api/planes/$pid/action/stop")) ~> check {
status mustBe OK // the slide state accepts the stop command and transitions back to the Stop state
ActionResult.format.read(JsonParser(body.asString)).param mustBe 1 //Stop
}
}
"可以在提交修改的同时执行命令" in {
manager(Get(s"/api/planes/$pid/latest/passengers")) ~> check {
status mustBe OK
Passengers.fromJsValue(JsonParser(body.asString)).length mustBe 2
}
manager(Put(s"/api/planes/$pid/alter/passengers?action=slide", Passengers(List(p1,p2,p3)).json)) ~> check {
status mustBe OK // while stopped the manager may modify passenger data; the commit also transitions to the Slide state
val ddd = body.asString
ActionResult.format.read(JsonParser(body.asString)).param mustBe 2 //Slide
}
user(Get(s"/api/planes/$pid/latest/passengers")) ~> check {
status mustBe OK
Passengers.fromJsValue(JsonParser(body.asString)).length mustBe 3
}
// on transition to the Slide state the speed automatically becomes 10
manager(Get(s"/api/planes/$pid/latest/speed")) ~> check {
status mustBe OK
TInt.fromJsValue(JsonParser(body.asString)).value mustBe 10
}
manager(Post(s"/api/planes/$pid/action/stop")) ~> check {
status mustBe OK
ActionResult.format.read(JsonParser(body.asString)).param mustBe 1 //Stop
}
// on transition to the Stop state the speed automatically becomes 0
manager(Get(s"/api/planes/$pid/latest/speed")) ~> check {
status mustBe OK
TInt.fromJsValue(JsonParser(body.asString)).value mustBe 0
}
}
"提交数据修改同时执行命令,命令如果无权执行,数据修改也会放弃" in {
//用户无权转到 slide 状态
user(Post(s"/api/planes/$pid/action/slide")) ~> check {
status mustBe Forbidden
}
user(Put(s"/api/planes/$pid/alter/memo?action=slide", TString("还不错").json)) ~> check {
status mustBe Forbidden // the user has no permission to transition to the slide state
ActionResult.format.read(JsonParser(body.asString)).param mustBe 1 //Stop
}
}
"数据修改时,之前注册的监控者会得到通知" in {
manager(Put(s"/api/planes/$pid/alter/owner", TString("God").json)) ~> check {
status mustBe OK
}
Thread.sleep(1000)
manager(Get(s"/api/planes/$pid/latest/owner")) ~> check {
status mustBe OK
TString.fromJsValue(JsonParser(body.asString)).value mustBe "hello God"
}
}
"提交的 json格式不正确会报400错误 " in {
manager(Put(s"/api/planes/$pid/alter/foods", "ddd")) ~> check {
rejection mustBe a[MalformedRequestContentRejection]
}
}
"列表操作,内部数据类型为ValuePack" in {
// manager(Post(s"/api/planes/$pid/alter")) ~> check {
// status mustBe OK
// val alter = ActionResult.format.read(JsonParser(body.asString))
// alterSeq = alter.param
// alterSeq must be > 0
// }
val p1 = Passenger(Passenger.name := "simon")
val p2 = Passenger(Passenger.name := "jack")
val ps = Passengers(List(p1, p2))
manager(Put(s"/api/planes/$pid/alter/passengers", ps.json)) ~> check {
status mustBe OK
}
manager(Get(s"/api/planes/$pid/latest/passengers")) ~> check {
status mustBe OK
Passengers.fromJsValue(JsonParser(body.asString)).value.size mustBe 2
}
manager(Get(s"/api/planes/$pid/latest/passengers/@size")) ~> check {
status mustBe OK
Integer.parseInt(body.asString) mustBe 2
}
manager(Delete(s"/api/planes/$pid/alter/passengers/0")) ~> check {
status mustBe OK
}
manager(Get(s"/api/planes/$pid/latest/passengers")) ~> check {
status mustBe OK
Passengers.fromJsValue(JsonParser(body.asString)).value.size mustBe 1
}
manager(Get(s"/api/planes/$pid/latest/passengers/@size")) ~> check {
status mustBe OK
body.asString mustBe "1"
}
manager(Delete(s"/api/planes/$pid/alter/passengers/0")) ~> check {
status mustBe OK
}
manager(Get(s"/api/planes/$pid/latest/passengers")) ~> check {
status mustBe OK
Passengers.fromJsValue(JsonParser(body.asString)).value.size mustBe 0
}
manager(Delete(s"/api/planes/$pid/alter/passengers/0")) ~> check {
status mustBe BadRequest
}
}
"Map 类型操作,值类型是 Boolean" in {
val cs = Vips(Map("simon" -> true, "jack" -> false,"vivian" -> true))
manager(Put(s"/api/planes/$pid/alter/vips?merge=replace", cs.json)) ~> check {
status mustBe OK
}
manager(Get(s"/api/planes/$pid/latest/vips")) ~> check {
status mustBe OK
val res = Vips.fromJsValue(JsonParser(body.asString))
res.size mustBe 3
}
manager(Get(s"/api/planes/$pid/latest/vips/@size")) ~> check {
status mustBe OK
body.asString mustBe "3"
}
manager(Get(s"/api/planes/$pid/latest/vips/simon")) ~> check {
val msg = body.asString
status mustBe OK
msg mustBe "true"
}
manager(Delete(s"/api/planes/$pid/alter/vips/simon")) ~> check {
val msg = body.asString
status mustBe OK
}
manager(Get(s"/api/planes/$pid/latest/vips")) ~> check {
val msg = body.asString
status mustBe OK
val res = Vips.fromJsValue(JsonParser(msg))
res.size mustBe 2
}
manager(Put(s"/api/planes/$pid/alter/vips/abc", "true")) ~> check {
status mustBe OK
}
manager(Get(s"/api/planes/$pid/latest/vips/abc")) ~> check {
val msg = body.asString
status mustBe OK
msg mustBe "true"
}
}
}
}
| xiefeifeihu/zzb | zzb-domain/src/test/scala/zzb/domain/DirectAlterTest.scala | Scala | mit | 10,766 |
package org.eichelberger.sfseize.api
/******************************************************
* discretizers/cells
******************************************************/
// okay, so these aren't just operations on "continuous" spaces,
// but also on linear/uniform distributions of continuous values...
case class ContinuousFieldRange(minimum: Double, maximum: Double, override val isMaximumInclusive: Boolean = false)
extends FieldRange[Double]
case class ContinuousSpace(ranges: Seq[ContinuousFieldRange]) extends Space[Double]
case class ContinuousDiscretizer(override val name: String, range: ContinuousFieldRange, cardinality: Long) extends Discretizer[Double] {
val minimum = range.minimum
val maximum = range.maximum
val rangeSize = range.maximum - range.minimum
val binSize = rangeSize / cardinality.toDouble
def conditionedDatum(data: Double): Double =
Math.min(Math.max(data, range.minimum), range.maximum)
def conditionedBin(bin: Long): Long =
Math.min(Math.max(bin, 0), cardinality)
// should return a bin index on [0, c-1]
def discretize(data: Double): Long = {
Math.min(cardinality - 1, Math.floor((conditionedDatum(data) - range.minimum) / binSize).toLong)
}
// should return a range of [min, max)
def undiscretize(rawBin: Long): Space[Double] = {
val bin = conditionedBin(rawBin)
ContinuousSpace(
Seq(
ContinuousFieldRange(
range.minimum + bin * binSize,
range.minimum + (bin + 1.0) * binSize,
isMaximumInclusive = bin == cardinality - 1
)
)
)
}
}
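// Worked example (illustrative, not in the original source): a discretizer over [0, 10)
// with cardinality 5 has binSize = 2.0, so
//   ContinuousDiscretizer("x", ContinuousFieldRange(0.0, 10.0), 5).discretize(7.3) == 3
// and undiscretize(3) covers [6.0, 8.0); out-of-range inputs are clamped first, so
// discretize(42.0) lands in the last bin, index 4.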
/******************************************************
* single curves
******************************************************/
case class RowMajorCurve(children: Seq[DiscreteSource]) extends Curve {
override def baseName: String = "RowMajorCurve"
// row-major accepts all positive cardinalities
def accepts(cardinalities: Seq[Long]): Boolean = cardinalities.forall(Curve.acceptNonZero)
def placeValues: Seq[Long] =
(for (i <- 1 until numChildren) yield cardinalities.slice(i, numChildren).product) ++ Seq(1L)
override def encode(point: Seq[Long]): Long =
point.zip(placeValues).foldLeft(0L)((acc, t) => t match {
case (coordinate, placeValue) => acc + coordinate * placeValue
})
override def decode(index: Long): Seq[Long] =
(0 until numChildren).foldLeft((index, Seq[Long]()))((acc, i) => acc match {
case (remainder, seqSoFar) =>
val value = remainder / placeValues(i)
(remainder - value * placeValues(i), seqSoFar ++ Seq(value))
})._2
}
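// Worked example (illustrative, not in the original source): for children with
// cardinalities Seq(4, 3), placeValues == Seq(3, 1), so
//   encode(Seq(2, 1)) == 2 * 3 + 1 * 1 == 7   and   decode(7) == Seq(2, 1)
// i.e. the last coordinate varies fastest, exactly like row-major array indexing.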
case class PeanoCurve2D(children: Seq[DiscreteSource]) extends Curve {
override def baseName: String = "PeanoCurve2D"
// there are only four possible orientations of the 9-square unit;
// orientation# -> row-major index
val orientations = Map(
0 -> Seq(0, 5, 6, 1, 4, 7, 2, 3, 8),
1 -> Seq(6, 5, 0, 7, 4, 1, 8, 3, 2),
2 -> Seq(2, 3, 8, 1, 4, 7, 0, 5, 6),
3 -> Seq(8, 3, 2, 7, 4, 1, 6, 5, 0)
)
// Peano has an easy substitution pattern for orientations
// as you recurse down levels of detail;
// orientation# -> row-major set of orientations used at the next level of precision
val orientationMap = Map(
0 -> Seq(0, 2, 0, 1, 3, 1, 0, 2, 0),
1 -> Seq(1, 3, 1, 0, 2, 0, 1, 3, 1),
2 -> Seq(2, 0, 2, 3, 1, 3, 2, 0, 2),
3 -> Seq(3, 1, 1, 2, 0, 2, 3, 1, 3)
)
// Peano accepts all cardinalities that are powers of 3;
// for fun, let's also limit it to 2D squares
def accepts(cardinalities: Seq[Long]): Boolean =
cardinalities.length == 2 && cardinalities.forall(Curve.acceptPowerOf(_, 3)) && isSquare
// how many levels of recursion there are
def levels: Int = Math.round(Math.log(cardinalities.head) / Math.log(3.0)).toInt
override def encode(point: Seq[Long]): Long = {
require(point.length == numChildren)
def seek(p: Seq[Long], orientation: Int, recursesLeft: Int = levels): Long = {
require(recursesLeft >= 1, s"$name went weird: recurses left is $recursesLeft")
if (recursesLeft == 1) {
// you've bottomed out
val offset = 3 * p(0).toInt + p(1).toInt
orientations(orientation)(offset)
} else {
// you have further to recurse
val unitSize: Long = Math.round(Math.pow(3, recursesLeft - 1))
val thisY = (p(0) / unitSize).toInt
val thisX = (p(1) / unitSize).toInt
val nextOrientation = orientationMap(orientation)(thisY * 3 + thisX)
val nextY = p(0) % unitSize
val nextX = p(1) % unitSize
val steps = orientations(orientation)(thisY * 3 + thisX)
val basis = steps * unitSize * unitSize
basis + seek(Seq(nextY, nextX), nextOrientation, recursesLeft - 1)
}
}
// the top-level orientation is always #0
seek(point, 0)
}
override def decode(index: Long): Seq[Long] = {
require(index >= 0L, s"$name.decode($index) underflow")
require(index < cardinality, s"$name.decode($index) overflow")
def seek(Y: Long, X: Long, min: Long, orientation: Int, recursesLeft: Int = levels): Seq[Long] = {
if (recursesLeft == 1) {
// bottom out
val steps = index - min
val offset = orientations(orientation).indexOf(steps) // TODO expedite
val y = Y + offset / 3
val x = X + offset % 3
Seq(y, x)
} else {
// keep recursing
val unitSize: Long = Math.round(Math.pow(3, recursesLeft - 1))
val span = index - min
val steps = span / (unitSize * unitSize)
val nextMin = min + steps * unitSize * unitSize
val offset = orientations(orientation).indexOf(steps) // TODO expedite
val y = offset / 3
val x = offset % 3
val nextOrientation = orientationMap(orientation)(offset)
seek(Y + y * unitSize, X + x * unitSize, nextMin, nextOrientation, recursesLeft - 1)
}
}
seek(0, 0, 0, 0)
}
}
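// Worked example (illustrative, not in the original source): with two children of
// cardinality 3 each (a single 3x3 unit, so levels == 1), encode simply reads the base
// pattern orientations(0): encode(Seq(0, 0)) == 0, encode(Seq(0, 1)) == 5,
// encode(Seq(1, 1)) == 4, and decode inverts the lookup, e.g. decode(5) == Seq(0, 1).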
/******************************************************
* composed curve
******************************************************/
// trite R(x, P(y, z))
class ComposedCurve_RP(xDim: ContinuousDiscretizer, yDim: ContinuousDiscretizer, zDim: ContinuousDiscretizer)
extends RowMajorCurve(Seq(xDim, PeanoCurve2D(Seq(yDim, zDim))))
/******************************************************
* single range-finder
******************************************************/
| cne1x/sfseize-api | src/main/scala/org/eichelberger/sfseize/api/ExampleImpl.scala | Scala | apache-2.0 | 6,430 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import scala.collection.immutable.HashSet
import org.apache.spark.sql.catalyst.analysis.{CleanupAliases, EliminateSubQueries}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.Inner
import org.apache.spark.sql.catalyst.plans.FullOuter
import org.apache.spark.sql.catalyst.plans.LeftOuter
import org.apache.spark.sql.catalyst.plans.RightOuter
import org.apache.spark.sql.catalyst.plans.LeftSemi
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.types._
abstract class Optimizer extends RuleExecutor[LogicalPlan]
object DefaultOptimizer extends Optimizer {
val batches =
// SubQueries are only needed for analysis and can be removed before execution.
Batch("Remove SubQueries", FixedPoint(100),
EliminateSubQueries) ::
Batch("Aggregate", FixedPoint(100),
ReplaceDistinctWithAggregate,
RemoveLiteralFromGroupExpressions) ::
Batch("Operator Optimizations", FixedPoint(100),
// Operator push down
SetOperationPushDown,
SamplePushDown,
PushPredicateThroughJoin,
PushPredicateThroughProject,
PushPredicateThroughGenerate,
ColumnPruning,
// Operator combine
ProjectCollapsing,
CombineFilters,
CombineLimits,
// Constant folding
NullPropagation,
OptimizeIn,
ConstantFolding,
LikeSimplification,
BooleanSimplification,
RemovePositive,
SimplifyFilters,
SimplifyCasts,
SimplifyCaseConversionExpressions) ::
Batch("Decimal Optimizations", FixedPoint(100),
DecimalAggregates) ::
Batch("LocalRelation", FixedPoint(100),
ConvertToLocalRelation) :: Nil
}
/**
* Pushes operations down into a Sample.
*/
object SamplePushDown extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// Push down projection into sample
case Project(projectList, s @ Sample(lb, up, replace, seed, child)) =>
Sample(lb, up, replace, seed,
Project(projectList, child))
}
}
/**
* Pushes certain operations to both sides of a Union, Intersect or Except operator.
* Operations that are safe to push down are listed as follows.
* Union:
* Right now, Union means UNION ALL, which does not de-duplicate rows. So, it is
* safe to push down Filters and Projections through it. Once we add UNION DISTINCT,
* we will not be able to push down Projections.
*
* Intersect:
* It is not safe to push down Projections through it because we need to compute the
* intersection of rows by comparing entire rows. It is fine to push down Filters
* with a deterministic condition.
*
* Except:
* It is not safe to push down Projections through it because we need to compute the
* set difference by comparing entire rows. It is fine to push down Filters
* with a deterministic condition.
*/
object SetOperationPushDown extends Rule[LogicalPlan] with PredicateHelper {
/**
* Maps Attributes from the left side to the corresponding Attribute on the right side.
*/
private def buildRewrites(bn: BinaryNode): AttributeMap[Attribute] = {
assert(bn.isInstanceOf[Union] || bn.isInstanceOf[Intersect] || bn.isInstanceOf[Except])
assert(bn.left.output.size == bn.right.output.size)
AttributeMap(bn.left.output.zip(bn.right.output))
}
/**
* Rewrites an expression so that it can be pushed to the right side of a
* Union, Intersect or Except operator. This method relies on the fact that the output attributes
* of a union/intersect/except are always equal to the left child's output.
*/
private def pushToRight[A <: Expression](e: A, rewrites: AttributeMap[Attribute]) = {
val result = e transform {
case a: Attribute => rewrites(a)
}
// We must promise the compiler that we did not discard the names in the case of project
// expressions. This is safe since the only transformation is from Attribute => Attribute.
result.asInstanceOf[A]
}
/**
* Splits the condition expression into smaller conditions joined by `And`, partitions them
* by determinism, and finally recombines each group with `And`. It returns an expression
* containing all deterministic conditions (the first field of the returned Tuple2) and an
* expression containing all non-deterministic conditions (the second field of the Tuple2).
*/
private def partitionByDeterministic(condition: Expression): (Expression, Expression) = {
val andConditions = splitConjunctivePredicates(condition)
andConditions.partition(_.deterministic) match {
case (deterministic, nondeterministic) =>
deterministic.reduceOption(And).getOrElse(Literal(true)) ->
nondeterministic.reduceOption(And).getOrElse(Literal(true))
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// Push down filter into union
case Filter(condition, u @ Union(left, right)) =>
val (deterministic, nondeterministic) = partitionByDeterministic(condition)
val rewrites = buildRewrites(u)
Filter(nondeterministic,
Union(
Filter(deterministic, left),
Filter(pushToRight(deterministic, rewrites), right)
)
)
// Push down deterministic projection through UNION ALL
case p @ Project(projectList, u @ Union(left, right)) =>
if (projectList.forall(_.deterministic)) {
val rewrites = buildRewrites(u)
Union(
Project(projectList, left),
Project(projectList.map(pushToRight(_, rewrites)), right))
} else {
p
}
// Push down filter through INTERSECT
case Filter(condition, i @ Intersect(left, right)) =>
val (deterministic, nondeterministic) = partitionByDeterministic(condition)
val rewrites = buildRewrites(i)
Filter(nondeterministic,
Intersect(
Filter(deterministic, left),
Filter(pushToRight(deterministic, rewrites), right)
)
)
// Push down filter through EXCEPT
case Filter(condition, e @ Except(left, right)) =>
val (deterministic, nondeterministic) = partitionByDeterministic(condition)
val rewrites = buildRewrites(e)
Filter(nondeterministic,
Except(
Filter(deterministic, left),
Filter(pushToRight(deterministic, rewrites), right)
)
)
}
}
/**
* Attempts to eliminate the reading of unneeded columns from the query plan using the following
* transformations:
*
* - Inserting Projections beneath the following operators:
* - Aggregate
* - Generate
* - Project <- Join
* - LeftSemiJoin
*/
object ColumnPruning extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case a @ Aggregate(_, _, e @ Expand(_, groupByExprs, _, child))
if (child.outputSet -- AttributeSet(groupByExprs) -- a.references).nonEmpty =>
a.copy(child = e.copy(child = prunedChild(child, AttributeSet(groupByExprs) ++ a.references)))
// Eliminate attributes that are not needed to calculate the specified aggregates.
case a @ Aggregate(_, _, child) if (child.outputSet -- a.references).nonEmpty =>
a.copy(child = Project(a.references.toSeq, child))
// Eliminate attributes that are not needed to calculate the Generate.
case g: Generate if !g.join && (g.child.outputSet -- g.references).nonEmpty =>
g.copy(child = Project(g.references.toSeq, g.child))
case p @ Project(_, g: Generate) if g.join && p.references.subsetOf(g.generatedSet) =>
p.copy(child = g.copy(join = false))
case p @ Project(projectList, g: Generate) if g.join =>
val neededChildOutput = p.references -- g.generatorOutput ++ g.references
if (neededChildOutput == g.child.outputSet) {
p
} else {
Project(projectList, g.copy(child = Project(neededChildOutput.toSeq, g.child)))
}
case p @ Project(projectList, a @ Aggregate(groupingExpressions, aggregateExpressions, child))
if (a.outputSet -- p.references).nonEmpty =>
Project(
projectList,
Aggregate(
groupingExpressions,
aggregateExpressions.filter(e => p.references.contains(e)),
child))
// Eliminate unneeded attributes from either side of a Join.
case Project(projectList, Join(left, right, joinType, condition)) =>
// Collect the list of all references required either above or to evaluate the condition.
val allReferences: AttributeSet =
AttributeSet(
projectList.flatMap(_.references.iterator)) ++
condition.map(_.references).getOrElse(AttributeSet(Seq.empty))
/** Applies a projection only when the child is producing unnecessary attributes */
def pruneJoinChild(c: LogicalPlan): LogicalPlan = prunedChild(c, allReferences)
Project(projectList, Join(pruneJoinChild(left), pruneJoinChild(right), joinType, condition))
// Eliminate unneeded attributes from right side of a LeftSemiJoin.
case Join(left, right, LeftSemi, condition) =>
// Collect the list of all references required to evaluate the condition.
val allReferences: AttributeSet =
condition.map(_.references).getOrElse(AttributeSet(Seq.empty))
Join(left, prunedChild(right, allReferences), LeftSemi, condition)
// Push down project through limit, so that we may have chance to push it further.
case Project(projectList, Limit(exp, child)) =>
Limit(exp, Project(projectList, child))
// Push down project if possible when the child is sort
case p @ Project(projectList, s @ Sort(_, _, grandChild))
if s.references.subsetOf(p.outputSet) =>
s.copy(child = Project(projectList, grandChild))
// Eliminate no-op Projects
case Project(projectList, child) if child.output == projectList => child
}
/** Applies a projection only when the child is producing unnecessary attributes */
private def prunedChild(c: LogicalPlan, allReferences: AttributeSet) =
if ((c.outputSet -- allReferences.filter(c.outputSet.contains)).nonEmpty) {
Project(allReferences.filter(c.outputSet.contains).toSeq, c)
} else {
c
}
}
/**
* Combines two adjacent [[Project]] operators into one and performs alias substitution,
* merging the expressions into one single expression.
*/
object ProjectCollapsing extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case p @ Project(projectList1, Project(projectList2, child)) =>
// Create a map of Aliases to their values from the child projection.
// e.g., 'SELECT ... FROM (SELECT a + b AS c, d ...)' produces Map(c -> Alias(a + b, c)).
val aliasMap = AttributeMap(projectList2.collect {
case a: Alias => (a.toAttribute, a)
})
// We only collapse these two Projects if their overlapped expressions are all
// deterministic.
val hasNondeterministic = projectList1.exists(_.collect {
case a: Attribute if aliasMap.contains(a) => aliasMap(a).child
}.exists(!_.deterministic))
if (hasNondeterministic) {
p
} else {
// Substitute any attributes that are produced by the child projection, so that we safely
// eliminate it.
// e.g., 'SELECT c + 1 FROM (SELECT a + b AS C ...' produces 'SELECT a + b + 1 ...'
// TODO: Fix TransformBase to avoid the cast below.
val substitutedProjection = projectList1.map(_.transform {
case a: Attribute => aliasMap.getOrElse(a, a)
}).asInstanceOf[Seq[NamedExpression]]
// collapse 2 projects may introduce unnecessary Aliases, trim them here.
val cleanedProjection = substitutedProjection.map(p =>
CleanupAliases.trimNonTopLevelAliases(p).asInstanceOf[NamedExpression]
)
Project(cleanedProjection, child)
}
}
}
/**
* Simplifies LIKE expressions that do not need full regular expressions to evaluate the condition.
* For example, when the expression is just checking to see if a string starts with a given
* pattern.
*/
object LikeSimplification extends Rule[LogicalPlan] {
// if guards below protect from escapes on trailing %.
// Cases like "something\\%" are not optimized, but this does not affect correctness.
private val startsWith = "([^_%]+)%".r
private val endsWith = "%([^_%]+)".r
private val contains = "%([^_%]+)%".r
private val equalTo = "([^_%]*)".r
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case Like(l, Literal(utf, StringType)) =>
utf.toString match {
case startsWith(pattern) if !pattern.endsWith("\\\\") =>
StartsWith(l, Literal(pattern))
case endsWith(pattern) =>
EndsWith(l, Literal(pattern))
case contains(pattern) if !pattern.endsWith("\\\\") =>
Contains(l, Literal(pattern))
case equalTo(pattern) =>
EqualTo(l, Literal(pattern))
case _ =>
Like(l, Literal.create(utf, StringType))
}
}
}
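// Illustrative examples (not part of the original source), for a hypothetical column `name`:
//   name LIKE 'abc%'   =>  StartsWith(name, "abc")
//   name LIKE '%abc'   =>  EndsWith(name, "abc")
//   name LIKE '%abc%'  =>  Contains(name, "abc")
//   name LIKE 'abc'    =>  EqualTo(name, "abc")
// Other patterns (e.g. ones containing '_' or an embedded '%') keep the original Like expression.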
/**
* Replaces [[Expression Expressions]] that can be statically evaluated with
* equivalent [[Literal]] values. This rule is more specific with
* Null value propagation from bottom to top of the expression tree.
*/
object NullPropagation extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
case e @ Count(Literal(null, _)) => Cast(Literal(0L), e.dataType)
case e @ IsNull(c) if !c.nullable => Literal.create(false, BooleanType)
case e @ IsNotNull(c) if !c.nullable => Literal.create(true, BooleanType)
case e @ GetArrayItem(Literal(null, _), _) => Literal.create(null, e.dataType)
case e @ GetArrayItem(_, Literal(null, _)) => Literal.create(null, e.dataType)
case e @ GetMapValue(Literal(null, _), _) => Literal.create(null, e.dataType)
case e @ GetMapValue(_, Literal(null, _)) => Literal.create(null, e.dataType)
case e @ GetStructField(Literal(null, _), _, _) => Literal.create(null, e.dataType)
case e @ GetArrayStructFields(Literal(null, _), _, _, _, _) =>
Literal.create(null, e.dataType)
case e @ EqualNullSafe(Literal(null, _), r) => IsNull(r)
case e @ EqualNullSafe(l, Literal(null, _)) => IsNull(l)
case e @ Count(expr) if !expr.nullable => Count(Literal(1))
// For Coalesce, remove null literals.
case e @ Coalesce(children) =>
val newChildren = children.filter {
case Literal(null, _) => false
case _ => true
}
if (newChildren.length == 0) {
Literal.create(null, e.dataType)
} else if (newChildren.length == 1) {
newChildren.head
} else {
Coalesce(newChildren)
}
case e @ Substring(Literal(null, _), _, _) => Literal.create(null, e.dataType)
case e @ Substring(_, Literal(null, _), _) => Literal.create(null, e.dataType)
case e @ Substring(_, _, Literal(null, _)) => Literal.create(null, e.dataType)
// MaxOf and MinOf can't do null propagation
case e: MaxOf => e
case e: MinOf => e
// Put exceptional cases above if any
case e @ BinaryArithmetic(Literal(null, _), _) => Literal.create(null, e.dataType)
case e @ BinaryArithmetic(_, Literal(null, _)) => Literal.create(null, e.dataType)
case e @ BinaryComparison(Literal(null, _), _) => Literal.create(null, e.dataType)
case e @ BinaryComparison(_, Literal(null, _)) => Literal.create(null, e.dataType)
case e: StringRegexExpression => e.children match {
case Literal(null, _) :: right :: Nil => Literal.create(null, e.dataType)
case left :: Literal(null, _) :: Nil => Literal.create(null, e.dataType)
case _ => e
}
case e: StringPredicate => e.children match {
case Literal(null, _) :: right :: Nil => Literal.create(null, e.dataType)
case left :: Literal(null, _) :: Nil => Literal.create(null, e.dataType)
case _ => e
}
}
}
}
/**
* Replaces [[Expression Expressions]] that can be statically evaluated with
* equivalent [[Literal]] values.
*/
object ConstantFolding extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsDown {
// Skip redundant folding of literals. This rule is technically not necessary. Placing this
// here avoids running the next rule for Literal values, which would create a new Literal
// object and running eval unnecessarily.
case l: Literal => l
// Fold expressions that are foldable.
case e if e.foldable => Literal.create(e.eval(EmptyRow), e.dataType)
}
}
}
/**
* Replaces [[In (value, seq[Literal])]] with the optimized version [[InSet (value, HashSet[Literal])]],
* which is much faster.
*/
object OptimizeIn extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsDown {
case In(v, list) if !list.exists(!_.isInstanceOf[Literal]) && list.size > 10 =>
val hSet = list.map(e => e.eval(EmptyRow))
InSet(v, HashSet() ++ hSet)
}
}
}
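// Illustrative example (not part of the original source), for a hypothetical attribute a:
//   a IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
// has more than 10 literal elements, so it becomes InSet(a, HashSet(1, ..., 11)),
// replacing a linear scan of the value list with a hash-set membership test.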
/**
* Simplifies boolean expressions:
* 1. Simplifies expressions whose answer can be determined without evaluating both sides.
* 2. Eliminates / extracts common factors.
* 3. Merges identical expressions.
* 4. Removes the `Not` operator.
*/
object BooleanSimplification extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
case and @ And(left, right) => (left, right) match {
// true && r => r
case (Literal(true, BooleanType), r) => r
// l && true => l
case (l, Literal(true, BooleanType)) => l
// false && r => false
case (Literal(false, BooleanType), _) => Literal(false)
// l && false => false
case (_, Literal(false, BooleanType)) => Literal(false)
// a && a => a
case (l, r) if l fastEquals r => l
// (a || b) && (a || c) => a || (b && c)
case _ =>
// 1. Split left and right to get the disjunctive predicates,
// i.e. lhs = (a, b), rhs = (a, c)
// 2. Find the common predicates between lhs and rhs, i.e. common = (a)
// 3. Remove the common predicates from lhs and rhs, i.e. ldiff = (b), rdiff = (c)
// 4. Apply the formula, get the optimized predicate: common || (ldiff && rdiff)
val lhs = splitDisjunctivePredicates(left)
val rhs = splitDisjunctivePredicates(right)
val common = lhs.filter(e => rhs.exists(e.semanticEquals(_)))
if (common.isEmpty) {
// No common factors, return the original predicate
and
} else {
val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals(_)))
val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals(_)))
if (ldiff.isEmpty || rdiff.isEmpty) {
// (a || b || c || ...) && (a || b) => (a || b)
common.reduce(Or)
} else {
// (a || b || c || ...) && (a || b || d || ...) =>
// ((c || ...) && (d || ...)) || a || b
(common :+ And(ldiff.reduce(Or), rdiff.reduce(Or))).reduce(Or)
}
}
} // end of And(left, right)
case or @ Or(left, right) => (left, right) match {
// true || r => true
case (Literal(true, BooleanType), _) => Literal(true)
// r || true => true
case (_, Literal(true, BooleanType)) => Literal(true)
// false || r => r
case (Literal(false, BooleanType), r) => r
// l || false => l
case (l, Literal(false, BooleanType)) => l
// a || a => a
case (l, r) if l fastEquals r => l
// (a && b) || (a && c) => a && (b || c)
case _ =>
// 1. Split left and right to get the conjunctive predicates,
// i.e. lhs = (a, b), rhs = (a, c)
// 2. Find the common predicates between lhs and rhs, i.e. common = (a)
// 3. Remove the common predicates from lhs and rhs, i.e. ldiff = (b), rdiff = (c)
// 4. Apply the formula, get the optimized predicate: common && (ldiff || rdiff)
val lhs = splitConjunctivePredicates(left)
val rhs = splitConjunctivePredicates(right)
val common = lhs.filter(e => rhs.exists(e.semanticEquals(_)))
if (common.isEmpty) {
// No common factors, return the original predicate
or
} else {
val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals(_)))
val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals(_)))
if (ldiff.isEmpty || rdiff.isEmpty) {
// (a && b) || (a && b && c && ...) => a && b
common.reduce(And)
} else {
// (a && b && c && ...) || (a && b && d && ...) =>
// ((c && ...) || (d && ...)) && a && b
(common :+ Or(ldiff.reduce(And), rdiff.reduce(And))).reduce(And)
}
}
} // end of Or(left, right)
case not @ Not(exp) => exp match {
// not(true) => false
case Literal(true, BooleanType) => Literal(false)
// not(false) => true
case Literal(false, BooleanType) => Literal(true)
// not(l > r) => l <= r
case GreaterThan(l, r) => LessThanOrEqual(l, r)
// not(l >= r) => l < r
case GreaterThanOrEqual(l, r) => LessThan(l, r)
// not(l < r) => l >= r
case LessThan(l, r) => GreaterThanOrEqual(l, r)
// not(l <= r) => l > r
case LessThanOrEqual(l, r) => GreaterThan(l, r)
// not(not(e)) => e
case Not(e) => e
case _ => not
} // end of Not(exp)
// if (true) a else b => a
// if (false) a else b => b
case e @ If(Literal(v, _), trueValue, falseValue) => if (v == true) trueValue else falseValue
}
}
}
/**
* Combines two adjacent [[Filter]] operators into one, merging the
* conditions into one conjunctive predicate.
*/
object CombineFilters extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case ff @ Filter(fc, nf @ Filter(nc, grandChild)) => Filter(And(nc, fc), grandChild)
}
}
/**
* Removes filters that can be evaluated trivially. This is done either by eliding the filter for
* cases where it will always evaluate to `true`, or substituting a dummy empty relation when the
* filter will always evaluate to `false`.
*/
object SimplifyFilters extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// If the filter condition always evaluate to true, remove the filter.
case Filter(Literal(true, BooleanType), child) => child
// If the filter condition always evaluate to null or false,
// replace the input with an empty relation.
case Filter(Literal(null, _), child) => LocalRelation(child.output, data = Seq.empty)
case Filter(Literal(false, BooleanType), child) => LocalRelation(child.output, data = Seq.empty)
}
}
/**
* Pushes [[Filter]] operators through [[Project]] operators, in-lining any [[Alias Aliases]]
* that were defined in the projection.
*
* This heuristic is valid assuming the expression evaluation cost is minimal.
*/
object PushPredicateThroughProject extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// SPARK-13473: We can't push the predicate down when the underlying projection output non-
// deterministic field(s). Non-deterministic expressions are essentially stateful. This
// implies that, for a given input row, the output are determined by the expression's initial
* state and all the input rows processed before. In other words, the order of input rows
// matters for non-deterministic expressions, while pushing down predicates changes the order.
case filter @ Filter(condition, project @ Project(fields, grandChild))
if fields.forall(_.deterministic) =>
// Create a map of Aliases to their values from the child projection.
// e.g., 'SELECT a + b AS c, d ...' produces Map(c -> a + b).
val aliasMap = AttributeMap(fields.collect {
case a: Alias => (a.toAttribute, a.child)
})
// Split the condition into small conditions by `And`, so that we can push down part of this
// condition without nondeterministic expressions.
val andConditions = splitConjunctivePredicates(condition)
val (deterministic, nondeterministic) = andConditions.partition(_.collect {
case a: Attribute if aliasMap.contains(a) => aliasMap(a)
}.forall(_.deterministic))
// If there is no nondeterministic conditions, push down the whole condition.
if (nondeterministic.isEmpty) {
project.copy(child = Filter(replaceAlias(condition, aliasMap), grandChild))
} else {
// If they are all nondeterministic conditions, leave it un-changed.
if (deterministic.isEmpty) {
filter
} else {
// Push down the small conditions without nondeterministic expressions.
val pushedCondition = deterministic.map(replaceAlias(_, aliasMap)).reduce(And)
Filter(nondeterministic.reduce(And),
project.copy(child = Filter(pushedCondition, grandChild)))
}
}
}
// Substitute any attributes that are produced by the child projection, so that we safely
// eliminate it.
private def replaceAlias(condition: Expression, sourceAliases: AttributeMap[Expression]) = {
condition.transform {
case a: Attribute => sourceAliases.getOrElse(a, a)
}
}
}
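// Illustrative example (not part of the original source), for hypothetical attributes a and b:
//   Filter('c > 1, Project(('a + 'b).as("c") :: Nil, child))
// in-lines the alias c and pushes the deterministic predicate below the projection:
//   Project(('a + 'b).as("c") :: Nil, Filter('a + 'b > 1, child))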
/**
* Push [[Filter]] operators through [[Generate]] operators. Parts of the predicate that reference
* attributes generated in [[Generate]] will remain above, and the rest should be pushed beneath.
*/
object PushPredicateThroughGenerate extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case filter @ Filter(condition, g: Generate) =>
// Predicates that reference attributes produced by the `Generate` operator cannot
// be pushed below the operator.
val (pushDown, stayUp) = splitConjunctivePredicates(condition).partition {
conjunct => conjunct.references subsetOf g.child.outputSet
}
if (pushDown.nonEmpty) {
val pushDownPredicate = pushDown.reduce(And)
val withPushdown = Generate(g.generator, join = g.join, outer = g.outer,
g.qualifier, g.generatorOutput, Filter(pushDownPredicate, g.child))
stayUp.reduceOption(And).map(Filter(_, withPushdown)).getOrElse(withPushdown)
} else {
filter
}
}
}
/**
* Pushes down [[Filter]] operators where the `condition` can be
* evaluated using only the attributes of the left or right side of a join. Other
* [[Filter]] conditions are moved into the `condition` of the [[Join]].
*
* It also pushes down the join filter, where the `condition` can be evaluated using only the
* attributes of the left or right side of sub query when applicable.
*
* Check https://cwiki.apache.org/confluence/display/Hive/OuterJoinBehavior for more details
*/
object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper {
/**
* Splits join condition expressions into three categories based on the attributes required
* to evaluate them.
* @return (canEvaluateInLeft, canEvaluateInRight, haveToEvaluateInBoth)
*/
private def split(condition: Seq[Expression], left: LogicalPlan, right: LogicalPlan) = {
val (leftEvaluateCondition, rest) =
condition.partition(_.references subsetOf left.outputSet)
val (rightEvaluateCondition, commonCondition) =
rest.partition(_.references subsetOf right.outputSet)
(leftEvaluateCondition, rightEvaluateCondition, commonCondition)
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// push the where condition down into join filter
case f @ Filter(filterCondition, Join(left, right, joinType, joinCondition)) =>
val (leftFilterConditions, rightFilterConditions, commonFilterCondition) =
split(splitConjunctivePredicates(filterCondition), left, right)
joinType match {
case Inner =>
// push down the single side `where` condition into respective sides
val newLeft = leftFilterConditions.
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
val newRight = rightFilterConditions.
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
val newJoinCond = (commonFilterCondition ++ joinCondition).reduceLeftOption(And)
Join(newLeft, newRight, Inner, newJoinCond)
case RightOuter =>
// push down the right side only `where` condition
val newLeft = left
val newRight = rightFilterConditions.
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
val newJoinCond = joinCondition
val newJoin = Join(newLeft, newRight, RightOuter, newJoinCond)
(leftFilterConditions ++ commonFilterCondition).
reduceLeftOption(And).map(Filter(_, newJoin)).getOrElse(newJoin)
case _ @ (LeftOuter | LeftSemi) =>
// push down the left side only `where` condition
val newLeft = leftFilterConditions.
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
val newRight = right
val newJoinCond = joinCondition
val newJoin = Join(newLeft, newRight, joinType, newJoinCond)
(rightFilterConditions ++ commonFilterCondition).
reduceLeftOption(And).map(Filter(_, newJoin)).getOrElse(newJoin)
case FullOuter => f // DO Nothing for Full Outer Join
}
// push down the join filter into sub query scanning if applicable
case f @ Join(left, right, joinType, joinCondition) =>
val (leftJoinConditions, rightJoinConditions, commonJoinCondition) =
split(joinCondition.map(splitConjunctivePredicates).getOrElse(Nil), left, right)
joinType match {
case _ @ (Inner | LeftSemi) =>
// push down the single side only join filter for both sides sub queries
val newLeft = leftJoinConditions.
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
val newRight = rightJoinConditions.
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
val newJoinCond = commonJoinCondition.reduceLeftOption(And)
Join(newLeft, newRight, joinType, newJoinCond)
case RightOuter =>
// push down the left side only join filter for left side sub query
val newLeft = leftJoinConditions.
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
val newRight = right
val newJoinCond = (rightJoinConditions ++ commonJoinCondition).reduceLeftOption(And)
Join(newLeft, newRight, RightOuter, newJoinCond)
case LeftOuter =>
// push down the right side only join filter for right sub query
val newLeft = left
val newRight = rightJoinConditions.
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
val newJoinCond = (leftJoinConditions ++ commonJoinCondition).reduceLeftOption(And)
Join(newLeft, newRight, LeftOuter, newJoinCond)
case FullOuter => f
}
}
}
/**
* Removes [[Cast Casts]] that are unnecessary because the input is already the correct type.
*/
object SimplifyCasts extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case Cast(e, dataType) if e.dataType == dataType => e
}
}
/**
* Removes the [[UnaryPositive]] identity function.
*/
object RemovePositive extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case UnaryPositive(child) => child
}
}
/**
* Combines two adjacent [[Limit]] operators into one, merging the
* expressions into one single expression.
*/
object CombineLimits extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case ll @ Limit(le, nl @ Limit(ne, grandChild)) =>
Limit(If(LessThan(ne, le), ne, le), grandChild)
}
}
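// Illustrative example (not part of the original source):
//   Limit(10, Limit(5, child))  =>  Limit(If(LessThan(5, 10), 5, 10), child)
// i.e. the smaller of the two limits wins once ConstantFolding evaluates the If.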
/**
* Removes the inner case conversion expressions that are unnecessary because
* the inner conversion is overwritten by the outer one.
*/
object SimplifyCaseConversionExpressions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
case Upper(Upper(child)) => Upper(child)
case Upper(Lower(child)) => Upper(child)
case Lower(Upper(child)) => Lower(child)
case Lower(Lower(child)) => Lower(child)
}
}
}
/**
* Speeds up aggregates on fixed-precision decimals by executing them on unscaled Long values.
*
* This uses the same rules for increasing the precision and scale of the output as
* [[org.apache.spark.sql.catalyst.analysis.HiveTypeCoercion.DecimalPrecision]].
*/
object DecimalAggregates extends Rule[LogicalPlan] {
import Decimal.MAX_LONG_DIGITS
/** Maximum number of decimal digits representable precisely in a Double */
private val MAX_DOUBLE_DIGITS = 15
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case Sum(e @ DecimalType.Expression(prec, scale)) if prec + 10 <= MAX_LONG_DIGITS =>
MakeDecimal(Sum(UnscaledValue(e)), prec + 10, scale)
case Average(e @ DecimalType.Expression(prec, scale)) if prec + 4 <= MAX_DOUBLE_DIGITS =>
Cast(
Divide(Average(UnscaledValue(e)), Literal.create(math.pow(10.0, scale), DoubleType)),
DecimalType(prec + 4, scale + 4))
}
}
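// Illustrative example (not part of the original source), for a column e of DecimalType(8, 2):
//   Sum(e)      =>  MakeDecimal(Sum(UnscaledValue(e)), 18, 2)        // 8 + 10 <= 18 long digits
//   Average(e)  =>  Cast(Divide(Average(UnscaledValue(e)), 100.0), DecimalType(12, 6))
// so the aggregation itself runs on unscaled Long values.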
/**
* Converts local operations (i.e. ones that don't require data exchange) on LocalRelation to
* another LocalRelation.
*
* This is relatively simple as it currently handles only a single case: Project.
*/
object ConvertToLocalRelation extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Project(projectList, LocalRelation(output, data)) =>
val projection = new InterpretedProjection(projectList, output)
LocalRelation(projectList.map(_.toAttribute), data.map(projection))
}
}
/**
* Replaces logical [[Distinct]] operator with an [[Aggregate]] operator.
* {{{
* SELECT DISTINCT f1, f2 FROM t ==> SELECT f1, f2 FROM t GROUP BY f1, f2
* }}}
*/
object ReplaceDistinctWithAggregate extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Distinct(child) => Aggregate(child.output, child.output, child)
}
}
/**
* Removes literals from grouping expressions in [[Aggregate]], as they have no effect on the
* result and only make the grouping key bigger.
*/
object RemoveLiteralFromGroupExpressions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case a @ Aggregate(grouping, _, _) =>
val newGrouping = grouping.filter(!_.foldable)
a.copy(groupingExpressions = newGrouping)
}
}
| practice-vishnoi/dev-spark-1 | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala | Scala | apache-2.0 | 36,308 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
/**
* An end-to-end test suite specifically for testing Tungsten (Unsafe/CodeGen) mode.
*
* This is here for now so I can make sure the Tungsten project is tested without refactoring existing
* end-to-end test infra. In the long run this should just go away.
*/
class DataFrameTungstenSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("test simple types") {
val df = sparkContext.parallelize(Seq((1, 2))).toDF("a", "b")
assert(df.select(struct("a", "b")).first().getStruct(0) === Row(1, 2))
}
test("test struct type") {
val struct = Row(1, 2L, 3.0F, 3.0)
val data = sparkContext.parallelize(Seq(Row(1, struct)))
val schema = new StructType()
.add("a", IntegerType)
.add("b",
new StructType()
.add("b1", IntegerType)
.add("b2", LongType)
.add("b3", FloatType)
.add("b4", DoubleType))
val df = spark.createDataFrame(data, schema)
assert(df.select("b").first() === Row(struct))
}
test("test nested struct type") {
val innerStruct = Row(1, "abcd")
val outerStruct = Row(1, 2L, 3.0F, 3.0, innerStruct, "efg")
val data = sparkContext.parallelize(Seq(Row(1, outerStruct)))
val schema = new StructType()
.add("a", IntegerType)
.add("b",
new StructType()
.add("b1", IntegerType)
.add("b2", LongType)
.add("b3", FloatType)
.add("b4", DoubleType)
.add("b5", new StructType()
.add("b5a", IntegerType)
.add("b5b", StringType))
.add("b6", StringType))
val df = spark.createDataFrame(data, schema)
assert(df.select("b").first() === Row(outerStruct))
}
test("primitive data type accesses in persist data") {
val data = Seq(true, 1.toByte, 3.toShort, 7, 15.toLong,
31.25.toFloat, 63.75, null)
val dataTypes = Seq(BooleanType, ByteType, ShortType, IntegerType, LongType,
FloatType, DoubleType, IntegerType)
val schemas = dataTypes.zipWithIndex.map { case (dataType, index) =>
StructField(s"col$index", dataType, true)
}
val rdd = sparkContext.makeRDD(Seq(Row.fromSeq(data)))
val df = spark.createDataFrame(rdd, StructType(schemas))
val row = df.persist.take(1).apply(0)
checkAnswer(df, row)
}
test("access cache multiple times") {
val df0 = sparkContext.parallelize(Seq(1, 2, 3), 1).toDF("x").cache
df0.count
val df1 = df0.filter("x > 1")
checkAnswer(df1, Seq(Row(2), Row(3)))
val df2 = df0.filter("x > 2")
checkAnswer(df2, Row(3))
val df10 = sparkContext.parallelize(Seq(3, 4, 5, 6), 1).toDF("x").cache
for (_ <- 0 to 2) {
val df11 = df10.filter("x > 5")
checkAnswer(df11, Row(6))
}
}
test("access only some column of the all of columns") {
val df = spark.range(1, 10).map(i => (i, (i + 1).toDouble)).toDF("l", "d")
df.cache
df.count
assert(df.filter("d < 3").count == 1)
}
}
| bravo-zhang/spark | sql/core/src/test/scala/org/apache/spark/sql/DataFrameTungstenSuite.scala | Scala | apache-2.0 | 3,908 |
package edu.uw.at.iroberts.wirefugue.kafka.producer.kafka.scala
import org.apache.kafka.clients.producer
import org.apache.kafka.common.Cluster
/** An adapter trait useful for defining stateless partitioners
* in Scala. configure() and close() are predefined as no-ops
* and the partition() method can (and must) be defined using
* Option instead of nulls, and IndexedSeq[Byte] instead of
* Array[Byte].
*
* Created by Ian Robertson <[email protected]> on 6/15/17.
*/
trait SimplePartitioner extends producer.Partitioner {
// Provide default do-nothing methods for configure and close
def configure(configs: java.util.Map[String, _]) = ()
def close() = ()
def partition(topic: String,
key: Option[Any],
keyBytes: Option[IndexedSeq[Byte]],
value: Option[Any],
valueBytes: Option[IndexedSeq[Byte]],
cluster: Cluster): Int
override def partition(
topic: String,
key: Object,
keyBytes: Array[Byte],
value: Object,
valueBytes: Array[Byte],
cluster: Cluster
): Int = partition(
topic,
Option(key),
Option(keyBytes).map(_.toIndexedSeq),
Option(value),
Option(valueBytes).map(_.toIndexedSeq),
cluster
)
}
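/** A minimal concrete partitioner sketched for illustration only (not part of the original
* file); the class name and the fallback-to-partition-0 behaviour are assumptions. Keyed
* records are spread by a murmur2 hash of the key bytes, unkeyed records all go to partition 0.
*/
class KeyHashPartitionerSketch extends SimplePartitioner {
import org.apache.kafka.common.utils.Utils
def partition(topic: String,
key: Option[Any],
keyBytes: Option[IndexedSeq[Byte]],
value: Option[Any],
valueBytes: Option[IndexedSeq[Byte]],
cluster: Cluster): Int = {
// total number of partitions currently known for this topic
val numPartitions: Int = cluster.partitionCountForTopic(topic)
keyBytes match {
case Some(bytes) => Utils.toPositive(Utils.murmur2(bytes.toArray)) % numPartitions
case None => 0 // no key: fixed partition for this sketch
}
}
}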
| robertson-tech/wirefugue | sensor/src/main/scala/edu/uw/at/iroberts/wirefugue/kafka/producer/kafka/scala/SimplePartitioner.scala | Scala | gpl-3.0 | 1,377 |
package me.smartco.akstore.store.rest.route
import akka.actor.ActorContext
import com.fasterxml.jackson.databind.ObjectMapper
import me.smartco.akstore.common.Constants
import me.smartco.akstore.common.model.{Payment, Address, AbstractDocument}
import me.smartco.akstore.integration.ServiceFacade
import me.smartco.akstore.store.mongodb.mall.{Advertisement, Category}
import me.smartco.akstore.store.mongodb.market.DispatchProduct
import me.smartco.akstore.store.service.PartnerService
import me.smartco.akstore.store.spring.Bean
import me.smartco.akstore.store.mongodb.core.AttachmentsRepository
import me.smartco.akstore.store.mongodb.partner.{DispatchOptions, PartnerStaff}
import me.smartco.akstore.common.util.ImgUtil
import me.smartco.akstore.user.model.User
import org.springframework.data.domain.PageRequest
import spray.http.{StatusCodes, BodyPart}
import spray.routing.{Directives, MalformedQueryParamRejection}
import scala.collection.mutable
import scala.concurrent.ExecutionContext
import spray.routing.Directives._
import me.smartco.akstore.store.rest.json.Resources._
import spray.httpx.unmarshalling._
import spray.httpx.marshalling._
import scala.collection.JavaConverters._
import StatusCodes._
/**
* Created by libin on 14-11-14.
*/
trait StaffRoute {
def staffRoute(user: User)(implicit context: ActorContext, mapper: ObjectMapper, executor: ExecutionContext) = {
val facade=Bean[ServiceFacade]
pathPrefix("admin") {
path("partners") {
post {
formFields('partner_name, 'partner_mobile, 'partner_password, '_name, 'address_province, 'address_city, 'address_street, 'location_lat.as[Double], 'location_lng.as[Double]) {
(name, mobile, password, shopName, province, city, street, lat, lng) =>
val partnerManager = facade.getPartnerService
val userService=facade.getUserService
complete {
val user = userService.register(mobile, password,Constants.partner_staff_role, partnerManager.getValidateCode(mobile))
val staff: PartnerStaff = partnerManager.createPartnerFromUser(user.getId,user.getUsername, name, mobile)
var shop = partnerManager.createShop4Partner(staff.getPartner, shopName)
shop.setAddress(new Address(street, city, province))
shop.setLocation(lat, lng)
partnerManager.getShopRepository.save(shop).toJson
}
}
} ~
get {
complete {
"xxxxx"
}
}
} ~
path("shops") {
get {
parameters('name, 'page.as[Int] ? 0) { (name, page) =>
complete {
val partnerManager = Bean[PartnerService]
partnerManager.getShopRepository.findByNameLike(name, new PageRequest(page, 50))
}
} ~
parameters('page.as[Int] ? 0) { (page) =>
complete {
val partnerManager = Bean[PartnerService]
partnerManager.getShopRepository.findAll(new PageRequest(page, 50))
}
}
} ~
post {
formFields('_id, 'partner_name, 'partner_account_payment_accountId.?, 'partner_account_payment_accountType
,'morning.as[Boolean]?true,'afternoon.as[Boolean]?false,'contact_phone,'_minFare.as[Int]?20
, '_name, 'address_province, 'address_city, 'address_street, 'location_lat.as[Double], 'location_lng.as[Double], 'picture_id.?) {
(id, name, accountId, accountType, morning,afternoon,contact_phone,_minFare,shopName, province, city, street, lat, lng, picture) =>
complete {
val partnerManager = Bean[PartnerService]
var shop = partnerManager.getShopRepository.findOne(id)
if (null != shop) {
shop.setName(shopName)
shop.setAddress(new Address(street, city, province))
shop.setLocation(lat, lng)
shop.setDispatchOptions(new DispatchOptions(morning,false,afternoon))
shop.getContact.setPhone(contact_phone)
shop.setMinFare(_minFare)
picture match {
case Some(attachmentId) =>
                    val repo = Bean[AttachmentsRepository]
shop.setPicture(repo.findOne(attachmentId))
case None =>
}
partnerManager.getShopRepository.save(shop)
                val partner = shop.partner
partner.setName(name)
partnerManager.getPartnerRepository.save(partner)
accountId match {
case Some(id) =>
                    val account = partner.account
account.setPayment(new Payment(accountType, id))
partnerManager.getAccountRepository.save(account)
case None =>
}
shop.toJson
} else {
BadRequest
}
}
}
}
} ~
path("shops" / Segment) { shopId =>
complete {
val partnerManager = Bean[PartnerService]
partnerManager.getShopRepository.findOne(shopId).toJson
}
} ~
path("dispatch" / "products") {
val partnerManager = Bean[PartnerService]
post {
formFields('_id.?, '_name, '_description, '_unitPrice.as[Double],'_atomWeight.as[Float] ? 1.0f, '_unitWeight.as[Float] ? 1.0f, '_stock.as[Int],
'cat0_name.?, 'cat1_name.?, 'cat2_name.?,'_origin,'picture_id.?) {
(_id, name, description, unitPrice,_atomWeight, unitWeight, stock,cat0,cat1,cat2, origin,picture_id) =>
complete {
val id = _id.getOrElse("")
var product: DispatchProduct = null
if (id.isEmpty) {
product = new DispatchProduct(name, stock, java.math.BigDecimal.valueOf(unitPrice))
} else {
product = partnerManager.getDispatchProductRepository.findOne(id)
product.setName(name)
product.setStock(stock)
product.setUnitPrice(java.math.BigDecimal.valueOf(unitPrice))
}
picture_id match {
case Some(attachmentId) =>
                  val repo = Bean[AttachmentsRepository]
                  if (attachmentId.nonEmpty)
product.setPicture(repo.findOne(attachmentId))
case None =>
}
              product.setCat0(Category.get(cat0 getOrElse "生鲜")) // default category "生鲜" ("fresh produce")
product.setCat1(Category.get(cat1 getOrElse null))
product.setCat2(Category.get(cat2 getOrElse null))
product.setOrigin(origin)
product.setDescription(description)
product.setUnitWeight(unitWeight)
product.setAtomWeight(_atomWeight)
partnerManager.getDispatchProductRepository.save(product).toJson
}
}
} ~
get {
parameters('page.as[Int] ? 0) { (page) =>
complete {
partnerManager.getDispatchProductRepository.findAll(AbstractDocument.pageRequest(page))
}
}
}
} ~
path("dispatch" / "products" / Segment) { id =>
val partnerManager = Bean[PartnerService]
complete {
partnerManager.getDispatchProductRepository.findOne(id).toJson
}
} ~
path("dispatch" / "orders") {
val partnerManager = Bean[PartnerService]
get {
parameters('page.as[Int] ? 0) { (page) =>
complete {
partnerManager.getDispatchOrderRepository.findAll(AbstractDocument.pageRequest(page))
}
}
}
} ~
path("dispatch" / "orders" / Segment) { id =>
val partnerManager = Bean[PartnerService]
complete {
partnerManager.getDispatchOrderRepository.findOne(id).toJson
}
}~
path("advertisements"){
val partnerManager = Bean[PartnerService]
post{
formFields('picture_id,'shop_id.?,'product_id.?){ (picture_id,shop_id,product_id) =>
complete{
val picture=partnerManager.getAttachmentsRepository.findOne(picture_id)
if(null!=picture){
val shop=partnerManager.getShopRepository.findOne(shop_id getOrElse "")
val product=partnerManager.getProductRepository.findOne(product_id getOrElse "")
val advertisement=new Advertisement
advertisement.setPicture(picture)
advertisement.setRefProduct(product)
advertisement.setRefShop(shop)
partnerManager.getAdvertisementRepository.save(advertisement).toBriefJson
}else{
(StatusCodes.BadRequest,"")
}
}
}
}
}~
path("advertisements"/Segment){ adv_id =>
val partnerManager = Bean[PartnerService]
delete{
complete{
partnerManager.getAdvertisementRepository.delete(adv_id)
partnerManager.getAdvertisementRepository.findByActive(true,Advertisement.getDefaultPageable(0)).toBriefJson
}
}
}
}
}
}
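// Illustrative usage sketch (hypothetical, not from the original project): one way the
// `staffRoute` builder above could be exposed from a spray service actor. The `currentUser`
// constructor argument and the plain Jackson ObjectMapper are placeholder assumptions; a real
// deployment would also need the Spring context behind `Bean[...]` to be initialised.
class StaffHttpServiceExample(currentUser: User) extends spray.routing.HttpServiceActor with StaffRoute {
  // pass the actor context, mapper and dispatcher explicitly instead of relying on implicit resolution
  def receive = runRoute(staffRoute(currentUser)(context, new ObjectMapper(), context.dispatcher))
}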
| redlion99/akstore | akstore-server/akstore-rest/src/main/scala/me/smartco/akstore/store/rest/route/StaffRoute.scala | Scala | apache-2.0 | 9,844 |
// Copyright: 2010 - 2018 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.core
import akka.actor._
import akka.event.LoggingReceive.withLabel
import org.ensime.api._
import org.ensime.config.richconfig._
import org.ensime.util.FileUtils.toSourceFileInfo
import org.ensime.util.file._
class AnalyzerManager(
broadcaster: ActorRef,
analyzerCreator: List[EnsimeProjectId] => Props,
implicit val config: EnsimeConfig
) extends Actor
with ActorLogging
with Stash {
private val sauron =
context.actorOf(analyzerCreator(config.projects.map(_.id)))
// maps the active modules to their analyzers
private var analyzers: Map[EnsimeProjectId, ActorRef] = Map.empty
private def getOrSpawnNew(optionalId: Option[EnsimeProjectId]): ActorRef =
optionalId match {
case Some(id) =>
analyzers.get(id) match {
case Some(analyzer) =>
analyzer
case None =>
val name = s"${id.project}_${id.config}"
val newAnalyzer = context.actorOf(analyzerCreator(id :: Nil), name)
analyzers += (id -> newAnalyzer)
newAnalyzer
}
case None =>
sauron
}
override def preStart(): Unit = {
// for legacy clients on startup
broadcaster ! Broadcaster.Persist(AnalyzerReadyEvent)
broadcaster ! Broadcaster.Persist(FullTypeCheckCompleteEvent)
}
override def receive: Receive = ready
private def ready: Receive = withLabel("ready") {
case req @ RestartScalaCompilerReq(id, _) =>
if (analyzers.isEmpty)
broadcaster ! AnalyzerReadyEvent
else
id match {
case Some(projectId) =>
analyzers.get(projectId).foreach(_ forward req)
case None =>
analyzers.values foreach (_ forward req)
}
case req @ UnloadAllReq =>
analyzers.foreach {
case (_, analyzer) => analyzer forward req
}
case req @ TypecheckModule(moduleId) =>
getOrSpawnNew(Some(moduleId)) forward req
case req @ RemoveFileReq(file: File) =>
val fileInfo = SourceFileInfo(RawFile(file.toPath), None, None)
getOrSpawnNew(config.findProject(fileInfo)) forward req
case req @ TypecheckFileReq(fileInfo) =>
getOrSpawnNew(config.findProject(fileInfo)) forward req
case req @ TypecheckFilesReq(files) =>
val original = sender
val filesPerProject = files.groupBy(config.findProject(_))
context.actorOf(Props(new Actor {
private var remaining = filesPerProject.size
private var aggregate: List[String] = List.empty
override def preStart: Unit =
for ((optionalModuleId, list) <- filesPerProject)
getOrSpawnNew(optionalModuleId) ! TypecheckFilesReq(list)
override def receive = {
case res: RpcResponse if remaining > 1 =>
aggregate = addResponse(res, aggregate)
remaining -= 1
case res: RpcResponse =>
aggregate = addResponse(res, aggregate)
original ! combine(aggregate)
context.stop(self)
}
        def addResponse(res: RpcResponse, agg: List[String]) = res match {
          case EnsimeServerError(desc) =>
            desc :: agg
          case _ =>
            agg
        }
        def combine(errors: List[String]): RpcResponse =
          if (errors.isEmpty) // had no errors; return a VoidResponse
            VoidResponse
          else // return the cumulative error
            EnsimeServerError(errors mkString ", ")
}))
case req @ RefactorReq(_, _, _) =>
val original = sender
context.actorOf(Props(new Actor {
override def preStart(): Unit =
context.actorOf(analyzerCreator(config.projects.map(_.id))) ! req
override def receive = {
case res: RpcResponse =>
original ! res
context.stop(self)
}
}))
case req @ CompletionsReq(fileInfo, _, _, _, _) =>
getOrSpawnNew(config.findProject(fileInfo)) forward req
case req @ FqnOfSymbolAtPointReq(file, point) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ FqnOfTypeAtPointReq(file, point) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ SymbolAtPointReq(file, point: Int) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ DocUriAtPointReq(file, range: OffsetRange) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ TypeAtPointReq(file, range: OffsetRange) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ SymbolDesignationsReq(file, start, end, _) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ ImplicitInfoReq(file, range: OffsetRange) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ ExpandSelectionReq(file, start: Int, stop: Int) =>
val fileInfo = SourceFileInfo(RawFile(file.toPath), None, None)
getOrSpawnNew(config.findProject(fileInfo)) forward req
case req @ StructureViewReq(fileInfo: SourceFileInfo) =>
getOrSpawnNew(config.findProject(fileInfo)) forward req
case req @ UnloadFileReq(file) =>
getOrSpawnNew(config.findProject(file)) forward req
}
}
object AnalyzerManager {
def apply(
broadcaster: ActorRef,
creator: List[EnsimeProjectId] => Props
)(
implicit
config: EnsimeConfig
) = Props(new AnalyzerManager(broadcaster, creator, config))
}
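// Illustrative wiring sketch (hypothetical, not from the original project): shows how the props
// factory above is typically instantiated. The actor system, broadcaster ref and per-project
// analyzer creator are placeholders supplied by the caller.
object AnalyzerManagerWiringExample {
  def wire(system: ActorSystem, broadcaster: ActorRef, creator: List[EnsimeProjectId] => Props)(
    implicit config: EnsimeConfig
  ): ActorRef =
    system.actorOf(AnalyzerManager(broadcaster, creator), "analyzer-manager")
}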
| yyadavalli/ensime-server | core/src/main/scala/org/ensime/core/AnalyzerManager.scala | Scala | gpl-3.0 | 5,575 |
/*
* Copyright 2018 Zhang Di
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dizhang.seqspark.worker
import java.net.URI
import java.nio.file.{Files, Path, Paths}
import org.dizhang.seqspark.ds.Genotype
import org.dizhang.seqspark.ds.VCF._
import org.dizhang.seqspark.util.SeqContext
import org.dizhang.seqspark.util.UserConfig.hdfs
import org.apache.hadoop
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
object Export {
private val logger = LoggerFactory.getLogger(getClass)
def apply[A: Genotype](data: Data[A])(implicit ssc: SeqContext): Unit = {
val geno = implicitly[Genotype[A]]
val conf = ssc.userConfig.output.genotype
if (conf.export) {
val path = if (conf.path.isEmpty)
ssc.userConfig.input.genotype.path + "." + ssc.userConfig.project
else
conf.path
logger.info(s"going to export data to $path")
if (path.startsWith("file:")) {
val p = Paths.get(URI.create(path))
if (Files.exists(p)) {
Files.walk(p)
.iterator()
.asScala
.toList
.sorted(Ordering[Path].reverse)
.foreach(f => Files.delete(f))
}
} else {
val hdPath = new hadoop.fs.Path(path)
if (hdfs.exists(hdPath)) {
hdfs.delete(hdPath, true)
}
}
data.samples(conf.samples).saveAsTextFile(path)
}
if (conf.save || conf.cache) {
data.saveAsObjectFile(conf.path)
}
}
}
| statgenetics/seqspark | src/main/scala/org/dizhang/seqspark/worker/Export.scala | Scala | apache-2.0 | 2,013 |
import sbt._
object B extends Build
{
lazy val root = Project("root", file(".")).dependsOn( file("../plugin") )
}
| jaceklaskowski/sbt | sbt/src/sbt-test/project/src-plugins/project/project/P.scala | Scala | bsd-3-clause | 115
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.sbt
import java.io.Closeable
import java.io.FileDescriptor
import java.io.FileInputStream
import java.io.FilterInputStream
import java.io.InputStream
import jline.console.ConsoleReader
import scala.annotation.tailrec
import scala.concurrent.duration._
trait PlayInteractionMode {
/**
* This is our means of blocking a `play run` call until
* the user has denoted, via some interface (console or GUI) that
* play should no longer be running.
*/
def waitForCancel(): Unit
/**
* Enables and disables console echo (or does nothing if no console).
* This ensures console echo is enabled on exception thrown in the
* given code block.
*/
def doWithoutEcho(f: => Unit): Unit
}
/**
* Marker trait to signify a non-blocking interaction mode.
*
* This is provided, rather than adding a new flag to PlayInteractionMode, to preserve binary compatibility.
*/
trait PlayNonBlockingInteractionMode extends PlayInteractionMode {
def waitForCancel() = ()
def doWithoutEcho(f: => Unit) = f
/**
* Start the server, if not already started
*
* @param server A callback to start the server, that returns a closeable to stop it
*/
def start(server: => Closeable): Unit
/**
* Stop the server started by the last start request, if such a server exists
*/
def stop(): Unit
}
/**
* Default behavior for interaction mode is to wait on JLine.
*/
object PlayConsoleInteractionMode extends PlayInteractionMode {
// This wraps the InputStream with some sleep statements so it becomes interruptible.
private[play] class InputStreamWrapper(is: InputStream, val poll: Duration) extends FilterInputStream(is) {
@tailrec final override def read(): Int =
if (is.available() != 0) is.read()
else {
Thread.sleep(poll.toMillis)
read()
}
@tailrec final override def read(b: Array[Byte]): Int =
if (is.available() != 0) is.read(b)
else {
Thread.sleep(poll.toMillis)
read(b)
}
@tailrec final override def read(b: Array[Byte], off: Int, len: Int): Int =
if (is.available() != 0) is.read(b, off, len)
else {
Thread.sleep(poll.toMillis)
read(b, off, len)
}
}
private def createReader: ConsoleReader = {
val in = new InputStreamWrapper(new FileInputStream(FileDescriptor.in), 2.milliseconds)
new ConsoleReader(in, System.out)
}
private def withConsoleReader[T](f: ConsoleReader => T): T = {
val consoleReader = createReader
try f(consoleReader)
finally consoleReader.close()
}
private def waitForKey(): Unit = {
withConsoleReader { consoleReader =>
@tailrec def waitEOF(): Unit = {
consoleReader.readCharacter() match {
case 4 | 13 | -1 => // STOP on Ctrl-D, Enter or EOF (listen to -1 for jline2, for some reason...)
case 11 => consoleReader.clearScreen(); waitEOF()
case 10 => println(); waitEOF()
case _ => waitEOF()
}
}
doWithoutEcho(waitEOF())
}
}
def doWithoutEcho(f: => Unit): Unit = {
withConsoleReader { consoleReader =>
val terminal = consoleReader.getTerminal
terminal.setEchoEnabled(false)
try f
finally terminal.restore()
}
}
override def waitForCancel(): Unit = waitForKey()
override def toString = "Console Interaction Mode"
}
/**
* Simple implementation of the non-blocking interaction mode
* that simply stores the current application in a static variable.
*/
object StaticPlayNonBlockingInteractionMode extends PlayNonBlockingInteractionMode {
private var current: Option[Closeable] = None
/**
* Start the server, if not already started
*
* @param server A callback to start the server, that returns a closeable to stop it
*/
def start(server: => Closeable) = synchronized {
current match {
case Some(_) => println("Not starting server since one is already started")
case None =>
println("Starting server")
current = Some(server)
}
}
/**
* Stop the server started by the last start request, if such a server exists
*/
def stop() = synchronized {
current match {
case None => println("Not stopping server since none is started")
case Some(server) =>
println("Stopping server")
server.close()
current = None
}
}
}
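// Illustrative sketch (hypothetical, not from the original sources): demonstrates the
// non-blocking contract above using a throwaway Closeable in place of a real server.
object StaticPlayNonBlockingInteractionModeExample {
  def demo(): Unit = {
    val fakeServer: Closeable = new Closeable { def close(): Unit = println("server closed") }
    StaticPlayNonBlockingInteractionMode.start(fakeServer) // stores and "starts" the server
    StaticPlayNonBlockingInteractionMode.start(fakeServer) // refused: one is already started
    StaticPlayNonBlockingInteractionMode.stop()            // closes and clears it
  }
}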
| wegtam/playframework | dev-mode/sbt-plugin/src/main/scala/play/sbt/PlayInteractionMode.scala | Scala | apache-2.0 | 4,480 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.test
import java.io.File
import java.net.URI
import java.nio.file.Files
import java.util.{Locale, UUID}
import scala.concurrent.duration._
import scala.language.implicitConversions
import scala.util.control.NonFatal
import org.apache.hadoop.fs.Path
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.scalatest.concurrent.Eventually
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
import org.apache.spark.sql.catalyst.catalog.SessionCatalog.DEFAULT_DATABASE
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.PlanTestBase
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.FilterExec
import org.apache.spark.sql.execution.datasources.DataSourceUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.UninterruptibleThread
import org.apache.spark.util.Utils
/**
* Helper trait that should be extended by all SQL test suites within the Spark
* code base.
*
* This allows subclasses to plugin a custom `SQLContext`. It comes with test data
* prepared in advance as well as all implicit conversions used extensively by dataframes.
* To use implicit methods, import `testImplicits._` instead of through the `SQLContext`.
*
* Subclasses should *not* create `SQLContext`s in the test suite constructor, which is
* prone to leaving multiple overlapping [[org.apache.spark.SparkContext]]s in the same JVM.
*/
private[sql] trait SQLTestUtils extends SparkFunSuite with SQLTestUtilsBase with PlanTest {
// Whether to materialize all test data before the first test is run
private var loadTestDataBeforeTests = false
protected override def beforeAll(): Unit = {
super.beforeAll()
if (loadTestDataBeforeTests) {
loadTestData()
}
}
/**
* Creates a temporary directory, which is then passed to `f` and will be deleted after `f`
* returns.
*/
protected override def withTempDir(f: File => Unit): Unit = {
super.withTempDir { dir =>
f(dir)
waitForTasksToFinish()
}
}
/**
* A helper function for turning off/on codegen.
*/
protected def testWithWholeStageCodegenOnAndOff(testName: String)(f: String => Unit): Unit = {
Seq("false", "true").foreach { codegenEnabled =>
val isTurnOn = if (codegenEnabled == "true") "on" else "off"
test(s"$testName (whole-stage-codegen ${isTurnOn})") {
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> codegenEnabled) {
f(codegenEnabled)
}
}
}
}
/**
* Materialize the test data immediately after the `SQLContext` is set up.
* This is necessary if the data is accessed by name but not through direct reference.
*/
protected def setupTestData(): Unit = {
loadTestDataBeforeTests = true
}
/**
* Disable stdout and stderr when running the test. To not output the logs to the console,
* ConsoleAppender's `follow` should be set to `true` so that it will honor reassignments of
* System.out or System.err. Otherwise, ConsoleAppender will still output to the console even if
* we change System.out and System.err.
*/
protected def testQuietly(name: String)(f: => Unit): Unit = {
test(name) {
quietly {
f
}
}
}
/**
* Run a test on a separate `UninterruptibleThread`.
*/
protected def testWithUninterruptibleThread(name: String, quietly: Boolean = false)
(body: => Unit): Unit = {
val timeoutMillis = 10000
@transient var ex: Throwable = null
def runOnThread(): Unit = {
val thread = new UninterruptibleThread(s"Testing thread for test $name") {
override def run(): Unit = {
try {
body
} catch {
case NonFatal(e) =>
ex = e
}
}
}
thread.setDaemon(true)
thread.start()
thread.join(timeoutMillis)
if (thread.isAlive) {
thread.interrupt()
// If this interrupt does not work, then this thread is most likely running something that
        // is not interruptible. There is not much point in waiting for the thread to terminate,
        // and we would rather let the JVM terminate the thread on exit.
fail(
s"Test '$name' running on o.a.s.util.UninterruptibleThread timed out after" +
s" $timeoutMillis ms")
} else if (ex != null) {
throw ex
}
}
if (quietly) {
testQuietly(name) { runOnThread() }
} else {
test(name) { runOnThread() }
}
}
/**
   * Copies a file from the jar's resources to a temp file, then passes it to `f`.
   * This is done so that `f` can use the path of the temp file (e.g. file:/) instead of
   * the path of the jar resource, which starts with 'jar:file:/'.
*/
protected def withResourceTempPath(resourcePath: String)(f: File => Unit): Unit = {
val inputStream =
Thread.currentThread().getContextClassLoader.getResourceAsStream(resourcePath)
withTempDir { dir =>
val tmpFile = new File(dir, "tmp")
Files.copy(inputStream, tmpFile.toPath)
f(tmpFile)
}
}
/**
* Waits for all tasks on all executors to be finished.
*/
protected def waitForTasksToFinish(): Unit = {
eventually(timeout(10.seconds)) {
assert(spark.sparkContext.statusTracker
.getExecutorInfos.map(_.numRunningTasks()).sum == 0)
}
}
/**
   * Creates the specified number of temporary directories, which are then passed to `f` and will
   * be deleted after `f` returns.
*/
protected def withTempPaths(numPaths: Int)(f: Seq[File] => Unit): Unit = {
val files = Array.fill[File](numPaths)(Utils.createTempDir().getCanonicalFile)
try f(files) finally {
// wait for all tasks to finish before deleting files
waitForTasksToFinish()
files.foreach(Utils.deleteRecursively)
}
}
}
/**
* Helper trait that can be extended by all external SQL test suites.
*
* This allows subclasses to plugin a custom `SQLContext`.
* To use implicit methods, import `testImplicits._` instead of through the `SQLContext`.
*
* Subclasses should *not* create `SQLContext`s in the test suite constructor, which is
* prone to leaving multiple overlapping [[org.apache.spark.SparkContext]]s in the same JVM.
*/
private[sql] trait SQLTestUtilsBase
extends Eventually
with BeforeAndAfterAll
with SQLTestData
with PlanTestBase { self: Suite =>
protected def sparkContext = spark.sparkContext
// Shorthand for running a query using our SQLContext
protected lazy val sql = spark.sql _
/**
* A helper object for importing SQL implicits.
*
* Note that the alternative of importing `spark.implicits._` is not possible here.
* This is because we create the `SQLContext` immediately before the first test is run,
* but the implicits import is needed in the constructor.
*/
protected object testImplicits extends SQLImplicits {
protected override def _sqlContext: SQLContext = self.spark.sqlContext
}
protected override def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = {
SparkSession.setActiveSession(spark)
super.withSQLConf(pairs: _*)(f)
}
/**
* Drops functions after calling `f`. A function is represented by (functionName, isTemporary).
*/
protected def withUserDefinedFunction(functions: (String, Boolean)*)(f: => Unit): Unit = {
try {
f
} catch {
case cause: Throwable => throw cause
} finally {
// If the test failed part way, we don't want to mask the failure by failing to remove
// temp tables that never got created.
functions.foreach { case (functionName, isTemporary) =>
val withTemporary = if (isTemporary) "TEMPORARY" else ""
spark.sql(s"DROP $withTemporary FUNCTION IF EXISTS $functionName")
assert(
!spark.sessionState.catalog.functionExists(FunctionIdentifier(functionName)),
s"Function $functionName should have been dropped. But, it still exists.")
}
}
}
/**
* Drops temporary view `viewNames` after calling `f`.
*/
protected def withTempView(viewNames: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f) {
viewNames.foreach { viewName =>
try spark.catalog.dropTempView(viewName) catch {
// If the test failed part way, we don't want to mask the failure by failing to remove
// temp views that never got created.
case _: NoSuchTableException =>
}
}
}
}
/**
* Drops global temporary view `viewNames` after calling `f`.
*/
protected def withGlobalTempView(viewNames: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f) {
viewNames.foreach { viewName =>
try spark.catalog.dropGlobalTempView(viewName) catch {
// If the test failed part way, we don't want to mask the failure by failing to remove
// global temp views that never got created.
case _: NoSuchTableException =>
}
}
}
}
/**
* Drops table `tableName` after calling `f`.
*/
protected def withTable(tableNames: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f) {
tableNames.foreach { name =>
spark.sql(s"DROP TABLE IF EXISTS $name")
}
}
}
/**
* Drops view `viewName` after calling `f`.
*/
protected def withView(viewNames: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f)(
viewNames.foreach { name =>
spark.sql(s"DROP VIEW IF EXISTS $name")
}
)
}
/**
* Drops cache `cacheName` after calling `f`.
*/
protected def withCache(cacheNames: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f) {
cacheNames.foreach { cacheName =>
try uncacheTable(cacheName) catch {
case _: AnalysisException =>
}
}
}
}
// Blocking uncache table for tests
protected def uncacheTable(tableName: String): Unit = {
val tableIdent = spark.sessionState.sqlParser.parseTableIdentifier(tableName)
val cascade = !spark.sessionState.catalog.isTemporaryTable(tableIdent)
spark.sharedState.cacheManager.uncacheQuery(
spark,
spark.table(tableName).logicalPlan,
cascade = cascade,
blocking = true)
}
/**
   * Creates a temporary database, which is then passed to `f` by name, and drops it after `f`
   * returns.
   *
   * Note that this method doesn't switch the current database to the temporary one before
   * executing `f`.
*/
protected def withTempDatabase(f: String => Unit): Unit = {
val dbName = s"db_${UUID.randomUUID().toString.replace('-', '_')}"
try {
spark.sql(s"CREATE DATABASE $dbName")
} catch { case cause: Throwable =>
fail("Failed to create temporary database", cause)
}
try f(dbName) finally {
if (spark.catalog.currentDatabase == dbName) {
spark.sql(s"USE $DEFAULT_DATABASE")
}
spark.sql(s"DROP DATABASE $dbName CASCADE")
}
}
/**
* Drops database `dbName` after calling `f`.
*/
protected def withDatabase(dbNames: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f) {
dbNames.foreach { name =>
spark.sql(s"DROP DATABASE IF EXISTS $name CASCADE")
}
spark.sql(s"USE $DEFAULT_DATABASE")
}
}
/**
   * Sets the default `Locale` to `language` before executing `f`, then switches back to the JVM's
   * original default locale after `f` returns.
*/
protected def withLocale(language: String)(f: => Unit): Unit = {
val originalLocale = Locale.getDefault
try {
// Add Locale setting
Locale.setDefault(new Locale(language))
f
} finally {
Locale.setDefault(originalLocale)
}
}
/**
* Activates database `db` before executing `f`, then switches back to `default` database after
* `f` returns.
*/
protected def activateDatabase(db: String)(f: => Unit): Unit = {
spark.sessionState.catalog.setCurrentDatabase(db)
Utils.tryWithSafeFinally(f)(spark.sessionState.catalog.setCurrentDatabase("default"))
}
/**
* Strip Spark-side filtering in order to check if a datasource filters rows correctly.
*/
protected def stripSparkFilter(df: DataFrame): DataFrame = {
val schema = df.schema
val withoutFilters = df.queryExecution.executedPlan.transform {
case FilterExec(_, child) => child
}
spark.internalCreateDataFrame(withoutFilters.execute(), schema)
}
/**
* Turn a logical plan into a `DataFrame`. This should be removed once we have an easier
* way to construct `DataFrame` directly out of local data without relying on implicits.
*/
protected implicit def logicalPlanToSparkQuery(plan: LogicalPlan): DataFrame = {
Dataset.ofRows(spark, plan)
}
/**
   * Makes the given path qualified. When a path does not contain a scheme, qualifying it binds the
   * path to the current default FileSystem, so its meaning will not change if the default
   * FileSystem is changed later.
*/
def makeQualifiedPath(path: String): URI = {
val hadoopPath = new Path(path)
val fs = hadoopPath.getFileSystem(spark.sessionState.newHadoopConf())
fs.makeQualified(hadoopPath).toUri
}
/**
* Returns full path to the given file in the resource folder
*/
protected def testFile(fileName: String): String = {
Thread.currentThread().getContextClassLoader.getResource(fileName).toString
}
/**
   * Returns the total size of the data files in the local directory, excluding metadata and
   * temporary files.
*/
def getLocalDirSize(file: File): Long = {
assert(file.isDirectory)
file.listFiles.filter(f => DataSourceUtils.isDataFile(f.getName)).map(_.length).sum
}
}
private[sql] object SQLTestUtils {
def compareAnswers(
sparkAnswer: Seq[Row],
expectedAnswer: Seq[Row],
sort: Boolean): Option[String] = {
def prepareAnswer(answer: Seq[Row]): Seq[Row] = {
// Converts data to types that we can do equality comparison using Scala collections.
// For BigDecimal type, the Scala type has a better definition of equality test (similar to
// Java's java.math.BigDecimal.compareTo).
// For binary arrays, we convert it to Seq to avoid of calling java.util.Arrays.equals for
// equality test.
// This function is copied from Catalyst's QueryTest
val converted: Seq[Row] = answer.map { s =>
Row.fromSeq(s.toSeq.map {
case d: java.math.BigDecimal => BigDecimal(d)
case b: Array[Byte] => b.toSeq
case o => o
})
}
if (sort) {
converted.sortBy(_.toString())
} else {
converted
}
}
if (prepareAnswer(expectedAnswer) != prepareAnswer(sparkAnswer)) {
val errorMessage =
s"""
| == Results ==
| ${sideBySide(
s"== Expected Answer - ${expectedAnswer.size} ==" +:
prepareAnswer(expectedAnswer).map(_.toString()),
s"== Actual Answer - ${sparkAnswer.size} ==" +:
prepareAnswer(sparkAnswer).map(_.toString())).mkString("\\n")}
""".stripMargin
Some(errorMessage)
} else {
None
}
}
}
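// Illustrative usage sketch (hypothetical, not part of Spark): a trait like this would be mixed
// into a concrete suite that also provides the SparkSession (e.g. via a shared-session trait).
// The view name and sample rows are arbitrary placeholders.
private[sql] trait SQLTestUtilsUsageExample extends SQLTestUtils {
  import testImplicits._

  test("example: temp view is dropped by withTempView") {
    withTempView("people_example") {
      Seq((1, "a"), (2, "b")).toDF("id", "name").createOrReplaceTempView("people_example")
      assert(spark.table("people_example").count() == 2)
    }
  }
}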
| pgandhi999/spark | sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala | Scala | apache-2.0 | 16,080 |
package com.peterpotts.interview
import scala.collection.immutable.Seq
case class Point(x: Double, y: Double)
object TwinCake {
private val radius = 100.0
def cut(cherries: Seq[Point]): Option[Point] = {
def angle(point: Point) = math.toDegrees(math.atan2(point.y, point.x))
slice(cherries.map(angle)).map { theta =>
Point(radius * math.cos(math.toRadians(theta)), radius * math.sin(math.toRadians(theta)))
}
}
def slice(cherries: Seq[Double]): Option[Double] = {
val vectors: Seq[(Double, Int)] = cherries.map { angle =>
if (angle < 180.0) angle -> 1 else (angle - 180.0) -> -1
}
val dedupedVectors: Seq[(Double, Int)] = vectors.groupBy(_._1).mapValues(_.map(_._2).sum).toIndexedSeq.sortBy(_._1)
val angles: Seq[Double] = dedupedVectors.map(_._1)
val directions: Seq[Int] = dedupedVectors.map(_._2)
val half = directions.sum / 2
val sums = directions.scan(0)(_ + _).tail
if (angles.size < 2) {
None
} else {
sums.zipWithIndex.find(_._1 == half).map(_._2).flatMap { index =>
if (index + 1 == angles.size) {
// Wrap
val preAngle = angles(index)
val postAngle = angles.head + 180.0
val angle = (preAngle + postAngle) / 2.0
Some(if (angle < 180.0) angle else angle - 180.0)
} else {
// No wrap
val preAngle = angles(index)
val postAngle = angles(index + 1)
Some((preAngle + postAngle) / 2.0)
}
}
}
}
}
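// Illustrative usage sketch (hypothetical): shows the expected call pattern for `cut` — cherries
// given as points on a cake of radius 100, with the result (if any) marking the direction of a
// straight cut through the centre intended to split the cherries evenly. The sample coordinates
// below are arbitrary.
object TwinCakeUsageExample {
  def main(args: Array[String]): Unit = {
    val cherries = Vector(Point(10.0, 20.0), Point(-30.0, 5.0), Point(0.0, -40.0), Point(25.0, -15.0))
    TwinCake.cut(cherries) match {
      case Some(edge) => println(s"Cut the cake towards (${edge.x}, ${edge.y})")
      case None       => println("No balancing cut found")
    }
  }
}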
| peterpotts/interview | src/main/scala/com/peterpotts/interview/TwinCake.scala | Scala | mit | 1,513 |
package org.http4s
package headers
import org.http4s.parser.HttpHeaderParser
import org.http4s.util.Writer
object `X-B3-TraceId` extends HeaderKey.Internal[`X-B3-TraceId`] with HeaderKey.Singleton {
override def parse(s: String): ParseResult[`X-B3-TraceId`] =
HttpHeaderParser.X_B3_TRACEID(s)
}
final case class `X-B3-TraceId`(id: Long) extends Header.Parsed {
override def key: `X-B3-TraceId`.type = `X-B3-TraceId`
override def renderValue(writer: Writer): writer.type =
xB3RenderValueImpl(writer, id)
}
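// Illustrative usage sketch (hypothetical): constructs the header directly and feeds its rendered
// value back through `parse`, folding over the resulting ParseResult. The literal trace id is
// arbitrary and the round trip assumes the parser accepts the rendered form.
object XB3TraceIdUsageExample {
  def demo(): Unit = {
    val header = `X-B3-TraceId`(123456789L)
    `X-B3-TraceId`.parse(header.value).fold(
      failure => println(s"could not parse X-B3-TraceId: $failure"),
      parsed  => println(s"parsed trace id: ${parsed.id}")
    )
  }
}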
| reactormonk/http4s | core/src/main/scala/org/http4s/headers/X-B3-TraceId.scala | Scala | apache-2.0 | 523 |
package com.github.j5ik2o.reactive.redis.command.transactions
import java.util.UUID
import com.github.j5ik2o.reactive.redis.RedisIOException
import com.github.j5ik2o.reactive.redis.command.{ CommandRequest, CommandResponse, StringParsersSupport }
import com.github.j5ik2o.reactive.redis.parser.StringParsers._
import com.github.j5ik2o.reactive.redis.parser.model.{ ErrorExpr, Expr, SimpleExpr }
import fastparse.all._
final class MultiRequest(val id: UUID) extends CommandRequest with StringParsersSupport {
override type Response = MultiResponse
override val isMasterOnly: Boolean = true
override def asString: String = "MULTI"
override protected lazy val responseParser: P[Expr] = fastParse(simpleStringReply | errorReply)
override protected lazy val parseResponse: Handler = {
case (SimpleExpr(_), next) =>
(MultiSucceeded(UUID.randomUUID(), id), next)
case (ErrorExpr(msg), next) =>
(MultiFailed(UUID.randomUUID(), id, RedisIOException(Some(msg))), next)
}
override def equals(other: Any): Boolean = other match {
case that: MultiRequest =>
id == that.id
case _ => false
}
@SuppressWarnings(Array("org.wartremover.warts.JavaSerializable"))
override def hashCode(): Int = {
val state = Seq(id)
state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
override def toString: String = s"MultiRequest($id)"
}
object MultiRequest {
def apply(id: UUID): MultiRequest = new MultiRequest(id)
def unapply(self: MultiRequest): Option[UUID] = Some(self.id)
def create(id: UUID): MultiRequest = apply(id)
}
sealed trait MultiResponse extends CommandResponse
final case class MultiSucceeded(id: UUID, requestId: UUID) extends MultiResponse
final case class MultiFailed(id: UUID, requestId: UUID, ex: RedisIOException) extends MultiResponse
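// Illustrative usage sketch (hypothetical): two requests sharing a UUID compare equal even though
// they are distinct instances, which is what the custom equals/hashCode above encodes, and the
// wire form of the command is always the literal "MULTI".
object MultiRequestUsageExample {
  def demo(): Unit = {
    val id = UUID.randomUUID()
    val a  = MultiRequest(id)
    val b  = MultiRequest.create(id)
    assert(a == b && a.hashCode == b.hashCode)
    assert(a.asString == "MULTI")
  }
}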
| j5ik2o/reactive-redis | core/src/main/scala/com/github/j5ik2o/reactive/redis/command/transactions/MultiRequest.scala | Scala | mit | 1,894 |
/*
* Copyright 2007-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package ext_api {
package facebook {
import _root_.java.net.{HttpURLConnection, URL, URLEncoder}
import _root_.java.io.DataOutputStream
import _root_.java.util.Date
import _root_.scala.xml.{Node, XML, NodeSeq}
object FacebookRestApi {
def apiKey = System.getProperty("com.facebook.api_key")
def secret = System.getProperty("com.facebook.secret")
def apiKey_=(key: String) = System.setProperty("com.facebook.api_key", key)
def secret_=(key: String) = System.setProperty("com.facebook.secret", key)
}
object FacebookClient {
import FacebookRestApi._
val TARGET_API_VERSION = "1.0"
val FB_SERVER = "api.facebook.com/restserver.php"
val SERVER_ADDR = "http://" + FB_SERVER
val HTTPS_SERVER_ADDR = "https://" + FB_SERVER
val SERVER_URL = new URL(SERVER_ADDR)
val HTTPS_SERVER_URL = new URL(HTTPS_SERVER_ADDR)
val CrLf = "\r\n"
val Pref = "--"
def urlEncode(name: String): String = URLEncoder.encode(name, "UTF-8")
def stripSig(in: String): String = if (in != null && in.startsWith("fb_sig_")) in.substring(7) else in
def convert(in: List[(String, Any)]): List[String] = in.map{case (name, value) => stripSig(name)+"="+value}
def byteToHex(b: Byte): String = Integer.toHexString((b & 0xf0) >>> 4) + Integer.toHexString(b & 0x0f)
def genSignature(allParams: List[(String, Any)], secret: String): String = {
val md = _root_.java.security.MessageDigest.getInstance("MD5")
val theStr = convert(allParams).sort(_ < _).mkString("") + secret
md.digest((theStr).getBytes).map(byteToHex(_)).mkString("")
}
private[facebook] def call(params: List[(String, Any)]): Node = {
val theParams = params.map{case (name, value) => urlEncode(name)+"="+urlEncode(value.toString)}.mkString("&")
SERVER_URL.openConnection match {
case conn: HttpURLConnection => {
conn.setRequestMethod("POST") // [ticket #27]
conn.setDoOutput(true)
conn.connect
conn.getOutputStream.write(theParams.getBytes())
XML.load(conn.getInputStream())
}
}
}
private[facebook] def buildParams(methodName: String, params: FacebookParam*): List[(String, Any)] = {
val allParams: List[(String, Any)] =
("method", methodName) ::
("api_key", apiKey) ::
("v", TARGET_API_VERSION) ::
params.map(p => (p.key, p.value)).toList
val signature = genSignature(allParams, secret)
val ret = "sig" -> signature :: allParams
ret
}
def callMethod(meth: SessionlessFacebookMethod): Node =
call(buildParams(meth.name, meth.params: _*))
def !?(meth: SessionlessFacebookMethod): Node =
callMethod(meth)
def fromSession(session: FacebookSession) : FacebookClient = {
new FacebookClient(session)
}
def fromAuthToken(authToken: String) : Option[FacebookClient] = {
FacebookSession.fromAuthToken(authToken).map(fromSession)
}
type State = {
def sessionKey: Option[String]
def expiration: Option[Long]
def uid: Option[String]
}
def fromState(implicit state: State) : Option[FacebookClient] = {
for (
key <- state.sessionKey;
exp <- state.expiration;
uid <- state.uid
) yield fromSession(FacebookSession(key, exp, uid))
}
}
class FacebookClient(val apiKey: String, val secret: String, val session: FacebookSession) {
import FacebookRestApi._
import FacebookClient._
def this(session: FacebookSession) = this(FacebookRestApi.apiKey, FacebookRestApi.secret, session)
def callMethod(meth: FacebookMethod, fileName: String, mimeType: String, file: Array[Byte], params: FacebookParam* ): Node = {
val boundary = System.currentTimeMillis.toString
SERVER_URL.openConnection match {
case conn: HttpURLConnection => {
conn.setDoInput(true)
conn.setDoOutput(true)
conn.setUseCaches(false)
conn.setRequestProperty("Content-Type", "multipart/form-data; boundary=" + boundary)
conn.setRequestProperty("MIME-version", "1.0")
val out = new DataOutputStream(conn.getOutputStream())
buildParams(meth, params: _*).foreach {
case (name, value) =>
out.writeBytes(Pref + boundary + CrLf)
out.writeBytes("Content-disposition: form-data; name='" + name + "'")
out.writeBytes(CrLf + CrLf + value.toString + CrLf)
}
out.writeBytes(Pref + boundary + CrLf)
out.writeBytes("Content-disposition: form-data; filename='" +
fileName + "'" + CrLf)
out.writeBytes("Content-Type: " + mimeType + CrLf + CrLf)
out.write(file)
out.writeBytes(CrLf + Pref + boundary + Pref + CrLf)
out.flush()
out.close()
XML.load(conn.getInputStream)
}
}
}
def callMethod(meth: FacebookMethod): Node =
call(buildParams(meth, meth.params: _*))
def callMethod(meth: FacebookMethod, otherParams: FacebookParam*): Node =
call(buildParams(meth, (meth.params.toList ::: otherParams.toList): _*))
def !?(meth: FacebookMethod): Node = callMethod(meth)
def !?(meth: FacebookMethod, otherParams: UniversalParam*) = callMethod(meth, otherParams: _*)
def !?(meth: UploadPhoto): Node = callMethod(meth, meth.fileName, meth.mimeType, meth.fileData)
private def buildParams(meth: FacebookMethod, params: FacebookParam*): List[(String, Any)] = {
val allParams: List[FacebookParam] =
(if (meth.requiresSession)
List(FacebookParam("call_id" -> System.currentTimeMillis), FacebookParam("session_key" -> session.key))
else
Nil) :::
params.toList
FacebookClient.buildParams(meth.name, allParams: _*)
}
def getInfo(users: Collection[Long], fields: FacebookField*): Node = {
callMethod(GetInfo(users, fields: _*))
}
}
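// Illustrative usage sketch (hypothetical, not part of the original module): the typical bootstrap
// implied by the API above is to exchange an auth token for a session and then issue session-bound
// calls. The token is a placeholder and the com.facebook.api_key/secret system properties are
// assumed to have been set elsewhere.
object FacebookClientUsageExample {
  def demo(authToken: String): Unit =
    FacebookClient.fromAuthToken(authToken) match {
      case Some(client) =>
        val response = client !? GetUser
        println("Logged-in user id: " + response.text)
      case None =>
        println("The auth token could not be exchanged for a session")
    }
}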
object FacebookSession {
def apply(key: String, expiration: Long, uid: String) : FacebookSession =
new FacebookSession(key, expiration, uid)
def fromAuthToken(authToken: String): Option[FacebookSession] = {
val response = FacebookClient !? AuthGetSession(authToken)
val key = (response \\ "session_key").text
val uid = (response \\ "uid").text
val expiration = (response \\ "expires").text
if (key == "")
None
else
Some(FacebookSession(key, expiration.toLong, uid))
}
}
class FacebookSession(val key: String, val expiration: Long, val uid: String)
class FacebookMethod(val name: String, attachment: Boolean, val params: FacebookParam*) {
def this(nm: String, params: FacebookParam*) = { this(nm, false, params: _*) }
def requiresSession: Boolean = true
}
class SessionlessFacebookMethod(override val name: String, override val params: FacebookParam*) extends FacebookMethod(name, false, params: _*) {
override def requiresSession = false
}
case object AuthCreateToken extends SessionlessFacebookMethod("facebook.auth.createToken")
case class AuthGetSession(authToken: String) extends SessionlessFacebookMethod("facebook.auth.getSession", AuthToken(authToken))
case class GetFriends(optionalParams: GetFriendsParam*) extends FacebookMethod("facebook.friends.get", optionalParams: _*)
case object GetFriendLists extends FacebookMethod("facebook.friends.getLists")
case class FqlQuery(query: String) extends FacebookMethod("facebook.fql.query", Query(query))
case class GetEvents(filters: GetEventsParam*) extends FacebookMethod("facebook.events.get", filters: _*)
case class GetEventsMembers(eventId: Long) extends FacebookMethod("facebook.events.getMembers", EventId(eventId))
case object GetAppUsers extends FacebookMethod("facebook.friends.getAppUsers")
//case object GetRequests extends FacebookMethod("facebook.friends.getRequests") /*This method is not listed in the current facebook api. deprecated?*/
case class AreFriends(friends1: Collection[Long], friends2: Collection[Long]) extends FacebookMethod("facebook.friends.areFriends", FacebookParam("uids1", friends1.mkString(",")), FacebookParam("uids2", friends2.mkString(",")))
case class GetInfo(users: Collection[Long], fields: FacebookField*) extends FacebookMethod("facebook.users.getInfo", UserIds(users.toList: _*), FacebookFields(fields: _*))
case object GetUser extends FacebookMethod("facebook.users.getLoggedInUser")
case class GetPhotos(primaryFilter: GetPhotosParam, otherFilters: GetPhotosParam*) extends FacebookMethod("facebook.photos.get", (primaryFilter :: otherFilters.toList): _*)
case class GetAlbums(primaryFilter: GetAlbumsParam, otherFilters: GetAlbumsParam*) extends FacebookMethod("facebook.photos.getAlbums", (primaryFilter :: otherFilters.toList): _*)
case class GetPhotoTags(photoIds: Long*) extends FacebookMethod(" facebook.photos.getTags", PhotoIds(photoIds: _*))
case class CreatePhotoAlbum(albumName: String, otherParams: CreatePhotoAlbumParam*) extends FacebookMethod("facebook.photos.createAlbum", (NameParam(albumName) :: otherParams.toList): _*)
case class AddPhotoTags(photoId: Long, tags: PhotoTag*) extends FacebookMethod("facebook.photos.addTag", PhotoId(photoId), Tags(tags: _*))
case class UploadPhoto(fileName: String, mimeType: String, fileData: Array[Byte], otherParams: UploadPhotoParam*) extends FacebookMethod("facebook.photos.upload", true, otherParams: _*)
case object GetNotifications extends FacebookMethod("facebook.notifications.get")
case class SendNotifications(notification: NodeSeq, recipients: Long*) extends FacebookMethod("facebook.notifications.send", RecipientIds(recipients: _*), Notification(notification))
//case class SendRequest extends FacebookMethod("facebook.notifications.sendRequest", 5) /*This method was disabled by the facebook api*/
case class GetGroups(filters: GetGroupsParam*) extends FacebookMethod("facebook.groups.get", filters: _*)
case class GetGroupMembers(groupId: Long) extends FacebookMethod("facebook.groups.getMembers", GroupId(groupId))
case class SetFBML(optionalParams: SetFbmlParam*) extends FacebookMethod("facebook.profile.setFBML", optionalParams: _*)
case class GetFBML(optionalParams: GetFbmlParam*) extends FacebookMethod("facebook.profile.getFBML", optionalParams: _*)
case class RefreshImage(imageUrl: String) extends FacebookMethod("facebook.fbml.refreshImgSrc", Url(imageUrl))
case class RefreshRefURL(refUrl: String) extends FacebookMethod("facebook.fbml.refreshRefUrl", Url(refUrl))
case class SetRefHandle(handle: String, markup: NodeSeq) extends FacebookMethod("facebook.fbml.setRefHandle", RefHandle(handle), FBML(markup))
case class PublishStory(title: NodeSeq, publishParams: PublishStoryParam*) extends FacebookMethod("facebook.feed.publishStoryToUser", (Title(title) :: publishParams.toList): _*)
case class PublishAction(title: NodeSeq, publishParams: PublishActionParam*) extends FacebookMethod("facebook.feed.publishActionOfUser", (Title(title) :: publishParams.toList): _*)
class FacebookField(val name: String)
case object AboutMe extends FacebookField("about_me")
case object Activities extends FacebookField("activities")
case object Affiliations extends FacebookField("affiliations")
case object Birthday extends FacebookField("birthday")
case object Books extends FacebookField("books")
case object CurrentLocation extends FacebookField("current_location")
case object EducationHistory extends FacebookField("education_history")
case object FirstName extends FacebookField("first_name")
case object AddedApp extends FacebookField("has_added_app")
case object Hometown extends FacebookField("hometown_location")
case object Highschool extends FacebookField("hs_info")
case object Interests extends FacebookField("interests")
case object AppUser extends FacebookField("is_app_user")
case object LastName extends FacebookField("last_name")
case object MeetingFor extends FacebookField("meeting_for")
case object LookingFor extends FacebookField("meeting_for")
case object MeetingSex extends FacebookField("meeting_sex")
case object InterestedIn extends FacebookField("meeting_sex")
case object Movies extends FacebookField("movies")
case object Music extends FacebookField("music")
case object Name extends FacebookField("name")
case object NotesCount extends FacebookField("notes_count")
case object Pic extends FacebookField("pic")
case object BigPic extends FacebookField("pic_big")
case object SmallPic extends FacebookField("pic_small")
case object SquarePic extends FacebookField("pic_square")
case object PoliticalView extends FacebookField("political")
case object UpdateTime extends FacebookField("profile_update_time")
case object Quotes extends FacebookField("quotes")
case object Relationship extends FacebookField("relationship_status")
case object RelationshipStatus extends FacebookField("relationship_status")
case object Religion extends FacebookField("religion")
case object Sex extends FacebookField("sex")
case object SignificantOther extends FacebookField("significant_other_id")
case object Status extends FacebookField("status")
case object Timezone extends FacebookField("timezone")
case object TV extends FacebookField("tv")
case object WallCount extends FacebookField("wall_count")
case object WorkHistory extends FacebookField("work_history")
sealed abstract class PhotoTag(x: Double, y: Double){ def toJSON: String }
case class TagById(userId: Long, x: Double, y: Double) extends PhotoTag(x, y){
def toJSON = "{'x':'" + x.toString + "','y':'" + y.toString + "','tag_uid':" + userId.toString + "}"
}
case class TagByName(name: String, x: Double, y: Double) extends PhotoTag(x, y){
def toJSON = "{'x':'" + x.toString + "','y':'" + y.toString + "','tag_text':'" + name.toString + "'}"
}
object FacebookParam {
def apply(key: String, value: Any) = new FacebookParam(key, value)
def apply(pair: (String, Any)) = new FacebookParam(pair)
}
class FacebookParam(val key: String, val value: Any){
def this(pair: (String, Any)) = this(pair._1, pair._2)
}
trait UniversalParam extends FacebookParam
trait GetFriendsParam extends FacebookParam
trait GetEventsParam extends FacebookParam
trait GetPhotosParam extends FacebookParam
trait GetAlbumsParam extends FacebookParam
trait GetPhotoTagsParam extends FacebookParam
trait CreatePhotoAlbumParam extends FacebookParam
trait AddPhotoTagsParam extends FacebookParam
trait SendNotificationParam extends FacebookParam
trait GetGroupsParam extends FacebookParam
trait GetGroupMembersParam extends FacebookParam
trait SetFbmlParam extends FacebookParam
trait GetFbmlParam extends FacebookParam
trait RefreshImageParam extends FacebookParam
trait RefreshRefParam extends FacebookParam
trait SetRefHandleParam extends FacebookParam
trait PublishStoryParam extends FacebookParam
trait PublishActionParam extends FacebookParam
trait UploadPhotoParam extends FacebookParam
case object XMLFormat extends FacebookParam("format", "XML") with UniversalParam
case object JSONFormat extends FacebookParam("format", "JSON") with UniversalParam
case class Callback(functionName: String) extends FacebookParam("callback", functionName) with UniversalParam
case class AuthToken(token: String) extends FacebookParam("auth_token", token)
case class Query(query: String) extends FacebookParam("query", query)
case class FacebookFields(fields: FacebookField*) extends FacebookParam("fields", fields.map(_.name).mkString(","))
case class FriendListId(friendListId: Long) extends FacebookParam("flid", friendListId) with GetFriendsParam
case class UserId(userId: Long) extends FacebookParam("uid", userId) with GetEventsParam with GetAlbumsParam with GetGroupsParam with SetFbmlParam with GetFbmlParam
case class UserIds(userIds: Long*) extends FacebookParam("uids", userIds.mkString(","))
case class EventId(eventId: Long) extends FacebookParam("eid", eventId)
case class EventIds(eventIds: Long*) extends FacebookParam("eids", eventIds.mkString(",")) with GetEventsParam
case class StartTime(startTime: Date) extends FacebookParam("start_time", startTime.getTime()) with GetEventsParam
case class EndTime(endTime: Date) extends FacebookParam("end_time", endTime.getTime()) with GetEventsParam
case object RsvpAttending extends FacebookParam("rsvp_status", "attending") with GetEventsParam
case object RsvpUnsure extends FacebookParam("rsvp_status", "unsure") with GetEventsParam
case object RsvpDeclined extends FacebookParam("rsvp_status", "declined") with GetEventsParam
case object RsvpNotReplied extends FacebookParam("rsvp_status", "not_replied") with GetEventsParam
case class SubjectId(subjectId: Long) extends FacebookParam("subj_id", subjectId) with GetPhotosParam
case class AlbumId(albumId: Long) extends FacebookParam("aid", albumId) with GetPhotosParam with AddPhotoTagsParam with UploadPhotoParam
case class AlbumIds(albumIds: Long*) extends FacebookParam("aids", albumIds.mkString(",")) with GetAlbumsParam
case class PhotoId(photoId: Long) extends FacebookParam("pid", photoId) with AddPhotoTagsParam
case class PhotoIds(photoIds: Long*) extends FacebookParam("pids", photoIds.mkString(",")) with GetPhotosParam with GetPhotoTagsParam
case class NameParam(name: String) extends FacebookParam("name", name) with CreatePhotoAlbumParam
case class Location(location: String) extends FacebookParam("location", location) with CreatePhotoAlbumParam
case class Description(description: String) extends FacebookParam("description", description) with CreatePhotoAlbumParam
case class TagUserId(userId: Long) extends FacebookParam("tag_uid", userId)
case class TagText(text: String) extends FacebookParam("tag_text", text)
case class Tags(tags: PhotoTag*) extends FacebookParam("tags", tags.map(_.toJSON).mkString("[", ",", "]"))
case class Notification(markup: NodeSeq) extends FacebookParam("notification", markup) with SendNotificationParam
case class RecipientIds(recipientIds: Long*) extends FacebookParam("to_ids", recipientIds.mkString(",")) with SendNotificationParam
case class GroupId(groupId: Long) extends FacebookParam("gid", groupId) with GetGroupMembersParam
case class GroupIds(groupIds: Long*) extends FacebookParam("gids", groupIds.mkString(",")) with GetGroupsParam
case class ProfileMarkup(markup: NodeSeq) extends FacebookParam("profile", markup) with SetFbmlParam
case class ProfileActionMarkup(markup: NodeSeq) extends FacebookParam("profile_action", markup) with SetFbmlParam
case class MobileProfileMarkup(markup: NodeSeq) extends FacebookParam("mobile_profile", markup) with SetFbmlParam
case class Url(url: String) extends FacebookParam("url", url) with RefreshImageParam with RefreshRefParam
case class RefHandle(handle: String) extends FacebookParam("handle", handle) with SetRefHandleParam
case class FBML(markup: NodeSeq) extends FacebookParam("fbml", markup) with SetRefHandleParam
case class Title(markup: NodeSeq) extends FacebookParam("title", markup) with PublishStoryParam with PublishActionParam
case class Body(markup: NodeSeq) extends FacebookParam("body", markup) with PublishStoryParam with PublishActionParam
case class FirstImage(imageUrl: String) extends FacebookParam("image_1", imageUrl) with PublishStoryParam with PublishActionParam
case class FirstImageLink(imageUrl: String) extends FacebookParam("image_1_link", imageUrl) with PublishStoryParam with PublishActionParam
case class SecondImage(imageUrl: String) extends FacebookParam("image_2", imageUrl) with PublishStoryParam with PublishActionParam
case class SecondImageLink(imageUrl: String) extends FacebookParam("image_2_link", imageUrl) with PublishStoryParam with PublishActionParam
case class ThirdImage(imageUrl: String) extends FacebookParam("image_3", imageUrl) with PublishStoryParam with PublishActionParam
case class ThirdImageLink(imageUrl: String) extends FacebookParam("image_3_link", imageUrl) with PublishStoryParam with PublishActionParam
case class FourthImage(imageUrl: String) extends FacebookParam("image_4", imageUrl) with PublishStoryParam with PublishActionParam
case class FourthImageLink(imageUrl: String) extends FacebookParam("image_4_link", imageUrl) with PublishStoryParam with PublishActionParam
case class Priority(priority: String) extends FacebookParam("priority", priority) with PublishStoryParam
case class Caption(caption: String) extends FacebookParam("caption", caption) with UploadPhotoParam
}
}
}
| jeppenejsum/liftweb | framework/lift-modules/lift-facebook/src/main/scala/net/liftweb/ext_api/facebook/FacebookRestApi.scala | Scala | apache-2.0 | 20,726 |
package org.jetbrains.plugins.scala
package lang.surroundWith.descriptors
import com.intellij.lang.surroundWith.{SurroundDescriptor, Surrounder}
import com.intellij.psi.{PsiElement, PsiFile}
import org.jetbrains.plugins.scala.lang.scaladoc.lexer.ScalaDocTokenType._
import org.jetbrains.plugins.scala.lang.scaladoc.lexer.docsyntax.ScalaDocSyntaxElementType
import org.jetbrains.plugins.scala.lang.surroundWith.surrounders.scaladoc._
import scala.collection.mutable.ArrayBuffer
/**
* User: Dmitry Naydanov
* Date: 3/2/12
*/
class ScalaDocCommentDataSurroundDescriptor extends SurroundDescriptor {
val surrounders: Array[Surrounder] = Array[Surrounder](new ScalaDocWithBoldSurrounder, new ScalaDocWithUnderlinedSurrounder,
new ScalaDocWithMonospaceSurrounder, new ScalaDocWithItalicSurrounder, new ScalaDocWithSubscriptSurrounder,
new ScalaDocWithSuperscriptSurrounder)
override def getElementsToSurround(file: PsiFile, startOffset: Int, endOffset: Int): Array[PsiElement] = {
if (endOffset == startOffset) return PsiElement.EMPTY_ARRAY
val validBoundElements = Set(DOC_COMMENT_DATA, DOC_WHITESPACE)
def checkBoundElement(element: PsiElement): Boolean = validBoundElements.contains(element.getNode.getElementType)
def checkSyntaxBoundElement(element: PsiElement, isStart: Boolean): Boolean =
element.getNode.getElementType.isInstanceOf[ScalaDocSyntaxElementType] &&
(isStart && startOffset == element.getTextOffset || !isStart && endOffset == element.getTextRange.getEndOffset)
val startElement = file.findElementAt(startOffset)
val endElement = file.findElementAt(endOffset - 1)
if (startElement == null || endElement == null) return PsiElement.EMPTY_ARRAY
val isFirstElementMarked = if (checkBoundElement(startElement)) { //cannot extract function because of return
false
} else {
if (checkSyntaxBoundElement(startElement, true)) true else return PsiElement.EMPTY_ARRAY
}
val isLastElementMarked = if (checkBoundElement(endElement)) {
false
} else {
if (checkSyntaxBoundElement(endElement, false)) true else return PsiElement.EMPTY_ARRAY
}
if (startElement.getParent != endElement.getParent) {
(isFirstElementMarked, isLastElementMarked) match {
case (true, true) if (startElement.getParent.getParent == endElement.getParent.getParent) =>
case (true, false) if (startElement.getParent.getParent == endElement.getParent) =>
case (false, true) if (startElement.getParent == endElement.getParent.getParent) =>
case _ => return PsiElement.EMPTY_ARRAY
}
} else if (isFirstElementMarked && isLastElementMarked) { // in case <selection>__blah blah__</selection>
return Array(startElement.getParent)
}
if (endElement == startElement) {
return Array(startElement)
}
var (nextElement, elementsToSurround) = if (isFirstElementMarked) {
if (startElement.getParent.getTextRange.getEndOffset <= endOffset)
(startElement.getParent.getNextSibling, ArrayBuffer(startElement.getParent))
else
return PsiElement.EMPTY_ARRAY
} else {
(startElement.getNextSibling, ArrayBuffer(startElement))
}
val lastBoundElement = if (isLastElementMarked) {
if (endElement.getTextOffset >= startOffset) (endElement.getParent) else return PsiElement.EMPTY_ARRAY
} else {
endElement
}
var hasAsterisk = false
do {
if (nextElement == null) return PsiElement.EMPTY_ARRAY
if ((!Set(DOC_COMMENT_DATA, DOC_COMMENT_LEADING_ASTERISKS, DOC_WHITESPACE).contains(nextElement.getNode.getElementType) &&
!nextElement.getNode.getElementType.isInstanceOf[ScalaDocSyntaxElementType]) ||
(nextElement.getNode.getElementType == DOC_WHITESPACE && nextElement.getText.indexOf("\\n") != nextElement.getText.lastIndexOf("\\n"))) {
return PsiElement.EMPTY_ARRAY
} else if (nextElement.getNode.getElementType == DOC_COMMENT_LEADING_ASTERISKS) {
if (hasAsterisk) return PsiElement.EMPTY_ARRAY
hasAsterisk = true
} else if (nextElement.getNode.getElementType != DOC_WHITESPACE) {
hasAsterisk = false
}
elementsToSurround += nextElement
} while (nextElement != lastBoundElement && (nextElement = nextElement.getNextSibling, true)._2);
elementsToSurround.toArray
}
override def getSurrounders: Array[Surrounder] = surrounders
override def isExclusive: Boolean = false
}
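// A surround descriptor like this one is typically wired up through the IntelliJ Platform's
// `com.intellij.lang.surroundDescriptor` extension point. A plugin.xml sketch follows; the exact
// registration location inside this plugin is an assumption, not taken from this file:
//   <lang.surroundDescriptor language="Scala"
//       implementationClass="org.jetbrains.plugins.scala.lang.surroundWith.descriptors.ScalaDocCommentDataSurroundDescriptor"/>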
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/surroundWith/descriptors/ScalaDocCommentDataSurroundDescriptor.scala | Scala | apache-2.0 | 4,503 |
/*
* Copyright (c) 2014-2016
* nonblocking.at gmbh [http://www.nonblocking.at]
*
* This file is part of Cliwix.
*
* Cliwix is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package at.nonblocking.cliwix.core.liferay61.handler
import at.nonblocking.cliwix.core.ExecutionContext
import at.nonblocking.cliwix.core.command._
import at.nonblocking.cliwix.core.converter.LiferayEntityConverter
import at.nonblocking.cliwix.core.handler.Handler
import at.nonblocking.cliwix.core.util.ResourceAwareCollectionFactory
import at.nonblocking.cliwix.model.UserGroup
import com.liferay.portal.NoSuchUserGroupException
import com.liferay.portal.service._
import scala.beans.BeanProperty
import scala.collection.JavaConversions._
import java.{util=>jutil}
class UserGroupListHandler extends Handler[UserGroupListCommand, jutil.Map[String, UserGroup]] {
@BeanProperty
var userGroupService: UserGroupLocalService = _
@BeanProperty
var groupService: GroupLocalService = _
@BeanProperty
var converter: LiferayEntityConverter = _
@BeanProperty
var resourceAwareCollectionFactory: ResourceAwareCollectionFactory = _
private[core] override def handle(command: UserGroupListCommand): CommandResult[jutil.Map[String, UserGroup]] = {
val userGroups = this.userGroupService.getUserGroups(command.companyId)
val resultMap = this.resourceAwareCollectionFactory.createMap[String, UserGroup](userGroups.size())
userGroups.foreach { userGroup =>
logger.debug("Export UserGroup: {}", userGroup.getName)
val cliwixUserGroup = this.converter.convertToCliwixUserGroup(userGroup)
resultMap.put(cliwixUserGroup.identifiedBy, cliwixUserGroup)
}
CommandResult(resultMap)
}
}
class UserGroupInsertHandler extends Handler[UserGroupInsertCommand, UserGroup] {
@BeanProperty
var userGroupService: UserGroupLocalService = _
@BeanProperty
var userService: UserLocalService = _
@BeanProperty
var converter: LiferayEntityConverter = _
private[core] override def handle(command: UserGroupInsertCommand): CommandResult[UserGroup] = {
val cliwixUserGroup = command.userGroup
val defaultUser = ExecutionContext.securityContext.defaultUser
logger.debug("Adding user group: {}", cliwixUserGroup)
val insertedUserGroup = this.userGroupService.addUserGroup(defaultUser.getUserId, command.companyId,
cliwixUserGroup.getName, cliwixUserGroup.getDescription)
if (cliwixUserGroup.getMemberUsers != null) {
val userIds = cliwixUserGroup.getMemberUsers.map { mu =>
handleNoSuchUser(mu.getScreenName) {
this.userService.getUserByScreenName(command.companyId, mu.getScreenName)
}
}
.filter(_ != null)
.map(_.getUserId)
this.userService.addUserGroupUsers(insertedUserGroup.getUserGroupId, userIds.toArray)
}
val insertedCliwixUserGroup = this.converter.convertToCliwixUserGroup(insertedUserGroup)
CommandResult(insertedCliwixUserGroup)
}
}
class UserGroupUpdateHandler extends Handler[UpdateCommand[UserGroup], UserGroup] {
@BeanProperty
var userGroupService: UserGroupLocalService = _
@BeanProperty
var userService: UserLocalService = _
@BeanProperty
var converter: LiferayEntityConverter = _
private[core] override def handle(command: UpdateCommand[UserGroup]): CommandResult[UserGroup] = {
val cliwixUserGroup = command.entity
assert(cliwixUserGroup.getUserGroupId != null, "userGroupId != null")
logger.debug("Updating user group: {}", cliwixUserGroup)
val userGroup = this.userGroupService.getUserGroup(cliwixUserGroup.getUserGroupId)
this.converter.mergeToLiferayUserGroup(cliwixUserGroup, userGroup)
    val updatedUserGroup = this.userGroupService.updateUserGroup(userGroup)
if (cliwixUserGroup.getMemberUsers != null) {
val existingUserIds =
this.userService.getUserGroupUsers(userGroup.getUserGroupId).map(_.getUserId)
val userIds = cliwixUserGroup.getMemberUsers.map { mu =>
handleNoSuchUser(mu.getScreenName) {
this.userService.getUserByScreenName(updatedUserGroup.getCompanyId, mu.getScreenName)
}
}
.filter(_ != null)
.map(_.getUserId)
val addedUserIds = userIds.filter(userId => !existingUserIds.contains(userId))
val removedUserIds = existingUserIds.filter(userId => !userIds.contains(userId))
this.userService.addUserGroupUsers(updatedUserGroup.getUserGroupId, addedUserIds.toArray)
this.userService.unsetUserGroupUsers(updatedUserGroup.getUserGroupId, removedUserIds.toArray)
}
val updatedCliwixUserGroup = this.converter.convertToCliwixUserGroup(updatedUserGroup)
CommandResult(updatedCliwixUserGroup)
}
}
class UserGroupGetByIdHandler extends Handler[GetByDBIdCommand[UserGroup], UserGroup] {
@BeanProperty
var userGroupService: UserGroupLocalService = _
@BeanProperty
var groupService: GroupLocalService = _
@BeanProperty
var converter: LiferayEntityConverter = _
override private[core] def handle(command: GetByDBIdCommand[UserGroup]): CommandResult[UserGroup] = {
try {
val userGroup = this.userGroupService.getUserGroup(command.dbId)
val cliwixUserGroup = this.converter.convertToCliwixUserGroup(userGroup)
CommandResult(cliwixUserGroup)
} catch {
case e: NoSuchUserGroupException =>
logger.warn(s"UserGroup with id ${command.dbId} not found", e)
CommandResult(null)
case e: Throwable => throw e
}
}
}
class UserGroupGetByIdentifierHandler extends Handler[GetByIdentifierOrPathCommand[UserGroup], UserGroup] {
@BeanProperty
var userGroupService: UserGroupLocalService = _
@BeanProperty
var groupService: GroupLocalService = _
@BeanProperty
var converter: LiferayEntityConverter = _
override private[core] def handle(command: GetByIdentifierOrPathCommand[UserGroup]): CommandResult[UserGroup] = {
try {
val userGroup = this.userGroupService.getUserGroup(command.companyId, command.identifierOrPath)
val cliwixUserGroup = this.converter.convertToCliwixUserGroup(userGroup)
CommandResult(cliwixUserGroup)
} catch {
case e: NoSuchUserGroupException =>
logger.warn(s"UserGroup with name ${command.identifierOrPath} not found", e)
CommandResult(null)
case e: Throwable => throw e
}
}
}
class UserGroupDeleteHandler extends Handler[DeleteCommand[UserGroup], UserGroup] {
@BeanProperty
var userGroupService: UserGroupLocalService = _
private[core] override def handle(command: DeleteCommand[UserGroup]): CommandResult[UserGroup] = {
logger.debug("Deleting user group: {}", command.entity)
    //In some Liferay versions deleteUserGroup() returns a UserGroup and in others it does not,
    //so we must use reflection here
val deleteUserGroupMethod = this.userGroupService.getClass.getMethod("deleteUserGroup", classOf[Long])
deleteUserGroupMethod.invoke(this.userGroupService, command.entity.getUserGroupId)
CommandResult(null)
}
}
| nonblocking/cliwix | cliwix-core-handlers-6-1/src/main/scala/at/nonblocking/cliwix/core/liferay61/handler/UserGroupHandler.scala | Scala | agpl-3.0 | 7,613
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import scala.collection.mutable.{ArrayBuffer, HashSet, HashMap, Map}
import scala.language.reflectiveCalls
import scala.util.control.NonFatal
import org.scalatest.{BeforeAndAfter, FunSuiteLike}
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.storage.{BlockId, BlockManagerId, BlockManagerMaster}
import org.apache.spark.util.CallSite
import org.apache.spark.executor.TaskMetrics
class DAGSchedulerEventProcessLoopTester(dagScheduler: DAGScheduler)
extends DAGSchedulerEventProcessLoop(dagScheduler) {
override def post(event: DAGSchedulerEvent): Unit = {
try {
// Forward event to `onReceive` directly to avoid processing event asynchronously.
onReceive(event)
} catch {
case NonFatal(e) => onError(e)
}
}
}
/**
* An RDD for passing to DAGScheduler. These RDDs will use the dependencies and
* preferredLocations (if any) that are passed to them. They are deliberately not executable
* so we can test that DAGScheduler does not try to execute RDDs locally.
*/
class MyRDD(
sc: SparkContext,
numPartitions: Int,
dependencies: List[Dependency[_]],
locations: Seq[Seq[String]] = Nil) extends RDD[(Int, Int)](sc, dependencies) with Serializable {
override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] =
throw new RuntimeException("should not be reached")
override def getPartitions = (0 until numPartitions).map(i => new Partition {
override def index = i
}).toArray
override def getPreferredLocations(split: Partition): Seq[String] =
if (locations.isDefinedAt(split.index))
locations(split.index)
else
Nil
override def toString: String = "DAGSchedulerSuiteRDD " + id
}
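// Typical wiring used by the tests below (a sketch of what they construct, not an executable test):
//   val shuffleMapRdd = new MyRDD(sc, 2, Nil)
//   val shuffleDep    = new ShuffleDependency(shuffleMapRdd, null)
//   val reduceRdd     = new MyRDD(sc, 1, List(shuffleDep))
// reduceRdd then depends on the map outputs of shuffleMapRdd, while neither RDD can actually be
// computed (compute() always throws), which is exactly what these scheduler-only tests need.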
class DAGSchedulerSuiteDummyException extends Exception
class DAGSchedulerSuite extends FunSuiteLike with BeforeAndAfter with LocalSparkContext with Timeouts {
val conf = new SparkConf
  /** Set of TaskSets that the DAGScheduler has requested be executed. */
val taskSets = scala.collection.mutable.Buffer[TaskSet]()
/** Stages for which the DAGScheduler has called TaskScheduler.cancelTasks(). */
val cancelledStages = new HashSet[Int]()
val taskScheduler = new TaskScheduler() {
override def rootPool: Pool = null
override def schedulingMode: SchedulingMode = SchedulingMode.NONE
override def start() = {}
override def stop() = {}
override def executorHeartbeatReceived(execId: String, taskMetrics: Array[(Long, TaskMetrics)],
blockManagerId: BlockManagerId): Boolean = true
override def submitTasks(taskSet: TaskSet) = {
// normally done by TaskSetManager
taskSet.tasks.foreach(_.epoch = mapOutputTracker.getEpoch)
taskSets += taskSet
}
override def cancelTasks(stageId: Int, interruptThread: Boolean) {
cancelledStages += stageId
}
override def setDAGScheduler(dagScheduler: DAGScheduler) = {}
override def defaultParallelism() = 2
}
/** Length of time to wait while draining listener events. */
val WAIT_TIMEOUT_MILLIS = 10000
val sparkListener = new SparkListener() {
val successfulStages = new HashSet[Int]
val failedStages = new ArrayBuffer[Int]
val stageByOrderOfExecution = new ArrayBuffer[Int]
override def onStageCompleted(stageCompleted: SparkListenerStageCompleted) {
val stageInfo = stageCompleted.stageInfo
stageByOrderOfExecution += stageInfo.stageId
if (stageInfo.failureReason.isEmpty) {
successfulStages += stageInfo.stageId
} else {
failedStages += stageInfo.stageId
}
}
}
var mapOutputTracker: MapOutputTrackerMaster = null
var scheduler: DAGScheduler = null
var dagEventProcessLoopTester: DAGSchedulerEventProcessLoop = null
/**
* Set of cache locations to return from our mock BlockManagerMaster.
* Keys are (rdd ID, partition ID). Anything not present will return an empty
* list of cache locations silently.
*/
val cacheLocations = new HashMap[(Int, Int), Seq[BlockManagerId]]
// stub out BlockManagerMaster.getLocations to use our cacheLocations
val blockManagerMaster = new BlockManagerMaster(null, conf, true) {
override def getLocations(blockIds: Array[BlockId]): Seq[Seq[BlockManagerId]] = {
blockIds.map {
_.asRDDId.map(id => (id.rddId -> id.splitIndex)).flatMap(key => cacheLocations.get(key)).
getOrElse(Seq())
}.toSeq
}
override def removeExecutor(execId: String) {
// don't need to propagate to the driver, which we don't have
}
}
/** The list of results that DAGScheduler has collected. */
val results = new HashMap[Int, Any]()
var failure: Exception = _
val jobListener = new JobListener() {
override def taskSucceeded(index: Int, result: Any) = results.put(index, result)
override def jobFailed(exception: Exception) = { failure = exception }
}
before {
// Enable local execution for this test
val conf = new SparkConf().set("spark.localExecution.enabled", "true")
sc = new SparkContext("local", "DAGSchedulerSuite", conf)
sparkListener.successfulStages.clear()
sparkListener.failedStages.clear()
failure = null
sc.addSparkListener(sparkListener)
taskSets.clear()
cancelledStages.clear()
cacheLocations.clear()
results.clear()
mapOutputTracker = new MapOutputTrackerMaster(conf)
scheduler = new DAGScheduler(
sc,
taskScheduler,
sc.listenerBus,
mapOutputTracker,
blockManagerMaster,
sc.env) {
override def runLocally(job: ActiveJob) {
// don't bother with the thread while unit testing
runLocallyWithinThread(job)
}
}
dagEventProcessLoopTester = new DAGSchedulerEventProcessLoopTester(scheduler)
}
override def afterAll() {
super.afterAll()
}
/**
* Type of RDD we use for testing. Note that we should never call the real RDD compute methods.
* This is a pair RDD type so it can always be used in ShuffleDependencies.
*/
type PairOfIntsRDD = RDD[(Int, Int)]
/**
* Process the supplied event as if it were the top of the DAGScheduler event queue, expecting
* the scheduler not to exit.
*
* After processing the event, submit waiting stages as is done on most iterations of the
* DAGScheduler event loop.
*/
private def runEvent(event: DAGSchedulerEvent) {
dagEventProcessLoopTester.post(event)
}
/**
* When we submit dummy Jobs, this is the compute function we supply. Except in a local test
* below, we do not expect this function to ever be executed; instead, we will return results
* directly through CompletionEvents.
*/
private val jobComputeFunc = (context: TaskContext, it: Iterator[(_)]) =>
it.next.asInstanceOf[Tuple2[_, _]]._1
/** Send the given CompletionEvent messages for the tasks in the TaskSet. */
private def complete(taskSet: TaskSet, results: Seq[(TaskEndReason, Any)]) {
assert(taskSet.tasks.size >= results.size)
for ((result, i) <- results.zipWithIndex) {
if (i < taskSet.tasks.size) {
runEvent(CompletionEvent(taskSet.tasks(i), result._1, result._2, null, createFakeTaskInfo(), null))
}
}
}
private def completeWithAccumulator(accumId: Long, taskSet: TaskSet,
results: Seq[(TaskEndReason, Any)]) {
assert(taskSet.tasks.size >= results.size)
for ((result, i) <- results.zipWithIndex) {
if (i < taskSet.tasks.size) {
runEvent(CompletionEvent(taskSet.tasks(i), result._1, result._2,
Map[Long, Any]((accumId, 1)), createFakeTaskInfo(), null))
}
}
}
/** Sends the rdd to the scheduler for scheduling and returns the job id. */
private def submit(
rdd: RDD[_],
partitions: Array[Int],
func: (TaskContext, Iterator[_]) => _ = jobComputeFunc,
allowLocal: Boolean = false,
listener: JobListener = jobListener): Int = {
val jobId = scheduler.nextJobId.getAndIncrement()
runEvent(JobSubmitted(jobId, rdd, func, partitions, allowLocal, CallSite("", ""), listener))
jobId
}
/** Sends TaskSetFailed to the scheduler. */
private def failed(taskSet: TaskSet, message: String) {
runEvent(TaskSetFailed(taskSet, message))
}
/** Sends JobCancelled to the DAG scheduler. */
private def cancel(jobId: Int) {
runEvent(JobCancelled(jobId))
}
test("[SPARK-3353] parent stage should have lower stage id") {
sparkListener.stageByOrderOfExecution.clear()
sc.parallelize(1 to 10).map(x => (x, x)).reduceByKey(_ + _, 4).count()
assert(sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS))
assert(sparkListener.stageByOrderOfExecution.length === 2)
assert(sparkListener.stageByOrderOfExecution(0) < sparkListener.stageByOrderOfExecution(1))
}
test("zero split job") {
var numResults = 0
val fakeListener = new JobListener() {
override def taskSucceeded(partition: Int, value: Any) = numResults += 1
override def jobFailed(exception: Exception) = throw exception
}
submit(new MyRDD(sc, 0, Nil), Array(), listener = fakeListener)
assert(numResults === 0)
}
test("run trivial job") {
submit(new MyRDD(sc, 1, Nil), Array(0))
complete(taskSets(0), List((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty
}
test("local job") {
val rdd = new PairOfIntsRDD(sc, Nil) {
override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] =
Array(42 -> 0).iterator
override def getPartitions = Array( new Partition { override def index = 0 } )
override def getPreferredLocations(split: Partition) = Nil
override def toString = "DAGSchedulerSuite Local RDD"
}
val jobId = scheduler.nextJobId.getAndIncrement()
runEvent(JobSubmitted(jobId, rdd, jobComputeFunc, Array(0), true, CallSite("", ""), jobListener))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty
}
test("local job oom") {
val rdd = new PairOfIntsRDD(sc, Nil) {
override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] =
throw new java.lang.OutOfMemoryError("test local job oom")
override def getPartitions = Array( new Partition { override def index = 0 } )
override def getPreferredLocations(split: Partition) = Nil
override def toString = "DAGSchedulerSuite Local RDD"
}
val jobId = scheduler.nextJobId.getAndIncrement()
runEvent(JobSubmitted(jobId, rdd, jobComputeFunc, Array(0), true, CallSite("", ""), jobListener))
assert(results.size == 0)
assertDataStructuresEmpty
}
test("run trivial job w/ dependency") {
val baseRdd = new MyRDD(sc, 1, Nil)
val finalRdd = new MyRDD(sc, 1, List(new OneToOneDependency(baseRdd)))
submit(finalRdd, Array(0))
complete(taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty
}
test("cache location preferences w/ dependency") {
val baseRdd = new MyRDD(sc, 1, Nil)
val finalRdd = new MyRDD(sc, 1, List(new OneToOneDependency(baseRdd)))
cacheLocations(baseRdd.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
submit(finalRdd, Array(0))
val taskSet = taskSets(0)
assertLocations(taskSet, Seq(Seq("hostA", "hostB")))
complete(taskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty
}
test("avoid exponential blowup when getting preferred locs list") {
// Build up a complex dependency graph with repeated zip operations, without preferred locations.
var rdd: RDD[_] = new MyRDD(sc, 1, Nil)
(1 to 30).foreach(_ => rdd = rdd.zip(rdd))
// getPreferredLocs runs quickly, indicating that exponential graph traversal is avoided.
failAfter(10 seconds) {
      val preferredLocs = scheduler.getPreferredLocs(rdd, 0)
// No preferred locations are returned.
assert(preferredLocs.length === 0)
}
}
test("unserializable task") {
val unserializableRdd = new MyRDD(sc, 1, Nil) {
class UnserializableClass
val unserializable = new UnserializableClass
}
submit(unserializableRdd, Array(0))
assert(failure.getMessage.startsWith(
"Job aborted due to stage failure: Task not serializable:"))
assert(sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS))
assert(sparkListener.failedStages.contains(0))
assert(sparkListener.failedStages.size === 1)
assertDataStructuresEmpty
}
test("trivial job failure") {
submit(new MyRDD(sc, 1, Nil), Array(0))
failed(taskSets(0), "some failure")
assert(failure.getMessage === "Job aborted due to stage failure: some failure")
assert(sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS))
assert(sparkListener.failedStages.contains(0))
assert(sparkListener.failedStages.size === 1)
assertDataStructuresEmpty
}
test("trivial job cancellation") {
val rdd = new MyRDD(sc, 1, Nil)
val jobId = submit(rdd, Array(0))
cancel(jobId)
assert(failure.getMessage === s"Job $jobId cancelled ")
assert(sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS))
assert(sparkListener.failedStages.contains(0))
assert(sparkListener.failedStages.size === 1)
assertDataStructuresEmpty
}
test("job cancellation no-kill backend") {
// make sure that the DAGScheduler doesn't crash when the TaskScheduler
// doesn't implement killTask()
val noKillTaskScheduler = new TaskScheduler() {
override def rootPool: Pool = null
override def schedulingMode: SchedulingMode = SchedulingMode.NONE
override def start() = {}
override def stop() = {}
override def submitTasks(taskSet: TaskSet) = {
taskSets += taskSet
}
override def cancelTasks(stageId: Int, interruptThread: Boolean) {
throw new UnsupportedOperationException
}
override def setDAGScheduler(dagScheduler: DAGScheduler) = {}
override def defaultParallelism() = 2
override def executorHeartbeatReceived(execId: String, taskMetrics: Array[(Long, TaskMetrics)],
blockManagerId: BlockManagerId): Boolean = true
}
val noKillScheduler = new DAGScheduler(
sc,
noKillTaskScheduler,
sc.listenerBus,
mapOutputTracker,
blockManagerMaster,
sc.env) {
override def runLocally(job: ActiveJob) {
// don't bother with the thread while unit testing
runLocallyWithinThread(job)
}
}
dagEventProcessLoopTester = new DAGSchedulerEventProcessLoopTester(noKillScheduler)
val jobId = submit(new MyRDD(sc, 1, Nil), Array(0))
cancel(jobId)
// Because the job wasn't actually cancelled, we shouldn't have received a failure message.
assert(failure === null)
// When the task set completes normally, state should be correctly updated.
complete(taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty
assert(sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS))
assert(sparkListener.failedStages.isEmpty)
assert(sparkListener.successfulStages.contains(0))
}
test("run trivial shuffle") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, null)
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep))
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
assert(mapOutputTracker.getServerStatuses(shuffleId, 0).map(_._1) ===
Array(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
complete(taskSets(1), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty
}
test("run trivial shuffle with fetch failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, null)
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep))
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
// the 2nd ResultTask failed
complete(taskSets(1), Seq(
(Success, 42),
(FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"), null)))
// this will get called
// blockManagerMaster.removeExecutor("exec-hostA")
// ask the scheduler to try it again
scheduler.resubmitFailedStages()
// have the 2nd attempt pass
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
// we can see both result blocks now
assert(mapOutputTracker.getServerStatuses(shuffleId, 0).map(_._1.host) === Array("hostA", "hostB"))
complete(taskSets(3), Seq((Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
assertDataStructuresEmpty
}
test("trivial shuffle with multiple fetch failures") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, null)
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep))
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
// The MapOutputTracker should know about both map output locations.
assert(mapOutputTracker.getServerStatuses(shuffleId, 0).map(_._1.host) ===
Array("hostA", "hostB"))
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(CompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"),
null,
Map[Long, Any](),
createFakeTaskInfo(),
null))
assert(sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS))
assert(sparkListener.failedStages.contains(1))
// The second ResultTask fails, with a fetch failure for the output from the second mapper.
runEvent(CompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 1, 1, "ignored"),
null,
Map[Long, Any](),
createFakeTaskInfo(),
null))
// The SparkListener should not receive redundant failure events.
assert(sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS))
assert(sparkListener.failedStages.size == 1)
}
test("ignore late map task completions") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, null)
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep))
submit(reduceRdd, Array(0, 1))
// pretend we were told hostA went away
val oldEpoch = mapOutputTracker.getEpoch
runEvent(ExecutorLost("exec-hostA"))
val newEpoch = mapOutputTracker.getEpoch
assert(newEpoch > oldEpoch)
val taskSet = taskSets(0)
// should be ignored for being too old
runEvent(CompletionEvent(taskSet.tasks(0), Success, makeMapStatus("hostA", 1), null, createFakeTaskInfo(), null))
// should work because it's a non-failed host
runEvent(CompletionEvent(taskSet.tasks(0), Success, makeMapStatus("hostB", 1), null, createFakeTaskInfo(), null))
// should be ignored for being too old
runEvent(CompletionEvent(taskSet.tasks(0), Success, makeMapStatus("hostA", 1), null, createFakeTaskInfo(), null))
// should work because it's a new epoch
taskSet.tasks(1).epoch = newEpoch
runEvent(CompletionEvent(taskSet.tasks(1), Success, makeMapStatus("hostA", 1), null, createFakeTaskInfo(), null))
assert(mapOutputTracker.getServerStatuses(shuffleId, 0).map(_._1) ===
Array(makeBlockManagerId("hostB"), makeBlockManagerId("hostA")))
complete(taskSets(1), Seq((Success, 42), (Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
assertDataStructuresEmpty
}
test("run shuffle with map stage failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, null)
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep))
submit(reduceRdd, Array(0, 1))
// Fail the map stage. This should cause the entire job to fail.
val stageFailureMessage = "Exception failure in map stage"
failed(taskSets(0), stageFailureMessage)
assert(failure.getMessage === s"Job aborted due to stage failure: $stageFailureMessage")
// Listener bus should get told about the map stage failing, but not the reduce stage
// (since the reduce stage hasn't been started yet).
assert(sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS))
assert(sparkListener.failedStages.toSet === Set(0))
assertDataStructuresEmpty
}
/**
* Makes sure that failures of stage used by multiple jobs are correctly handled.
*
* This test creates the following dependency graph:
*
* shuffleMapRdd1 shuffleMapRDD2
* | \ |
* | \ |
* | \ |
* | \ |
* reduceRdd1 reduceRdd2
*
* We start both shuffleMapRdds and then fail shuffleMapRdd1. As a result, the job listeners for
* reduceRdd1 and reduceRdd2 should both be informed that the job failed. shuffleMapRDD2 should
* also be cancelled, because it is only used by reduceRdd2 and reduceRdd2 cannot complete
* without shuffleMapRdd1.
*/
test("failure of stage used by two jobs") {
val shuffleMapRdd1 = new MyRDD(sc, 2, Nil)
val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, null)
val shuffleMapRdd2 = new MyRDD(sc, 2, Nil)
val shuffleDep2 = new ShuffleDependency(shuffleMapRdd2, null)
val reduceRdd1 = new MyRDD(sc, 2, List(shuffleDep1))
val reduceRdd2 = new MyRDD(sc, 2, List(shuffleDep1, shuffleDep2))
// We need to make our own listeners for this test, since by default submit uses the same
// listener for all jobs, and here we want to capture the failure for each job separately.
class FailureRecordingJobListener() extends JobListener {
var failureMessage: String = _
override def taskSucceeded(index: Int, result: Any) {}
override def jobFailed(exception: Exception) = { failureMessage = exception.getMessage }
}
val listener1 = new FailureRecordingJobListener()
val listener2 = new FailureRecordingJobListener()
submit(reduceRdd1, Array(0, 1), listener=listener1)
submit(reduceRdd2, Array(0, 1), listener=listener2)
val stageFailureMessage = "Exception failure in map stage"
failed(taskSets(0), stageFailureMessage)
assert(cancelledStages.toSet === Set(0, 2))
// Make sure the listeners got told about both failed stages.
assert(sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS))
assert(sparkListener.successfulStages.isEmpty)
assert(sparkListener.failedStages.toSet === Set(0, 2))
assert(listener1.failureMessage === s"Job aborted due to stage failure: $stageFailureMessage")
assert(listener2.failureMessage === s"Job aborted due to stage failure: $stageFailureMessage")
assertDataStructuresEmpty
}
test("run trivial shuffle with out-of-band failure and retry") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, null)
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep))
submit(reduceRdd, Array(0))
// blockManagerMaster.removeExecutor("exec-hostA")
// pretend we were told hostA went away
runEvent(ExecutorLost("exec-hostA"))
// DAGScheduler will immediately resubmit the stage after it appears to have no pending tasks
    // rather than marking it as failed and waiting.
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
// have hostC complete the resubmitted task
complete(taskSets(1), Seq((Success, makeMapStatus("hostC", 1))))
assert(mapOutputTracker.getServerStatuses(shuffleId, 0).map(_._1) ===
Array(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
complete(taskSets(2), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty
}
test("recursive shuffle failures") {
val shuffleOneRdd = new MyRDD(sc, 2, Nil)
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, null)
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne))
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, null)
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo))
submit(finalRdd, Array(0))
// have the first stage complete normally
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// have the second stage complete normally
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostC", 1))))
// fail the third stage because hostA went down
complete(taskSets(2), Seq(
(FetchFailed(makeBlockManagerId("hostA"), shuffleDepTwo.shuffleId, 0, 0, "ignored"), null)))
// TODO assert this:
// blockManagerMaster.removeExecutor("exec-hostA")
// have DAGScheduler try again
scheduler.resubmitFailedStages()
complete(taskSets(3), Seq((Success, makeMapStatus("hostA", 2))))
complete(taskSets(4), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(5), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty
}
test("cached post-shuffle") {
val shuffleOneRdd = new MyRDD(sc, 2, Nil)
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, null)
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne))
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, null)
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo))
submit(finalRdd, Array(0))
cacheLocations(shuffleTwoRdd.id -> 0) = Seq(makeBlockManagerId("hostD"))
cacheLocations(shuffleTwoRdd.id -> 1) = Seq(makeBlockManagerId("hostC"))
// complete stage 2
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// complete stage 1
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
// pretend stage 0 failed because hostA went down
complete(taskSets(2), Seq(
(FetchFailed(makeBlockManagerId("hostA"), shuffleDepTwo.shuffleId, 0, 0, "ignored"), null)))
// TODO assert this:
// blockManagerMaster.removeExecutor("exec-hostA")
// DAGScheduler should notice the cached copy of the second shuffle and try to get it rerun.
scheduler.resubmitFailedStages()
assertLocations(taskSets(3), Seq(Seq("hostD")))
// allow hostD to recover
complete(taskSets(3), Seq((Success, makeMapStatus("hostD", 1))))
complete(taskSets(4), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty
}
test("misbehaved accumulator should not crash DAGScheduler and SparkContext") {
val acc = new Accumulator[Int](0, new AccumulatorParam[Int] {
override def addAccumulator(t1: Int, t2: Int): Int = t1 + t2
override def zero(initialValue: Int): Int = 0
override def addInPlace(r1: Int, r2: Int): Int = {
throw new DAGSchedulerSuiteDummyException
}
})
// Run this on executors
sc.parallelize(1 to 10, 2).foreach { item => acc.add(1) }
// Run this within a local thread
sc.parallelize(1 to 10, 2).map { item => acc.add(1) }.take(1)
// Make sure we can still run local commands as well as cluster commands.
assert(sc.parallelize(1 to 10, 2).count() === 10)
assert(sc.parallelize(1 to 10, 2).first() === 1)
}
test("misbehaved resultHandler should not crash DAGScheduler and SparkContext") {
val e1 = intercept[SparkDriverExecutionException] {
val rdd = sc.parallelize(1 to 10, 2)
sc.runJob[Int, Int](
rdd,
(context: TaskContext, iter: Iterator[Int]) => iter.size,
Seq(0),
allowLocal = true,
(part: Int, result: Int) => throw new DAGSchedulerSuiteDummyException)
}
assert(e1.getCause.isInstanceOf[DAGSchedulerSuiteDummyException])
val e2 = intercept[SparkDriverExecutionException] {
val rdd = sc.parallelize(1 to 10, 2)
sc.runJob[Int, Int](
rdd,
(context: TaskContext, iter: Iterator[Int]) => iter.size,
Seq(0, 1),
allowLocal = false,
(part: Int, result: Int) => throw new DAGSchedulerSuiteDummyException)
}
assert(e2.getCause.isInstanceOf[DAGSchedulerSuiteDummyException])
// Make sure we can still run local commands as well as cluster commands.
assert(sc.parallelize(1 to 10, 2).count() === 10)
assert(sc.parallelize(1 to 10, 2).first() === 1)
}
test("accumulator not calculated for resubmitted result stage") {
    // created just to register the accumulator (only its id is needed below)
val accum = new Accumulator[Int](0, AccumulatorParam.IntAccumulatorParam)
val finalRdd = new MyRDD(sc, 1, Nil)
submit(finalRdd, Array(0))
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assert(Accumulators.originals(accum.id).value === 1)
assertDataStructuresEmpty
}
/**
* Assert that the supplied TaskSet has exactly the given hosts as its preferred locations.
* Note that this checks only the host and not the executor ID.
*/
private def assertLocations(taskSet: TaskSet, hosts: Seq[Seq[String]]) {
assert(hosts.size === taskSet.tasks.size)
for ((taskLocs, expectedLocs) <- taskSet.tasks.map(_.preferredLocations).zip(hosts)) {
assert(taskLocs.map(_.host) === expectedLocs)
}
}
private def makeMapStatus(host: String, reduces: Int): MapStatus =
MapStatus(makeBlockManagerId(host), Array.fill[Long](reduces)(2))
private def makeBlockManagerId(host: String): BlockManagerId =
BlockManagerId("exec-" + host, host, 12345)
private def assertDataStructuresEmpty = {
assert(scheduler.activeJobs.isEmpty)
assert(scheduler.failedStages.isEmpty)
assert(scheduler.jobIdToActiveJob.isEmpty)
assert(scheduler.jobIdToStageIds.isEmpty)
assert(scheduler.stageIdToStage.isEmpty)
assert(scheduler.runningStages.isEmpty)
assert(scheduler.shuffleToMapStage.isEmpty)
assert(scheduler.waitingStages.isEmpty)
assert(scheduler.outputCommitCoordinator.isEmpty)
}
// Nothing in this test should break if the task info's fields are null, but
// OutputCommitCoordinator requires the task info itself to not be null.
private def createFakeTaskInfo(): TaskInfo = {
val info = new TaskInfo(0, 0, 0, 0L, "", "", TaskLocality.ANY, false)
info.finishTime = 1 // to prevent spurious errors in JobProgressListener
info
}
}
| Dax1n/spark-core | core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala | Scala | apache-2.0 | 31,959 |
package object scala {
type DeprecatedOverriding = deprecatedOverriding
type DeprecatedInheritance = deprecatedInheritance
}
| sgkim126/snippets | scala/deprecatedAnnotations/DeprecatedAnnotations.scala | Scala | bsd-2-clause | 129 |
/*
* Copyright 2015 herd contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.herd
import java.net.URI
import org.apache.commons.io.FilenameUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.hadoop.util.Progressable
/** Provides a minimal shim to emulate an actual S3A filesystem implementation for unit testing */
class MockS3AFileSystem extends FileSystem {
private val local = new LocalFileSystem()
private var uri: URI = _
override def rename(path: Path, path1: Path): Boolean = {
local.rename(localizePath(path), localizePath(path1))
}
override def listStatus(path: Path): Array[FileStatus] = {
local.listStatus(localizePath(path))
}
override def append(path: Path, i: Int, progressable: Progressable): FSDataOutputStream = {
local.append(localizePath(path), i, progressable)
}
override def delete(path: Path, b: Boolean): Boolean = {
local.delete(localizePath(path), b)
}
override def setWorkingDirectory(path: Path): Unit = {
local.setWorkingDirectory(localizePath(path))
}
override def mkdirs(path: Path, fsPermission: FsPermission): Boolean = {
local.mkdirs(localizePath(path), fsPermission)
}
override def getWorkingDirectory: Path = local.getWorkingDirectory
override def open(path: Path, i: Int): FSDataInputStream = {
local.open(localizePath(path), i)
}
override def create(path: Path, fsPermission: FsPermission, b: Boolean, i: Int, i1: Short, l: Long, progressable: Progressable): FSDataOutputStream = {
local.create(localizePath(path), fsPermission, b, i, i1, l, progressable)
}
override def initialize(name: URI, conf: Configuration): Unit = {
super.initialize(name, conf)
uri = URI.create(name.getScheme + "://" + name.getAuthority)
local.initialize(URI.create("file://" + FilenameUtils.separatorsToUnix(System.getProperty("user.dir"))), conf)
}
override def getScheme(): String = "s3a"
override def getUri: URI = uri
override def getFileStatus(path: Path): FileStatus = {
local.getFileStatus(localizePath(path))
}
private def localizePath(path: Path): Path = new Path(uri.relativize(path.toUri))
}
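/**
 * A minimal usage sketch (assumptions: the bucket name is arbitrary and the configuration keys
 * follow the standard Hadoop `fs.<scheme>.impl` convention): routes s3a:// URIs to the shim above.
 */
object MockS3AFileSystemUsageExample {
  def newMockedFileSystem(): FileSystem = {
    val conf = new Configuration()
    // Select this shim as the FileSystem implementation for the s3a scheme.
    conf.set("fs.s3a.impl", classOf[MockS3AFileSystem].getName)
    // Bypass the FileSystem cache so every call gets a freshly initialized instance.
    conf.setBoolean("fs.s3a.impl.disable.cache", true)
    FileSystem.get(URI.create("s3a://test-bucket/"), conf)
  }
}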
| FINRAOS/herd | herd-code/herd-tools/herd-spark-data-source/src/test/scala/org/apache/spark/sql/herd/MockS3AFilesystem.scala | Scala | apache-2.0 | 2,773 |
object test {
val b = List(1, 2, 3);
def main(args: Array[String]) =
Console.println(
b match {
case List(1, 2, 3) => true;
case _ => false;
}
)
}
| yusuke2255/dotty | tests/pending/pos/seqtest2.scala | Scala | bsd-3-clause | 179 |
package sangria.execution.deferred
import java.util.concurrent.atomic.AtomicInteger
import sangria.ast
import sangria.ast.Document
import sangria.execution.{DeferredWithInfo, Executor}
import sangria.macros._
import sangria.schema._
import sangria.util.{FutureResultSupport, Pos}
import sangria.util.SimpleGraphQlSupport._
import scala.concurrent.{ExecutionContext, Future}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class DeferredResolverSpec extends AnyWordSpec with Matchers with FutureResultSupport {
def deferredResolver(implicit ec: ExecutionContext) = {
case class LoadCategories(ids: Seq[String]) extends Deferred[Seq[String]]
lazy val CategoryType: ObjectType[Unit, String] = ObjectType(
"Category",
() =>
fields[Unit, String](
Field("name", StringType, resolve = c => s"Cat ${c.value}"),
Field("descr", StringType, resolve = c => s"Cat ${c.value} descr"),
Field("self", CategoryType, resolve = c => c.value),
Field("selfFut", CategoryType, resolve = c => Future(c.value)),
Field(
"selfFutComplex",
CategoryType,
complexity = Some((_, _, _) => 1000),
resolve = c => Future(c.value)),
Field(
"children",
ListType(CategoryType),
arguments = Argument("count", IntType) :: Nil,
resolve = c => LoadCategories((1 to c.arg[Int]("count")).map(i => s"${c.value}.$i"))
),
Field(
"childrenComplex",
ListType(CategoryType),
complexity = Some((_, _, _) => 1000),
arguments = Argument("count", IntType) :: Nil,
resolve = c => LoadCategories((1 to c.arg[Int]("count")).map(i => s"${c.value}.$i"))
),
Field(
"childrenFut",
ListType(CategoryType),
arguments = Argument("count", IntType) :: Nil,
resolve = c =>
DeferredFutureValue(
Future.successful(LoadCategories((1 to c.arg[Int]("count")).map(i =>
s"${c.value}.$i"))))
)
)
)
val QueryType = ObjectType(
"Query",
fields[Unit, Unit](
Field(
"root",
CategoryType,
resolve = _ => DeferredValue(LoadCategories(Seq("root"))).map(_.head)),
Field(
"rootFut",
CategoryType,
resolve =
_ => DeferredFutureValue(Future.successful(LoadCategories(Seq("root")))).map(_.head)),
Field(
"fail1",
OptionType(CategoryType),
resolve = _ => DeferredValue(LoadCategories(Seq("fail"))).map(_.head)),
Field(
"fail2",
OptionType(CategoryType),
resolve = _ => DeferredValue(LoadCategories(Seq("fail"))).map(_.head))
)
)
val MutationType = ObjectType(
"Mutation",
fields[Unit, Unit](
Field(
"root",
OptionType(CategoryType),
resolve = _ => DeferredValue(LoadCategories(Seq("root"))).map(_.head)),
Field(
"fail1",
OptionType(CategoryType),
resolve = _ => DeferredValue(LoadCategories(Seq("fail"))).map(_.head)),
Field(
"fail2",
OptionType(CategoryType),
resolve = _ => DeferredValue(LoadCategories(Seq("fail"))).map(_.head))
)
)
    class MyDeferredResolver extends DeferredResolver[Any] {
      val callsCount = new AtomicInteger(0)
      val valueCount = new AtomicInteger(0)
      // Only include deferred values coming from cheap fields (complexity < 100) in the
      // per-level batch; values from more expensive fields are resolved separately.
      override val includeDeferredFromField
          : Option[(Field[_, _], Vector[ast.Field], Args, Double) => Boolean] =
        Some((_, _, _, complexity) => complexity < 100)
      // Split every batch into two groups (expensive vs. cheap) so that each group is
      // passed to resolve() as a separate call.
      override def groupDeferred[T <: DeferredWithInfo](deferred: Vector[T]) = {
        val (expensive, cheap) = deferred.partition(_.complexity > 100)
        Vector(expensive, cheap)
      }
def resolve(deferred: Vector[Deferred[Any]], ctx: Any, queryState: Any)(implicit
ec: ExecutionContext) = {
callsCount.getAndIncrement()
valueCount.addAndGet(deferred.size)
deferred.map {
case LoadCategories(ids) if ids contains "fail" =>
Future.failed(new IllegalStateException("foo"))
case LoadCategories(ids) => Future.successful(ids)
}
}
}
val schema = Schema(QueryType, Some(MutationType))
def exec(query: Document) = {
val resolver = new MyDeferredResolver
val result = Executor.execute(schema, query, deferredResolver = resolver).await
resolver -> result
}
"result in a single resolution of once level" in {
val query =
graphql"""
{
root {
name
children(count: 5) {
children(count: 5) {
children(count: 5) {
children(count: 5) {
children(count: 5) {
name
}
}
childrenFut(count: 2) {
children(count: 2) {
name
}
}
self {
children(count: 3) {
children(count: 3) {
name
}
}
}
selfFut {
children(count: 3) {
children(count: 3) {
name
}
}
}
}
}
}
}
}
"""
val (resolver, _) = exec(query)
resolver.callsCount.get should be(6)
resolver.valueCount.get should be(2157)
}
"do not wait for future values" in {
val query =
graphql"""
{
root {
name
children(count: 3) {
s1: selfFutComplex {
children(count: 5) {
children(count: 5) {
name
}
}
}
s2: selfFutComplex {
children(count: 5) {
children(count: 5) {
name
}
}
}
selfFut {
children(count: 5) {
children(count: 5) {
name
}
}
}
selfFut {
children(count: 5) {
children(count: 5) {
name
}
}
}
}
}
}
"""
val (resolver, _) = exec(query)
resolver.callsCount.get should be(16)
resolver.valueCount.get should be(56)
}
"Group complex/expensive deferred values together" in {
val query =
graphql"""
{
rootFut {
name
c1: childrenComplex(count: 5) {
self {
childrenFut(count: 5) {
name
}
}
}
c2: childrenComplex(count: 5) {
self {
childrenFut(count: 5) {
name
}
}
}
childrenFut(count: 5) {
self {
childrenFut(count: 5) {
name
}
}
}
}
}
"""
val (resolver, r) = exec(query)
resolver.callsCount.get should be(5)
resolver.valueCount.get should be(19)
}
"failed queries should be handled appropriately" in checkContainsErrors(
schema,
(),
"""
{
fail1 {name}
root {name}
fail2 {name}
}
""",
Map("fail1" -> null, "root" -> Map("name" -> "Cat root"), "fail2" -> null),
List("foo" -> List(Pos(3, 11)), "foo" -> List(Pos(5, 11))),
resolver = new MyDeferredResolver
)
"failed mutations should be handled appropriately" in checkContainsErrors(
schema,
(),
"""
mutation {
fail1 {name}
root {name}
fail2 {name}
}
""",
Map("fail1" -> null, "root" -> Map("name" -> "Cat root"), "fail2" -> null),
List("foo" -> List(Pos(3, 11)), "foo" -> List(Pos(5, 11))),
resolver = new MyDeferredResolver
)
}
"DeferredResolver" when {
"using standard execution context" should {
behave.like(deferredResolver(ExecutionContext.Implicits.global))
}
"using sync execution context" should {
behave.like(deferredResolver(sync.executionContext))
}
}
}
| OlegIlyenko/sangria | modules/core/src/test/scala/sangria/execution/deferred/DeferredResolverSpec.scala | Scala | apache-2.0 | 9,020 |
package xitrum.sockjs
import scala.collection.mutable.ArrayBuffer
import akka.actor.{Actor, ActorRef, ReceiveTimeout, Terminated}
import scala.concurrent.duration._
import xitrum.{Action, Config, SockJsText}
// There are 2 kinds of non-WebSocket client: receiver and sender
// receiver/sender client <-> NonWebSocketSessionActor <-> SockJsAction
// (See SockJsActions.scala)
//
// For WebSocket:
// receiver/sender client <-> SockJsAction
case object SubscribeFromReceiverClient
case object AbortFromReceiverClient
case class MessagesFromSenderClient(messages: Seq[String])
case class MessageFromHandler(index: Int, message: String)
case class CloseFromHandler(index: Int)
case object SubscribeResultToReceiverClientAnotherConnectionStillOpen
case object SubscribeResultToReceiverClientClosed
case class SubscribeResultToReceiverClientMessages(messages: Seq[String])
case object SubscribeResultToReceiverClientWaitForMessage
case class NotificationToReceiverClientMessage(index: Int, message: String, handler: ActorRef)
case class NotificationToReceiverClientClosed(index: Int, handler: ActorRef)
case object NotificationToReceiverClientHeartbeat
case class NotificationToHandlerChannelCloseSuccess(index: Int)
case class NotificationToHandlerChannelCloseFailure(index: Int)
case class NotificationToHandlerChannelWriteSuccess(index: Int)
case class NotificationToHandlerChannelWriteFailure(index: Int)
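// A typical exchange for a polling/streaming transport (a sketch; the actual senders are the
// receiver client action, the sender client action and the SockJS handler actor):
//   receiverClient ! SubscribeFromReceiverClient
//     -> answered with one of the SubscribeResultToReceiverClient* messages above
//   senderClient   ! MessagesFromSenderClient(Seq("m1"))
//     -> each message is forwarded to the SockJS handler as SockJsText("m1")
//   handler        ! MessageFromHandler(index, "reply")
//     -> delivered to the receiver client as NotificationToReceiverClientMessage (or buffered)
//   handler        ! CloseFromHandler(index)
//     -> delivered as NotificationToReceiverClientClosed, after which the session times out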
object NonWebSocketSession {
// The session must time out after 5 seconds of not having a receiving connection
// http://sockjs.github.com/sockjs-protocol/sockjs-protocol-0.3.3.html#section-46
private val TIMEOUT_CONNECTION = 5.seconds
private val TIMEOUT_CONNECTION_MILLIS = TIMEOUT_CONNECTION.toMillis
}
/**
* There should be at most one subscriber:
* http://sockjs.github.com/sockjs-protocol/sockjs-protocol-0.3.3.html
*
 * To avoid running out of memory, the actor is stopped when there's no subscriber
* for a long time. Timeout is also used to check if there's no message
* for subscriber for a long time.
* See TIMEOUT_CONNECTION and TIMEOUT_HEARTBEAT in NonWebSocketSessions.
*/
class NonWebSocketSession(var receiverCliento: Option[ActorRef], pathPrefix: String, action: Action) extends Actor {
import NonWebSocketSession._
private[this] var sockJsActorRef: ActorRef = _
// Messages from handler to client are buffered here
private[this] val bufferForClientSubscriber = ArrayBuffer.empty[String]
  // ReceiveTimeout may not occur if messages arrive frequently, thus we
  // need to manually check if there's been no subscriber for a long time.
  // lastSubscribedAt must be Long to avoid Integer overflow, because
// System.currentTimeMillis() is used.
private[this] var lastSubscribedAt = 0L
// Until the timeout occurs, the server must constantly serve the close message
private[this] var closed = false
override def preStart() {
// Attach sockJsActorRef to the current actor, so that sockJsActorRef is
// automatically stopped when the current actor stops
sockJsActorRef = Config.routes.sockJsRouteMap.createSockJsAction(context, pathPrefix)
context.watch(sockJsActorRef)
sockJsActorRef ! (self, action)
lastSubscribedAt = System.currentTimeMillis()
// At start (see constructor), there must be a receiver client
val receiverClient = receiverCliento.get
// Unsubscribed when stopped
context.watch(receiverClient)
// Will be set to TIMEOUT_CONNECTION when the receiver client stops
context.setReceiveTimeout(SockJsAction.TIMEOUT_HEARTBEAT)
}
private def unwatchAndStop() {
receiverCliento.foreach(context.unwatch)
context.unwatch(sockJsActorRef)
context.stop(sockJsActorRef)
context.stop(self)
}
def receive = {
    // When a non-WebSocket receiverClient stops normally after sending data to
    // the browser, we need to wait TIMEOUT_CONNECTION for the client to
    // reconnect. Non-streaming clients disconnect every time. Note that, to let
    // the browser garbage collect, streaming clients also disconnect after
    // sending a large amount of data (4KB in test mode).
//
// See also AbortFromReceiverClient below.
case Terminated(monitored) =>
if (monitored == sockJsActorRef && !closed) {
// See CloseFromHandler
unwatchAndStop()
} else if (receiverCliento == Some(monitored)) {
context.unwatch(monitored)
receiverCliento = None
context.setReceiveTimeout(TIMEOUT_CONNECTION)
}
// Similar to Terminated but no TIMEOUT_CONNECTION is needed
case AbortFromReceiverClient =>
unwatchAndStop()
case SubscribeFromReceiverClient =>
val s = sender()
if (closed) {
s ! SubscribeResultToReceiverClientClosed
} else {
lastSubscribedAt = System.currentTimeMillis()
if (receiverCliento.isEmpty) {
receiverCliento = Some(s)
context.watch(s)
context.setReceiveTimeout(SockJsAction.TIMEOUT_HEARTBEAT)
if (bufferForClientSubscriber.isEmpty) {
s ! SubscribeResultToReceiverClientWaitForMessage
} else {
s ! SubscribeResultToReceiverClientMessages(bufferForClientSubscriber.toList)
bufferForClientSubscriber.clear()
}
} else {
s ! SubscribeResultToReceiverClientAnotherConnectionStillOpen
}
}
case CloseFromHandler(index) =>
// Until the timeout occurs, the server must serve the close message
closed = true
receiverCliento.foreach { receiverClient =>
receiverClient ! NotificationToReceiverClientClosed(index, sockJsActorRef)
context.unwatch(receiverClient)
receiverCliento = None
context.setReceiveTimeout(TIMEOUT_CONNECTION)
}
case MessagesFromSenderClient(messages) =>
if (!closed) messages.foreach { msg => sockJsActorRef ! SockJsText(msg) }
case MessageFromHandler(index, message) =>
if (!closed) {
receiverCliento match {
case None =>
// Stop if there's no subscriber for a long time
val now = System.currentTimeMillis()
if (now - lastSubscribedAt > TIMEOUT_CONNECTION_MILLIS)
unwatchAndStop()
else
bufferForClientSubscriber.append(message)
case Some(receiverClient) =>
// buffer is empty at this moment, because receiverCliento is not empty
receiverClient ! NotificationToReceiverClientMessage(index, message, sockJsActorRef)
}
}
case ReceiveTimeout =>
if (closed || receiverCliento.isEmpty) {
// Closed or no subscriber for a long time
unwatchAndStop()
} else {
// No message for subscriber for a long time
receiverCliento.get ! NotificationToReceiverClientHeartbeat
}
}
}
| georgeOsdDev/xitrum | src/main/scala/xitrum/sockjs/NonWebSocketSession.scala | Scala | mit | 6,865 |
package com.adendamedia.salad.api
import io.lettuce.core.RedisFuture
import scala.compat.java8.FutureConverters._
import scala.concurrent.{ExecutionContext, Future}
import scala.language.implicitConversions
import scala.util.{Failure, Success, Try}
object ImplicitFutureConverters {
/**
* Implicitly convert Future Java types into Future Scala types.
* Implicit conversions chain as follows:
* RedisFuture[JavaType] -> Future[JavaType] -> Future[ScalaType]
*/
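  // A hedged illustration of the chain (assumes a lettuce async commands instance named
  // `commands` is in scope; that name is hypothetical, not part of this library):
  //   val ttlSet: Future[Boolean] = commands.expire("key", 60) // RedisFuture[java.lang.Boolean] -> Future[Boolean]
  //   val added : Future[Long]    = commands.sadd("key", "a")  // RedisFuture[java.lang.Long]    -> Future[Long]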
implicit def CompletionStageToFuture[J](in: RedisFuture[J]): Future[J] =
in.toScala
implicit def RedisFutureJavaBooleanToFutureScalaBoolean(in: RedisFuture[java.lang.Boolean])
(implicit executionContext: ExecutionContext)
: Future[Boolean] =
in.toScala
implicit def FutureJavaBooleanToFutureScalaBoolean(in: Future[java.lang.Boolean])
(implicit executionContext: ExecutionContext)
: Future[Boolean] =
in.map(_ == true)
implicit def RedisFutureJavaLongToFutureScalaBoolean(in: RedisFuture[java.lang.Long])
(implicit executionContext: ExecutionContext)
: Future[Boolean] =
in.toScala
implicit def FutureJavaLongToFutureScalaBoolean(in: Future[java.lang.Long])
(implicit executionContext: ExecutionContext)
: Future[Boolean] =
in.map(_ == 1)
implicit def RedisFutureJavaLongToFutureScalaLong(in: RedisFuture[java.lang.Long])
(implicit executionContext: ExecutionContext)
: Future[Long] =
in.toScala
implicit def FutureJavaLongToFutureScalaLong(in: Future[java.lang.Long])
(implicit executionContext: ExecutionContext)
: Future[Long] =
in.map(_.toLong)
/**
* These implicits are apt to cause compiler problems so they are implemented as wrappers that
* must be invoked manually.
   * e.g. saladAPI.api.clusterReplicate(poorestMaster).isOK
*/
// Ensure that unchecked exceptions can be mapped over.
implicit class EnsureExceptionsMappable[J](in: Try[RedisFuture[J]]) {
def toFuture: Future[J] = in match {
case Success(future) => future
case Failure(t) => Future.failed(t)
}
}
// For simple-string-reply, we get either success or an exception
// which maps to either Future.success or Future.failed
implicit class FutureSimpleStringReply(in: Future[String]) {
def isOK(implicit executionContext: ExecutionContext)
    : Future[Unit] = in.flatMap {
      case "OK" => Future.successful(())
      case err => Future.failed(new Exception(err))
}
}
}
| adenda/salad | src/main/scala/com/adendamedia/salad/api/ImplicitFutureConverters.scala | Scala | lgpl-3.0 | 2,752 |
package business
import java.sql.SQLException
import models.Profile
import org.mindrot.jbcrypt.BCrypt
import test.core.BaseTestSpec
import scala.util.{Failure, Success}
/**
* Created by justin on 3/2/15.
*/
class ProfileManagerTest extends BaseTestSpec {
val profileManager: ProfileManager = new ProfileManager with TestDaoTrait
val testProfile = Profile(Some(1), "AAAA", "asdf", "[email protected]", false)
val dbConnectionError = "Error connecting to the db"
"Checking if user exists" should "return true if username is found" in {
(profileManager.profiles.byUsername _) expects("AAAA") returning (Success(Some(testProfile)))
assert(profileManager.userExists("AAAA").get)
}
it should "return false if username is not found" in {
(profileManager.profiles.byUsername _) expects("AAAA") returning(Success(None))
assert(!profileManager.userExists("AAAA").get)
}
it should "log and return None should there be a failure calling for profile by username" in {
(profileManager.profiles.byUsername _) expects("AAAA") returning(Failure(new SQLException(dbConnectionError)))
profileManager.userExists("AAAA") should be (None)
}
it should "return true if user id is found" in {
(profileManager.profiles.byId _) expects(1l) returning(Success(Some(testProfile)))
assert(profileManager.userExists(1).get)
}
it should "return false if user id is not found" in {
(profileManager.profiles.byId _) expects(1l) returning(Success(None))
assert(!profileManager.userExists(1).get)
}
it should "log and return None should there be a failure calling for profile by id" in {
(profileManager.profiles.byId _) expects(1l) returning(Failure(new SQLException(dbConnectionError)))
profileManager.userExists(1) should be (None)
}
"Create user profile" should "be able to create a new user profile" in {
val actual = successfulCreateUserBase()
actual.username should be ("tom")
actual.email should be ("[email protected]")
}
it should "be able to create a new user profile as an admin" in {
val actual = successfulCreateUserBase(true)
actual.isAdmin should be (true)
}
it should "block creation if user already exists" in {
(profileManager.profiles.byUsername _) expects("tom") returning(Success(Some(testProfile)))
profileManager.createUser("tom", "1234", "[email protected]", false) should be (null)
}
it should "strip the password from the returning object" in {
val actual = successfulCreateUserBase()
actual.password should be (null)
}
it should "return null if None was returned from the user existance check" in {
(profileManager.profiles.byUsername _) expects("AAAA") returning(Failure(new SQLException(dbConnectionError)))
profileManager.createUser("AAAA", "", "", false) should be (null)
}
it should "return null if there was a db failure while inserting profile" in {
(profileManager.profiles.byUsername _) expects("AAAA") returning(Success(None))
(profileManager.profiles.+= _) expects(*) returning(Failure(new SQLException(dbConnectionError)))
profileManager.createUser("AAAA", "", "", false) should be (null)
}
"A login" should "be able to occur successfully if the moons are aligned" in {
val expectedResponse = Profile(Some(1), "tom", BCrypt.hashpw("1234", BCrypt.gensalt(4)), "[email protected]", false)
(profileManager.profiles.byUsername _) expects("tom") returning(Success(Some(expectedResponse)))
profileManager.attemptLogin("tom", "1234") should not be null
}
it should "fail if the user lookup result is null" in {
(profileManager.profiles.byUsername _) expects("tom") returning(Success(None))
profileManager.attemptLogin("tom", "1234") should be (null)
}
it should "fail if the passwords do not match" in {
val expectedResponse = Profile(Some(1), "tom", BCrypt.hashpw("1234", BCrypt.gensalt(4)), "[email protected]", false)
(profileManager.profiles.byUsername _) expects("tom") returning(Success(Some(expectedResponse)))
profileManager.attemptLogin("tom", "1235") should be (null)
}
it should "fail, log error, and return null if there is a db error calling profiles by username" in {
(profileManager.profiles.byUsername _) expects(*) returning(Failure(new SQLException(dbConnectionError)))
profileManager.attemptLogin("tom", "1234") should be (null)
}
"Count profiles" should "be able to return a count of the profiles" in {
(profileManager.profiles.size _) expects() returning(Success(100))
profileManager.countProfiles should be (100)
}
it should "return -1 and log to fail gracefully" in {
(profileManager.profiles.size _) expects() returning(Failure(new SQLException(dbConnectionError)))
profileManager.countProfiles should be (-1)
}
"Query Profile by id" should "return a profile in Option if a profile is found using id" in {
(profileManager.profiles.byId _) expects(1l) returning(Success(Some(testProfile)))
profileManager.queryUserProfileById(1) should be (Some(testProfile))
}
it should "return None if a profile is not found using id" in {
(profileManager.profiles.byId _) expects(*) returning(Success(None))
profileManager.queryUserProfileById(1) should be (None)
}
it should "log and return None should there be a failure calling the db using id" in {
(profileManager.profiles.byId _) expects(*) returning(Failure(new SQLException(dbConnectionError)))
profileManager.queryUserProfileById(1) should be (None)
}
"Query Profile by username" should "return a profile in Option if a profile is found using username" in {
(profileManager.profiles.byUsername _) expects("AAAA") returning(Success(Some(testProfile)))
profileManager.queryUserProfileByUsername("AAAA") should be (Some(testProfile))
}
it should "return None if a profile is found using username" in {
(profileManager.profiles.byUsername _) expects(*) returning(Success(None))
profileManager.queryUserProfileByUsername("AAAA") should be (None)
}
it should "log and return None should there be a failure calling the db using username" in {
(profileManager.profiles.byUsername _) expects(*) returning(Failure(new SQLException(dbConnectionError)))
profileManager.queryUserProfileByUsername("AAAA") should be (None)
}
"Updating user's admin status" should "return true and null if logged in user is admin and separate from updatee and db call successful" in {
(profileManager.profiles.updateAdminStatus _) expects(1l, true) returning(Success(1))
profileManager.updateUserAdminStatus(1, true, Some(Profile(Some(2), null, null, null, true))) should be(true, null)
}
it should "fail to update user admin status when the user to be updated is the user logged in" in {
profileManager.updateUserAdminStatus(1, true, Some(Profile(Some(1), null, null, null, true))) should be(false, profileManager.currentUserIsUserBeingUpdatedError)
}
it should "fail to update user admin status if there is no logged in user" in {
profileManager.updateUserAdminStatus(1, true, None) should be(false, profileManager.userNotSignedInError)
}
it should "fail to update user admin status if the logged in user isn't admin" in {
profileManager.updateUserAdminStatus(1, true, Some(Profile(Some(2), null, null, null, false))) should be(false, profileManager.loggedInUserNotAdminError)
}
it should "fail to update user admin status if the requested user isn't found" in {
(profileManager.profiles.updateAdminStatus _) expects(1l, true) returning(Success(0))
profileManager.updateUserAdminStatus(1, true, Some(Profile(Some(2), null, null, null, true))) should be(false, profileManager.userNotFoundError)
}
it should "fail to update user admin status and log error if the database has an error" in {
(profileManager.profiles.updateAdminStatus _) expects(1l, true) returning(Failure(new SQLException(dbConnectionError)))
profileManager.updateUserAdminStatus(1, true, Some(Profile(Some(2), null, null, null, true))) should be(false, profileManager.dbError)
}
def successfulCreateUserBase(isAdmin: Boolean=false) = {
(profileManager.profiles.byUsername _) expects("tom") returning(Success(None))
(profileManager.profiles += _) expects(Profile(None, "tom", "1234", "[email protected]", isAdmin)) returning(Success(1))
profileManager.createUser("tom", "1234", "[email protected]", isAdmin)
}
}
| maximx1/lecarton | test/business/ProfileManagerTest.scala | Scala | mit | 8,428 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.BindReferences.bindReferences
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateOrdering
import org.apache.spark.sql.types._
/**
* A base class for generated/interpreted row ordering.
*/
class BaseOrdering extends Ordering[InternalRow] {
def compare(a: InternalRow, b: InternalRow): Int = {
throw new UnsupportedOperationException
}
}
/**
* An interpreted row ordering comparator.
*/
class InterpretedOrdering(ordering: Seq[SortOrder]) extends BaseOrdering {
def this(ordering: Seq[SortOrder], inputSchema: Seq[Attribute]) =
this(bindReferences(ordering, inputSchema))
override def compare(a: InternalRow, b: InternalRow): Int = {
var i = 0
val size = ordering.size
while (i < size) {
val order = ordering(i)
val left = order.child.eval(a)
val right = order.child.eval(b)
if (left == null && right == null) {
// Both null, continue looking.
} else if (left == null) {
return if (order.nullOrdering == NullsFirst) -1 else 1
} else if (right == null) {
return if (order.nullOrdering == NullsFirst) 1 else -1
} else {
val comparison = order.dataType match {
case dt: AtomicType if order.direction == Ascending =>
dt.ordering.asInstanceOf[Ordering[Any]].compare(left, right)
case dt: AtomicType if order.direction == Descending =>
dt.ordering.asInstanceOf[Ordering[Any]].reverse.compare(left, right)
case a: ArrayType if order.direction == Ascending =>
a.interpretedOrdering.asInstanceOf[Ordering[Any]].compare(left, right)
case a: ArrayType if order.direction == Descending =>
a.interpretedOrdering.asInstanceOf[Ordering[Any]].reverse.compare(left, right)
case s: StructType if order.direction == Ascending =>
s.interpretedOrdering.asInstanceOf[Ordering[Any]].compare(left, right)
case s: StructType if order.direction == Descending =>
s.interpretedOrdering.asInstanceOf[Ordering[Any]].reverse.compare(left, right)
case other =>
throw new IllegalArgumentException(s"Type $other does not support ordered operations")
}
if (comparison != 0) {
return comparison
}
}
i += 1
}
0
}
}
object InterpretedOrdering {
/**
* Creates a [[InterpretedOrdering]] for the given schema, in natural ascending order.
*/
def forSchema(dataTypes: Seq[DataType]): InterpretedOrdering = {
new InterpretedOrdering(dataTypes.zipWithIndex.map {
case (dt, index) => SortOrder(BoundReference(index, dt, nullable = true), Ascending)
})
}
}
object RowOrdering extends CodeGeneratorWithInterpretedFallback[Seq[SortOrder], BaseOrdering] {
/**
* Returns true iff the data type can be ordered (i.e. can be sorted).
*/
def isOrderable(dataType: DataType): Boolean = dataType match {
case NullType => true
case dt: AtomicType => true
case struct: StructType => struct.fields.forall(f => isOrderable(f.dataType))
case array: ArrayType => isOrderable(array.elementType)
case udt: UserDefinedType[_] => isOrderable(udt.sqlType)
case _ => false
}
/**
* Returns true iff outputs from the expressions can be ordered.
*/
def isOrderable(exprs: Seq[Expression]): Boolean = exprs.forall(e => isOrderable(e.dataType))
override protected def createCodeGeneratedObject(in: Seq[SortOrder]): BaseOrdering = {
GenerateOrdering.generate(in)
}
override protected def createInterpretedObject(in: Seq[SortOrder]): BaseOrdering = {
new InterpretedOrdering(in)
}
def create(order: Seq[SortOrder], inputSchema: Seq[Attribute]): BaseOrdering = {
createObject(bindReferences(order, inputSchema))
}
/**
* Creates a row ordering for the given schema, in natural ascending order.
*/
def createNaturalAscendingOrdering(dataTypes: Seq[DataType]): BaseOrdering = {
val order: Seq[SortOrder] = dataTypes.zipWithIndex.map {
case (dt, index) => SortOrder(BoundReference(index, dt, nullable = true), Ascending)
}
create(order, Seq.empty)
}
}
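/**
 * Hypothetical usage sketch (added for illustration, not part of the Spark
 * source): builds a natural ascending ordering over an (Int, String) schema and
 * compares two caller-supplied internal rows using the interpreted path.
 */
private[expressions] object NaturalOrderingExample {
  def compareRows(left: InternalRow, right: InternalRow): Int =
    RowOrdering.createNaturalAscendingOrdering(Seq(IntegerType, StringType))
      .compare(left, right)
}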
| goldmedal/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ordering.scala | Scala | apache-2.0 | 5,091 |
package synthesis
//dummy
object APAInputAssignments
/** This object offers global methods to deal with input assignments.
*/
object InputAssignment {
//Combines input sentences
def listToCommonString(input_assignment:List[InputAssignment], indent:String):String = {
val prog_input = input_assignment map (_.toCommonString(indent)) match {
case Nil => ""
      case l => l reduceLeft {(t1, t2) => (t1 + "\n" + t2)}
}
prog_input
}
}
/** An input assignment is a way to assign some expressions to input variables
* like in "val a = b/2+c", where a, b and c are input variables.
*/
sealed abstract class InputAssignment {
/** Returns a list of input variables contained in the expression of this input assignment */
def input_variables: List[InputVar]
/** Extracts a non-exhaustive list of simple assignments of InputTerms to InputVars. */
def extract:List[(InputVar, APAInputTerm)]
/** Returns a string representing this assignment under the current rendering mode. */
def toCommonString(indent: String):String = APASynthesis.rendering_mode match {
case RenderingScala() => toScalaString(indent)
case RenderingPython() => toPythonString(indent)
}
/** Returns a scala string representing the variables on the left of <code>val ... = ...</code> */
def varToScalaString = this match {
case SingleInputAssignment(i, t) => i.name
case BezoutInputAssignment(vl, tl) => "List(" + (vl map { l => "List(" + (l map (_.name) reduceLeft (_+","+_)) + ")"} reduceLeft (_+","+_)) + ")"
}
/** Returns a scala string representing the value on the right of <code>val ... = ...</code> */
def valToScalaString = this match {
case SingleInputAssignment(i, t) => t
case BezoutInputAssignment(vl, tl) => "Common.bezoutWithBase(1, "+(tl map (_.toString) reduceLeft (_+", "+_))+")"
}
/** Returns a python string representing the variables on the left of <code>val ... = ...</code> */
def varToPythonString = this match {
case SingleInputAssignment(i, t) => i.name
case BezoutInputAssignment(vl, tl) => "(" + (vl map { l => "(" + (l map (_.name) reduceLeft (_+","+_)) + ")"} reduceLeft (_+","+_)) + ")"
}
/** Returns a python string representing the value on the right of <code>val ... = ...</code> */
def valToPythonString = this match {
case SingleInputAssignment(i, t) => t
case BezoutInputAssignment(vl, tl) => "bezoutWithBase(1, "+(tl map (_.toString) reduceLeft (_+", "+_))+")"
}
/** Returns the whole assignment as a scala string */
def toScalaString(indent: String): String = {
indent+"val "+ varToScalaString + " = " + valToScalaString
}
/** Returns the whole assignment as a python string */
def toPythonString(indent: String): String = {
indent+ varToPythonString + " = " + valToPythonString
}
/** Returns the assignment were all input variables have been replaced by corresponding input terms. */
def replaceList(l : List[(InputVar, APAInputTerm)]):List[InputAssignment]
/** Returns the assignment were the sign abstraction s is applied to each occurence of t1 */
def assumeSignInputTerm(t1: APAInputTerm, s: SignAbstraction):InputAssignment
}
// A simple assignment corresponding to <code>val v = t</code>
case class SingleInputAssignment(v: InputVar, t: APAInputTerm) extends InputAssignment {
def input_variables = List(v)
def extract = List((v, t))
def replaceList(l : List[(InputVar, APAInputTerm)]) = List(SingleInputAssignment(v, t.replaceList(l)))
def assumeSignInputTerm(t1: APAInputTerm, s: SignAbstraction) = SingleInputAssignment(v, t.assumeSignInputTerm(t1, s))
}
// A complex Bézout assignment corresponding to <code>val (v1::v2::Nil)::(v3::v4::Nil)::Nil = Common.bezoutWithBase(1, t1, t2)</code>
case class BezoutInputAssignment(v: List[List[InputVar]], t: List[APAInputTerm]) extends InputAssignment {
def input_variables = v.flatten : List[InputVar]
def extract = Nil
def replaceList(l: List[(InputVar, APAInputTerm)]) = BezoutInputAssignment(v, t map (_.replaceList(l))).simplified
  /** Returns a simplified version of the assignment as a list of input assignments.
   *  Simplification occurs if some coefficients are equal to 1 or -1, or in other simple cases. */
def simplified: List[InputAssignment] = {
t map (_.simplified) match {
case t if t forall {
case APAInputCombination(i, Nil) => true
case _ => false
} =>
val bezout_coefs:List[Int] = t map {
case APAInputCombination(i, Nil) => i
case t => throw new Exception("Theoretically unreachable section : "+t+" should be an integer")
}
// Double zip and add all assignments to variables
val assignments: List[(InputVar, APAInputTerm)] = (
(v zip Common.bezoutWithBase(1, bezout_coefs)) map {
case (l1, l2) => l1 zip (
l2 map {
case i => APAInputCombination(i)
}
)
}
).flatten
val assignment_converted = assignments.map({ case (v, t) => SingleInputAssignment(v, t)})
assignment_converted
case a::Nil => // This corresponds to equations of the type 1+a*v = 0. If there is a solution, it is exactly -a (a has to be equal to 1 or -1)
val List(List(iv)) = v
List(SingleInputAssignment(iv, -a))
case a::b::Nil => // This corresponds to equations of the type 1+a*u+b*v = 0
// There is an optimization if either a or b has an absolute value of 1.
(a, b) match {
case (APAInputCombination(i, Nil), b) if Math.abs(i) == 1 =>
          // case 1 + i*u + b*v == 0
val index_b = 2
var map_index_term = Map[Int, APAInputTerm]() + (index_b -> b)
val new_ints = Common.bezoutWithBase(1, i, index_b)
val assignments = convertAssignments(v, new_ints, map_index_term)
val assignment_converted = assignments.map({ case (v, t) => SingleInputAssignment(v, t)})
assignment_converted
case (a, APAInputCombination(j, Nil)) if Math.abs(j) == 1 =>
val index_a = 2
var map_index_term = Map[Int, APAInputTerm]() + (index_a -> a)
val new_ints = Common.bezoutWithBase(1, index_a, j)
val assignments = convertAssignments(v, new_ints, map_index_term)
val assignment_converted = assignments.map({ case (v, t) => SingleInputAssignment(v, t)})
assignment_converted
case _ => List(BezoutInputAssignment(v, t))
}
case t =>
val t_indexed = t.zipWithIndex
t_indexed find {
case (APAInputCombination(i, Nil), index) if Math.abs(i) == 1 => true
case _ => false
} match {
case Some((APAInputCombination(one_coefficient, Nil), index)) =>
// Corresponds to something trivial like 1 + a*x + b*y + z + c*w = 0
// The corresponding assignment is x = y1, y = y2, z = -1-a*x-b*y-c*w and w = y3
// (1 )T (0, 0, -1, 0) (a)
// (ya) . (1, 0, -a, 0) . (b) + 1 == 0
// (yb) (0, 1, -b, 0) (1)
// (yc) (0, 0, -c, 1) (c)
// To find the solution, encode a = 10, b = 20, c=30, and in the found solution, replace -10 by -a, etc.
var map_index_term = Map[Int, APAInputTerm]()
val to_solve_bezout_on = t_indexed map { case (term, i) =>
if(i == index) {
one_coefficient
} else {
val final_index = 10*i+10
map_index_term += (final_index -> term)
final_index
}
}
val new_ints = Common.bezoutWithBase(1, to_solve_bezout_on)
val assignments = convertAssignments(v, new_ints, map_index_term)
val assignment_converted = assignments.map({ case (v, t) => SingleInputAssignment(v, t)})
assignment_converted
case _ => // Essentially None
List(BezoutInputAssignment(v, t))
}
}
}
  /** Converts an integer Bézout solution to an InputTerm solution, where specific integers
   *  given in map_index_term are replaced with the corresponding input terms. */
def convertAssignments(v: List[List[InputVar]],
solved_for_ints: List[List[Int]],
map_index_term: Map[Int, APAInputTerm]): List[(InputVar, APAInputTerm)] = {
(
(v zip solved_for_ints) map {
case (l1, l2) => l1 zip (
l2 map {
case index if map_index_term contains index =>
map_index_term(index)
case index if map_index_term contains (-index) =>
            -map_index_term(-index)
case i =>
APAInputCombination(i)
}
)
}
).flatten
}
/** Propagate a sign assumption. Does nothing for Bézout assignment. */
def assumeSignInputTerm(t1: APAInputTerm, s: SignAbstraction) = this
}
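/** Hypothetical usage sketch (added for illustration, not part of the original
 *  synthesis code): mirrors the shape documented above for two coefficient
 *  terms. Because both coefficients (2 and 3) are concrete integers, the call
 *  to simplified resolves the Bézout basis eagerly into plain single
 *  assignments over the 2x2 grid of input variables. */
object BezoutInputAssignmentExample {
  def forTwoCoefficients(v1: InputVar, v2: InputVar,
                         v3: InputVar, v4: InputVar): List[InputAssignment] =
    BezoutInputAssignment(
      List(List(v1, v2), List(v3, v4)),
      List(APAInputCombination(2), APAInputCombination(3))).simplified
}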
| epfl-lara/comfusy | src/main/scala/APAInputAssignments.scala | Scala | bsd-2-clause | 9,076 |
package com.sksamuel.scrimage.filter
import org.scalatest.{ OneInstancePerTest, BeforeAndAfter, FunSuite }
import com.sksamuel.scrimage.Image
/** @author Stephen Samuel */
class UnsharpFilterTest extends FunSuite with BeforeAndAfter with OneInstancePerTest {
val original = getClass.getResourceAsStream("/bird_small.png")
val expected = getClass.getResourceAsStream("/com/sksamuel/scrimage/filters/bird_small_unsharp.png")
test("filter output matches expected") {
assert(Image(original).filter(UnsharpFilter()) === Image(expected))
}
}
| carlosFattor/scrimage | scrimage-filters/src/test/scala/com/sksamuel/scrimage/filter/UnsharpFilterTest.scala | Scala | apache-2.0 | 552 |
/**
* (c) Copyright 2013 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.express.flow
import org.kiji.annotations.ApiAudience
import org.kiji.annotations.ApiStability
import org.kiji.schema.KijiColumnName
import org.kiji.schema.KijiURI
/**
* Factory methods for constructing [[org.kiji.express.flow.KijiSource]]s that will be used as
* inputs to a KijiExpress flow.
*
* Example usage:
*
* {{{
* val column3 = QualifiedColumnInputSpec.builder
* .withColumn("info", "column3")
* .withSchemaSpec(DefaultReader)
* .build
*
* //Fields API
* KijiInput.builder
* .withTableURI("kiji://localhost:2181/default/mytable")
* .withTimeRangeSpec(TimeRangeSpec.Between(5, 10))
* .withColumns("info:column1" -> 'column1, "info:column2" -> 'column2)
* .addColumnSpecs(column3 -> 'column3)
* // Selects a 30% sample of data between startEid and endEid.
 *     .withRowRangeSpec(RowRangeSpec.Between(startEid, endEid))
* .withRowFilterSpec(RowFilterSpec.Random(0.3F))
* .build
*
* //Typed API
* KijiInput.typedBuilder
* .withTableURI("kiji://localhost:2181/default/mytable")
* .withColumns("info:column1", "info:column2")
* .addColumnSpecs(column3)
 *     .withRowRangeSpec(RowRangeSpec.Between(startEid, endEid))
* .withRowFilterSpec(RowFilterSpec.Random(0.3F))
* .build
* }}}
*
* Note: Columns containing no values will be replaced with an empty sequence unless all requested
 * columns are empty, in which case the entire row will be skipped.
*/
@ApiAudience.Public
@ApiStability.Stable
object KijiInput {
/** Default time range for KijiSource */
private val DEFAULT_TIME_RANGE: TimeRangeSpec = TimeRangeSpec.All
/**
* Create a new empty KijiInput.Builder.
*
* @return a new empty KijiInput.Builder.
*/
def builder: Builder = Builder()
/**
* Create a new KijiInput.Builder as a copy of the given Builder.
*
* @param other Builder to copy.
* @return a new KijiInput.Builder as a copy of the given Builder.
*/
def builder(other: Builder): Builder = Builder(other)
/**
* Create an empty KijiInput.TypedBuilder.
*
* @return an empty KijiInput.TypedBuilder
*/
def typedBuilder: TypedBuilder = TypedBuilder()
/**
* Create a new KijiInput.TypedBuilder as a copy of the given TypedBuilder.
*
* @param other TypedBuilder to copy.
* @return a new KijiInput.TypedBuilder as a copy of the given TypedBuilder.
*/
  def typedBuilder(other: TypedBuilder): TypedBuilder = TypedBuilder(other)
/**
* Builder for [[org.kiji.express.flow.KijiSource]]s to be used as inputs.
*
* @param mTableURI string of the table from which to read.
* @param mTimeRange from which to read values.
* @param mColumnSpecs specification of columns from which to read.
* @param mRowRangeSpec rows from which to read.
* @param mRowFilterSpec filters used to read.
*/
@ApiAudience.Public
@ApiStability.Stable
final class Builder private(
private[this] var mTableURI: Option[String],
private[this] var mTimeRange: Option[TimeRangeSpec],
private[this] var mColumnSpecs: Option[Map[_ <: ColumnInputSpec, Symbol]],
private[this] var mRowRangeSpec: Option[RowRangeSpec],
private[this] var mRowFilterSpec: Option[RowFilterSpec]
) {
/** protects read and write access to private var fields. */
private val monitor = new AnyRef
/**
* Get the Kiji URI of the table from which to read from this Builder.
*
* @return the Kiji URI of the table from which to read from this Builder.
*/
def tableURI: Option[String] = monitor.synchronized(mTableURI)
/**
* Get the input time range specification from this Builder.
*
* @return the input time range specification from this Builder.
*/
def timeRange: Option[TimeRangeSpec] = monitor.synchronized(mTimeRange)
/**
* Get the input specifications from this Builder.
*
* @return the input specifications from this Builder.
*/
def columnSpecs: Option[Map[_ <: ColumnInputSpec, Symbol]] = monitor.synchronized(mColumnSpecs)
/**
* Get the input row range specification from this Builder.
*
* @return the input row range specification from this Builder.
*/
def rowRangeSpec: Option[RowRangeSpec] = monitor.synchronized(mRowRangeSpec)
/**
* Get the input row filter specification from this Builder.
*
* @return the input row filter specification from this Builder.
*/
def rowFilterSpec: Option[RowFilterSpec] = monitor.synchronized(mRowFilterSpec)
/**
* Configure the KijiSource to read values from the table with the given Kiji URI.
*
* @param tableURI of the table from which to read.
* @return this builder.
*/
def withTableURI(tableURI: String): Builder = monitor.synchronized {
require(tableURI != null, "Table URI may not be null.")
require(mTableURI.isEmpty, "Table URI already set to: " + mTableURI.get)
mTableURI = Some(tableURI)
this
}
/**
* Configure the KijiSource to read values from the table with the given Kiji URI.
*
* @param tableURI of the table from which to read.
* @return this builder.
*/
def withTableURI(tableURI: KijiURI): Builder = withTableURI(tableURI.toString)
/**
* Configure the KijiSource to read values from the given range of input times.
*
* @param timeRangeSpec specification of times from which to read.
* @return this builder.
*/
def withTimeRangeSpec(timeRangeSpec: TimeRangeSpec): Builder = monitor.synchronized {
require(timeRangeSpec != null, "Time range may not be null.")
require(mTimeRange.isEmpty, "Time range already set to: " + mTimeRange.get)
mTimeRange = Some(timeRangeSpec)
this
}
/**
* Configure the KijiSource to read values from the given columns into the corresponding fields.
*
* @param columns mapping from column inputs to fields which will hold the values from those
* columns.
* @return this builder.
*/
def withColumns(columns: (String, Symbol)*): Builder = withColumns(columns.toMap)
/**
* Configure the KijiSource to read values from the given columns into the corresponding fields.
*
* @param columns mapping from column inputs to fields which will hold the values from those
* columns.
* @return this builder.
*/
def withColumns(columns: Map[String, Symbol]): Builder =
withColumnSpecs(columns.map { Builder.columnToSpec })
/**
* Configure the KijiSource to read values from the given columns into the corresponding fields.
*
* @param columns mapping from column inputs to fields which will hold the values from those
* columns.
* @return this builder.
*/
def addColumns(columns: (String, Symbol)*): Builder = addColumns(columns.toMap)
/**
* Configure the KijiSource to read values from the given columns into the corresponding fields.
*
* @param columns mapping from column inputs to fields which will hold the values from those
* columns.
* @return this builder.
*/
def addColumns(columns: Map[String, Symbol]): Builder =
addColumnSpecs(columns.map { Builder.columnToSpec })
/**
* Configure the KijiSource to read values from the given columns into the corresponding fields.
*
* @param columnSpecs mapping from column inputs to fields which will hold the values from those
* columns.
* @return this builder.
*/
def withColumnSpecs(columnSpecs: (_ <: ColumnInputSpec, Symbol)*): Builder =
withColumnSpecs(columnSpecs.toMap[ColumnInputSpec, Symbol])
/**
* Configure the KijiSource to read values from the given columns into the corresponding fields.
*
* @param columnSpecs mapping from column inputs to fields which will hold the values from those
* columns.
* @return this builder.
*/
def addColumnSpecs(columnSpecs: (_ <: ColumnInputSpec, Symbol)*): Builder =
addColumnSpecs(columnSpecs.toMap[ColumnInputSpec, Symbol])
/**
* Configure the KijiSource to read values from the given columns into the corresponding fields.
*
* @param columnSpecs mapping from column inputs to fields which will hold the values from those
* columns.
* @return this builder.
*/
def withColumnSpecs(columnSpecs: Map[_ <: ColumnInputSpec, Symbol]): Builder = {
require(columnSpecs != null, "Column input specs may not be null.")
require(columnSpecs.size == columnSpecs.values.toSet.size,
"Column input specs may not include duplicate Fields. found: " + columnSpecs)
monitor.synchronized {
require(mColumnSpecs.isEmpty, "Column input specs already set to: " + mColumnSpecs.get)
mColumnSpecs = Some(columnSpecs)
}
this
}
/**
* Configure the KijiSource to read values from the given columns into the corresponding fields.
*
* @param columnSpecs mapping from column inputs to fields which will hold the values from those
* columns.
* @return this builder.
*/
def addColumnSpecs(columnSpecs: Map[_ <: ColumnInputSpec, Symbol]): Builder = {
require(columnSpecs != null, "Column input specs may not be null.")
require(columnSpecs.size == columnSpecs.values.toSet.size,
"Column input specs may not include duplicate Fields. found: " + columnSpecs)
monitor.synchronized {
mColumnSpecs match {
case Some(cs) => {
val symbols: List[Symbol] = columnSpecs.values.toList
val duplicateField: Boolean = cs.toIterable.exists { entry: (ColumnInputSpec, Symbol) =>
val (_, field) = entry
symbols.contains(field)
}
require(!duplicateField, ("Column input specs already set to: %s May not add duplicate "
+ "Fields.").format(mColumnSpecs.get))
mColumnSpecs = Some(cs ++ columnSpecs)
}
case None => mColumnSpecs = Some(columnSpecs)
}
}
this
}
/**
* Configure the KijiSource to traverse rows within the requested row range specification.
*
* @param rowRangeSpec requested range for rows.
* @return this builder.
*/
def withRowRangeSpec(rowRangeSpec: RowRangeSpec): Builder = monitor.synchronized {
require(rowRangeSpec != null, "Row range spec may not be null.")
require(mRowRangeSpec.isEmpty, "Row range spec already set to: " + mRowRangeSpec.get)
mRowRangeSpec = Some(rowRangeSpec)
this
}
/**
* Configure the KijiSource to traverse rows with the requested row filter specification.
*
* @param rowFilterSpec requested row filter.
* @return this builder.
*/
def withRowFilterSpec(rowFilterSpec: RowFilterSpec): Builder = monitor.synchronized {
require(rowFilterSpec != null, "Row filter spec may not be null.")
require(mRowFilterSpec.isEmpty, "Row filter spec already set to: " + mRowFilterSpec.get)
mRowFilterSpec = Some(rowFilterSpec)
this
}
/**
* Build a new KijiSource configured for input from the values stored in this Builder.
*
* @throws IllegalStateException if the builder is not in a valid state to be built.
* @return a new KijiSource configured for input from the values stored in this Builder.
*/
def build: KijiSource = monitor.synchronized {
KijiInput(
tableURI.getOrElse(throw new IllegalStateException("Table URI must be specified.")),
timeRange.getOrElse(DEFAULT_TIME_RANGE),
columnSpecs.getOrElse(
throw new IllegalStateException("Column input specs must be specified.")),
rowRangeSpec.getOrElse(RowRangeSpec.All),
rowFilterSpec.getOrElse(RowFilterSpec.NoFilter))
}
}
/**
   * Builder for [[TypedKijiSource]]s to be used as inputs.
*
* @param mTableURI string of the table from which to read.
* @param mTimeRange from which to read values.
* @param mColumnSpecs is the list of specification of columns from which to read.
* @param mRowRangeSpec rows from which to read.
* @param mRowFilterSpec filters used to read.
*/
@ApiAudience.Public
@ApiStability.Evolving
final class TypedBuilder private(
private[this] var mTableURI: Option[String],
private[this] var mTimeRange: Option[TimeRangeSpec],
private[this] var mColumnSpecs: Option[List[_ <: ColumnInputSpec]],
private[this] var mRowRangeSpec: Option[RowRangeSpec],
private[this] var mRowFilterSpec: Option[RowFilterSpec]
) {
/** protects read and write access to private var fields. */
private val monitor = new AnyRef
/**
* Get the Kiji URI of the table from which to read from this TypedBuilder.
*
* @return the Kiji URI of the table from which to read from this TypedBuilder.
*/
def tableURI: Option[String] = monitor.synchronized(mTableURI)
/**
* Get the input time range specification from this TypedBuilder.
*
* @return the input time range specification from this TypedBuilder.
*/
def timeRange: Option[TimeRangeSpec] = monitor.synchronized(mTimeRange)
/**
* Get the input specifications from this TypedBuilder.
*
* @return the input specifications from this TypedBuilder.
*/
def columnSpecs: Option[List[_ <: ColumnInputSpec]] = monitor.synchronized(mColumnSpecs)
/**
* Get the input row range specification from this TypedBuilder.
*
* @return the input row range specification from this TypedBuilder.
*/
def rowRangeSpec: Option[RowRangeSpec] = monitor.synchronized(mRowRangeSpec)
/**
* Get the input row filter specification from this TypedBuilder.
*
* @return the input row filter specification from this TypedBuilder.
*/
def rowFilterSpec: Option[RowFilterSpec] = monitor.synchronized(mRowFilterSpec)
/**
* Configure the [[TypedKijiSource]] to read values from the table with the given Kiji URI.
*
* @param tableURI of the table from which to read.
* @return this TypedBuilder.
*/
def withTableURI(tableURI: String): TypedBuilder = monitor.synchronized {
require(tableURI != null, "Table URI may not be null.")
require(mTableURI.isEmpty, "Table URI already set to: " + mTableURI.get)
mTableURI = Some(tableURI)
this
}
/**
* Configure the [[TypedKijiSource]] to read values from the table with the given Kiji URI.
*
* @param tableURI of the table from which to read.
* @return this TypedBuilder.
*/
def withTableURI(tableURI: KijiURI): TypedBuilder = withTableURI(tableURI.toString)
/**
* Configure the [[TypedKijiSource]] to read values from the given range of input times.
*
* @param timeRangeSpec specification of times from which to read.
* @return this TypedBuilder.
*/
def withTimeRangeSpec(timeRangeSpec: TimeRangeSpec): TypedBuilder = monitor.synchronized {
require(timeRangeSpec != null, "Time range may not be null.")
require(mTimeRange.isEmpty, "Time range already set to: " + mTimeRange.get)
mTimeRange = Some(timeRangeSpec)
this
}
/**
* Configure the [[TypedKijiSource]] to read values from the given columns into the
* corresponding fields.
*
* @param columns mapping from column inputs to fields which will hold the values from those
* columns.
* @return this TypedBuilder.
*/
def withColumns(columns: (String)*): TypedBuilder = withColumns(columns.toList)
/**
* Configure the [[TypedKijiSource]] to read values from the given columns into the
* corresponding fields.
*
* @param columns mapping from column inputs to fields which will hold the values from those
* columns.
* @return this TypedBuilder.
*/
def withColumns(columns: List[String]): TypedBuilder =
withColumnSpecs(columns.map { TypedBuilder.columnToSpec })
/**
* Configure the [[TypedKijiSource]] to read values from the given columns into the
* corresponding fields.
*
* @param columns mapping from column inputs to fields which will hold the values from those
* columns.
* @return this TypedBuilder.
*/
def addColumns(columns: String *): TypedBuilder = addColumns(columns.toList)
/**
* Configure the [[TypedKijiSource]] to read values from the given columns into the
* corresponding fields.
*
* @param columns mapping from column inputs to fields which will hold the values from those
* columns.
* @return this TypedBuilder.
*/
def addColumns(columns: List[String]): TypedBuilder =
addColumnSpecs(columns.map { TypedBuilder.columnToSpec })
/**
* Configure the [[TypedKijiSource]] to read values from the given columns into the
* corresponding fields.
*
* @param columnSpecs mapping from column inputs to fields which will hold the values from those
* columns.
* @return this TypedBuilder.
*/
def withColumnSpecs(columnSpecs: (ColumnInputSpec)*): TypedBuilder =
withColumnSpecs(columnSpecs.toList)
/**
* Configure the [[TypedKijiSource]] to read values from the given columns into the
* corresponding fields.
*
* @param columnSpecs mapping from column inputs to fields which will hold the values from those
* columns.
* @return this TypedBuilder.
*/
def addColumnSpecs(columnSpecs: (ColumnInputSpec)*): TypedBuilder =
addColumnSpecs(columnSpecs.toList)
/**
* Configure the [[TypedKijiSource]] to read values from the given columns into the
* corresponding fields.
*
* @param columnSpecs mapping from column inputs to fields which will hold the values from those
* columns.
* @return this TypedBuilder.
*/
def withColumnSpecs(columnSpecs: List[ColumnInputSpec]): TypedBuilder = {
require(columnSpecs != null, "Column input specs may not be null.")
require(columnSpecs.size == columnSpecs.toSet.size,
"Column input specs may not include duplicate Fields. found: " + columnSpecs)
monitor.synchronized {
require(mColumnSpecs.isEmpty, "Column input specs already set to: " + mColumnSpecs.get)
mColumnSpecs = Some(columnSpecs)
}
this
}
/**
* Configure the [[TypedKijiSource]] to read values from the given columns into the
* corresponding fields.
*
* @param columnSpecs mapping from column inputs to fields which will hold the values from those
* columns.
* @return this TypedBuilder.
*/
def addColumnSpecs(columnSpecs: List[_ <: ColumnInputSpec]): TypedBuilder = {
require(columnSpecs != null, "Column input specs may not be null.")
require(columnSpecs.size == columnSpecs.toSet.size,
"Column input specs may not include duplicate Fields. found: " + columnSpecs)
monitor.synchronized {
mColumnSpecs match {
case Some(cs) => mColumnSpecs = Some(cs ++ columnSpecs)
case None => mColumnSpecs = Some(columnSpecs)
}
}
this
}
/**
* Configure the [[TypedKijiSource]] to traverse rows within the requested row range
* specification.
*
* @param rowRangeSpec requested range for rows.
* @return this TypedBuilder
*/
def withRowRangeSpec(rowRangeSpec: RowRangeSpec): TypedBuilder = monitor.synchronized {
require(rowRangeSpec != null, "Row range spec may not be null.")
require(mRowRangeSpec.isEmpty, "Row range spec already set to: " + mRowRangeSpec.get)
mRowRangeSpec = Some(rowRangeSpec)
this
}
/**
* Configure the [[TypedKijiSource]] to traverse rows with the requested row filter
* specification.
*
* @param rowFilterSpec requested row filter.
* @return this builder.
*/
def withRowFilterSpec(rowFilterSpec: RowFilterSpec): TypedBuilder = monitor.synchronized {
require(rowFilterSpec != null, "Row filter spec may not be null.")
require(mRowFilterSpec.isEmpty, "Row filter spec already set to: " + mRowFilterSpec.get)
mRowFilterSpec = Some(rowFilterSpec)
this
}
/**
* Build a new [[TypedKijiSource]] configured for input from the values stored in this
* TypedBuilder.
*
* @throws IllegalStateException if the builder is not in a valid state to be built.
* @return a new TypedKijiSource configured for input from the values stored in this
* TypedBuilder.
*/
def build: TypedKijiSource[ExpressResult] = monitor.synchronized {
KijiInput.typedKijiSource(
tableURI.getOrElse(throw new IllegalStateException("Table URI must be specified.")),
timeRange.getOrElse(DEFAULT_TIME_RANGE),
columnSpecs.getOrElse(
throw new IllegalStateException("Column input specs must be specified.")),
rowRangeSpec.getOrElse(RowRangeSpec.All),
rowFilterSpec.getOrElse(RowFilterSpec.NoFilter))
}
}
/**
* Companion object providing utility methods and factory methods for creating new instances of
* [[org.kiji.express.flow.KijiInput.Builder]].
*/
@ApiAudience.Public
@ApiStability.Stable
object Builder {
/**
* Create a new empty Builder.
*
* @return a new empty Builder.
*/
def apply(): Builder = new Builder(None, None, None, None, None)
/**
* Create a new Builder as a copy of the given Builder.
*
* @param other Builder to copy.
* @return a new Builder as a copy of the given Builder.
*/
def apply(other: Builder): Builder = other.monitor.synchronized {
// synchronize to get consistent snapshot of other
new Builder(
other.tableURI,
other.timeRange,
other.columnSpecs,
other.rowRangeSpec,
other.rowFilterSpec)
}
/**
* Converts a column -> Field mapping to a ColumnInputSpec -> Field mapping.
*
* @param pair column to Field binding.
* @return ColumnInputSpec to Field binding.
*/
private def columnToSpec(pair: (String, Symbol)): (_ <: ColumnInputSpec, Symbol) = {
val (column, field) = pair
val colName: KijiColumnName = KijiColumnName.create(column)
if (colName.isFullyQualified) {
(QualifiedColumnInputSpec(colName.getFamily, colName.getQualifier), field)
} else {
(ColumnFamilyInputSpec(colName.getFamily), field)
}
}
}
/**
* Companion object providing utility methods and factory methods for creating new instances of
* [[org.kiji.express.flow.KijiInput.TypedBuilder]].
*/
object TypedBuilder {
/**
* Create a new empty TypedBuilder.
*
* @return a new empty TypedBuilder.
*/
def apply(): TypedBuilder = new TypedBuilder(None, None, None, None, None)
/**
* Create a new TypedBuilder as a copy of the given TypedBuilder.
*
* @param other TypedBuilder to copy.
* @return a new TypedBuilder as a copy of the given TypedBuilder.
*/
def apply(other: TypedBuilder): TypedBuilder = other.monitor.synchronized {
// synchronize to get consistent snapshot of other
new TypedBuilder(
other.tableURI,
other.timeRange,
other.columnSpecs,
other.rowRangeSpec,
other.rowFilterSpec)
}
/**
* Converts a string identifying a column to a ColumnInputSpec.
*
     * @param columnNameString the column name as a string, either "family:qualifier" or bare "family".
* @return a ColumnInputSpec.
*/
private def columnToSpec(columnNameString: String): (ColumnInputSpec) = {
      val colName: KijiColumnName = KijiColumnName.create(columnNameString)
if (colName.isFullyQualified) {
QualifiedColumnInputSpec(colName.getFamily, colName.getQualifier)
} else {
ColumnFamilyInputSpec(colName.getFamily)
}
}
}
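  /**
   * Hypothetical usage sketch (added for illustration, not part of the original
   * API): builds a typed source over an assumed table URI, reading one fully
   * qualified column and one whole column family, matching how the column-string
   * conversion above distinguishes "family:qualifier" from bare "family".
   */
  private def exampleTypedSource: TypedKijiSource[ExpressResult] = KijiInput.typedBuilder
      .withTableURI("kiji://localhost:2181/default/mytable")
      .withColumns("info:column1", "purchases")
      .build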
/**
* A factory method for creating a KijiSource.
*
* @param tableUri addressing a table in a Kiji instance.
* @param timeRange that cells must fall into to be retrieved.
* @param columns are a series of pairs mapping column input specs to tuple field names.
* Columns are specified as "family:qualifier" or, in the case of a column family input spec,
* simply "family".
* @param rowRangeSpec the specification for which row interval to scan
* @param rowFilterSpec the specification for which filter to apply.
* @return a source for data in the Kiji table, whose row tuples will contain fields with cell
* data from the requested columns and map-type column families.
*/
private[express] def apply(
tableUri: String,
timeRange: TimeRangeSpec,
columns: Map[_ <: ColumnInputSpec, Symbol],
rowRangeSpec: RowRangeSpec,
rowFilterSpec: RowFilterSpec
): KijiSource = {
new KijiSource(
tableUri,
timeRange,
None,
inputColumns = columns.map { entry: (ColumnInputSpec, Symbol) => entry.swap },
rowRangeSpec = rowRangeSpec,
rowFilterSpec = rowFilterSpec
)
}
/**
* Method for creating a TypedKijiSource.
*
* @param tableUri addressing a table in a Kiji instance.
* @param timeRange that cells must fall into to be retrieved.
* @param columns are a series of pairs mapping column input specs to tuple field names.
* Columns are specified as "family:qualifier" or, in the case of a column family input spec,
* simply "family".
* @param rowRangeSpec the specification for which row interval to scan
* @param rowFilterSpec the specification for which filter to apply.
* @return a typed source for data in the Kiji table, whose row tuples will contain fields with
* cell data from the requested columns and map-type column families.
*/
private[express] def typedKijiSource(
tableUri: String,
timeRange: TimeRangeSpec,
columns: List[_ <: ColumnInputSpec],
rowRangeSpec: RowRangeSpec,
rowFilterSpec: RowFilterSpec
): TypedKijiSource[ExpressResult] = {
new TypedKijiSource[ExpressResult](
tableUri,
timeRange,
      columns,
rowRangeSpec = rowRangeSpec,
rowFilterSpec = rowFilterSpec
)
}
}
| kijiproject/kiji-express | kiji-express/src/main/scala/org/kiji/express/flow/KijiInput.scala | Scala | apache-2.0 | 27,262 |
package com.lookout.borderpatrol.session
import java.util.concurrent.TimeUnit
import com.twitter.util.{Duration, Time}
import org.scalatest.{FlatSpec, Matchers}
class SecretSpec extends FlatSpec with Matchers {
def currentExpiry: Time = SecretExpiry.currentExpiry
behavior of "Secret"
it should "expire in a day" in {
val currentSecret = Secret(currentExpiry)
currentSecret.expiry.moreOrLessEquals(Time.now, Duration(1, TimeUnit.DAYS)) shouldBe true
}
"A Secret" should "be comparable" in {
val currentSecret = Secret(currentExpiry)
currentSecret shouldEqual currentSecret
currentSecret should not equal Secret(Time.fromSeconds(0))
}
it should "have a relatively unique id" in {
val expires = currentExpiry
val sameId = Secret(expires).id == Secret(expires).id && Secret(expires).id == Secret(expires).id
sameId shouldBe false
}
}
| rtyler/borderpatrol | borderpatrol-core/src/test/scala/com/lookout/borderpatrol/session/SecretSpec.scala | Scala | mit | 887 |
package example
sealed abstract class Animal(val cry: String)
case object Cat extends Animal("にゃー")
case object Dog extends Animal("わん")
object Animal {
def checkAnimal(animal: Animal): Unit = animal match {
case Cat =>
println(s"!! 😺 !! ${animal} ${animal.cry}")
case Dog =>
println(s"!! 🐩 !! ${animal} ${animal.cry}")
}
def test0(): Unit = {
val c = Cat
Animal.checkAnimal(c)
val d = Dog
Animal.checkAnimal(d)
}
}
| ohtomi/sandbox | scala-start/src/main/scala/example/Animal.scala | Scala | mit | 482 |
package go3d.server
import go3d.{Game, newGame, Black, White}
import scala.io.Source
import io.circe.parser._
var Games: Map[String, Game] = Map()
/** Creates a new game with the given board size, stores it in Games and returns its generated id. */
def registerGame(boardSize: Int): String =
val gameId = IdGenerator.getId
val game = newGame(boardSize)
Games = Games + (gameId -> game)
return gameId
/** Reads a saved game from the given JSON file; throws ReadSaveGameError if decoding fails. */
def readGame(saveFile: java.io.File): SaveGame =
val source = Source.fromFile(saveFile)
val fileContents = source.getLines.mkString
source.close()
val result = decode[SaveGame](fileContents)
if result.isLeft then throw ReadSaveGameError(result.left.getOrElse(null).getMessage)
return result.getOrElse(null)
/** Restores the players and the game state of a previously saved game into the server maps. */
def restoreGame(saveGame: SaveGame): Unit =
val gameId = saveGame.players.last._2.gameId
Players(gameId) = saveGame.players
Games = Games + (gameId -> saveGame.game)
/** Returns the ids of games where Black has registered but White has not yet joined. */
def openGames(): Array[String] =
Players.filter(p => p._2.contains(Black) && !p._2.contains(White)).map(_._1).toArray
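/** Hypothetical usage sketch (added for illustration, not part of the original
 *  server code): registers a 9x9 game and checks that it is tracked in Games. */
def exampleRegisterGame(): Unit =
  val gameId = registerGame(9)
  assert(Games.contains(gameId))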
| lene/go-3 | src/main/scala/server/Games.scala | Scala | gpl-2.0 | 923 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.batch.table
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.util.CollectionDataSets
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.functions.aggfunctions.CountAggFunction
import org.apache.flink.table.runtime.utils.JavaUserDefinedAggFunctions.{CountDistinctWithMergeAndReset, WeightedAvgWithMergeAndReset}
import org.apache.flink.table.runtime.utils.TableProgramsCollectionTestBase
import org.apache.flink.table.runtime.utils.TableProgramsTestBase.TableConfigMode
import org.apache.flink.table.utils.{NonMergableCount, Top10}
import org.apache.flink.test.util.TestBaseUtils
import org.apache.flink.types.Row
import org.junit._
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import java.math.BigDecimal
import scala.collection.JavaConverters._
import scala.collection.mutable
@RunWith(classOf[Parameterized])
class AggregationsITCase(
configMode: TableConfigMode)
extends TableProgramsCollectionTestBase(configMode) {
@Test
def testAggregationWithCaseClass(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val inputTable = CollectionDataSets.getSmallNestedTupleDataSet(env).toTable(tEnv, 'a, 'b)
tEnv.createTemporaryView("MyTable", inputTable)
val result = tEnv.scan("MyTable")
.where('a.get("_1") > 0)
.select('a.get("_1").avg, 'a.get("_2").sum, 'b.count)
val expected = "2,6,3"
val results = result.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregationTypes(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
.select('_1.sum, '_1.sum0, '_1.min, '_1.max, '_1.count, '_1.avg)
val results = t.toDataSet[Row].collect()
val expected = "231,231,1,21,21,11"
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testWorkingAggregationDataTypes(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = env.fromElements(
(1: Byte, 1: Short, 1, 1L, 1.0f, 1.0d, "Hello"),
(2: Byte, 2: Short, 2, 2L, 2.0f, 2.0d, "Ciao")).toTable(tEnv)
.select('_1.avg, '_2.avg, '_3.avg, '_4.avg, '_5.avg, '_6.avg, '_7.count)
val expected = "1,1,1,1,1.5,1.5,2"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testProjection(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = env.fromElements(
(1: Byte, 1: Short),
(2: Byte, 2: Short)).toTable(tEnv)
.select('_1.avg, '_1.sum, '_1.count, '_2.avg, '_2.sum)
val expected = "1,3,2,1,3"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregationWithArithmetic(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = env.fromElements((1f, "Hello"), (2f, "Ciao")).toTable(tEnv)
.select(('_1 + 2).avg + 2, '_2.count + 5)
val expected = "5.5,7"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregationWithTwoCount(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = env.fromElements((1f, "Hello"), (2f, "Ciao")).toTable(tEnv)
.select('_1.count, '_2.count)
val expected = "2,2"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregationAfterProjection(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = env.fromElements(
(1: Byte, 1: Short, 1, 1L, 1.0f, 1.0d, "Hello"),
(2: Byte, 2: Short, 2, 2L, 2.0f, 2.0d, "Ciao")).toTable(tEnv)
.select('_1, '_2, '_3)
.select('_1.avg, '_2.sum, '_3.count)
val expected = "1,3,2"
val result = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(result.asJava, expected)
}
@Test
def testSQLStyleAggregations(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.select(
"""Sum( a) as a1, a.sum as a2,
|Min (a) as b1, a.min as b2,
|Max (a ) as c1, a.max as c2,
|Avg ( a ) as d1, a.avg as d2,
|Count(a) as e1, a.count as e2
""".stripMargin)
val expected = "231,231,1,1,21,21,11,11,21,21"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testPojoAggregation(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val input = env.fromElements(
WC("hello", 1),
WC("hello", 1),
WC("ciao", 1),
WC("hola", 1),
WC("hola", 1))
val expr = input.toTable(tEnv)
val result = expr
.groupBy('word)
.select('word, 'frequency.sum as 'frequency)
.filter('frequency === 2)
.toDataSet[WC]
val mappedResult = result.map(w => (w.word, w.frequency * 10)).collect()
val expected = "(hello,20)\\n" + "(hola,20)"
TestBaseUtils.compareResultAsText(mappedResult.asJava, expected)
}
@Test
def testDistinct(): Unit = {
val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
val distinct = ds.select('b).distinct()
val expected = "1\\n" + "2\\n" + "3\\n" + "4\\n" + "5\\n" + "6\\n"
val results = distinct.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testDistinctAfterAggregate(): Unit = {
val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val ds = CollectionDataSets.get5TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c, 'd, 'e)
val distinct = ds.groupBy('a, 'e).select('e).distinct()
val expected = "1\\n" + "2\\n" + "3\\n"
val results = distinct.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupedAggregate(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val countFun = new CountAggFunction
val wAvgFun = new WeightedAvgWithMergeAndReset
val countDistinct = new CountDistinctWithMergeAndReset
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('b, 'a.sum, countFun('c), wAvgFun('b, 'a), wAvgFun('a, 'a), countDistinct('c))
val expected = "1,1,1,1,1,1\\n" + "2,5,2,2,2,2\\n" + "3,15,3,3,5,3\\n" + "4,34,4,4,8,4\\n" +
"5,65,5,5,13,5\\n" + "6,111,6,6,18,6\\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupingKeyForwardIfNotUsed(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('a.sum)
val expected = "1\\n" + "5\\n" + "15\\n" + "34\\n" + "65\\n" + "111\\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupNoAggregation(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = CollectionDataSets.get3TupleDataSet(env)
.toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('a.sum as 'd, 'b)
.groupBy('b, 'd)
.select('b)
val expected = "1\\n" + "2\\n" + "3\\n" + "4\\n" + "5\\n" + "6\\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregateEmptyDataSets(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val myAgg = new NonMergableCount
val t1 = env.fromCollection(new mutable.MutableList[(Int, String)]).toTable(tEnv, 'a, 'b)
.select('a.sum, 'a.count)
val t2 = env.fromCollection(new mutable.MutableList[(Int, String)]).toTable(tEnv, 'a, 'b)
.select('a.sum, myAgg('b), 'a.count)
val expected1 = "null,0"
val expected2 = "null,0,0"
val results1 = t1.toDataSet[Row].collect()
val results2 = t2.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results1.asJava, expected1)
TestBaseUtils.compareResultAsText(results2.asJava, expected2)
}
@Test
def testGroupedAggregateWithLongKeys(): Unit = {
// This uses very long keys to force serialized comparison.
// With short keys, the normalized key is sufficient.
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val ds = env.fromElements(
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhaa", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhaa", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhaa", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhaa", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhaa", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhab", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhab", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhab", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhab", 1, 2))
.rebalance().setParallelism(2).toTable(tEnv, 'a, 'b, 'c)
.groupBy('a, 'b)
.select('c.sum)
val expected = "10\\n" + "8\\n"
val results = ds.collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupedAggregateWithConstant1(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.select('a, 4 as 'four, 'b)
.groupBy('four, 'a)
.select('four, 'b.sum)
val expected = "4,2\\n" + "4,3\\n" + "4,5\\n" + "4,5\\n" + "4,5\\n" + "4,6\\n" +
"4,6\\n" + "4,6\\n" + "4,3\\n" + "4,4\\n" + "4,6\\n" + "4,1\\n" + "4,4\\n" +
"4,4\\n" + "4,5\\n" + "4,6\\n" + "4,2\\n" + "4,3\\n" + "4,4\\n" + "4,5\\n" + "4,6\\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupedAggregateWithConstant2(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.select('b, 4 as 'four, 'a)
.groupBy('b, 'four)
.select('four, 'a.sum)
val expected = "4,1\\n" + "4,5\\n" + "4,15\\n" + "4,34\\n" + "4,65\\n" + "4,111\\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupedAggregateWithExpression(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = CollectionDataSets.get5TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c, 'd, 'e)
.groupBy('e, 'b % 3)
.select('c.min, 'e, 'a.avg, 'd.count)
val expected = "0,1,1,1\\n" + "3,2,3,3\\n" + "7,1,4,2\\n" + "14,2,5,1\\n" +
"5,3,4,2\\n" + "2,1,3,2\\n" + "1,2,3,3\\n" + "12,3,5,1"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupedAggregateWithFilter(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('b, 'a.sum)
.where('b === 2)
val expected = "2,5\\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAnalyticAggregation(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env)
val ds = env.fromElements(
(1: Byte, 1: Short, 1, 1L, 1.0f, 1.0d, BigDecimal.ONE),
(2: Byte, 2: Short, 2, 2L, 2.0f, 2.0d, new BigDecimal(2))).toTable(tEnv)
val res = ds.select(
'_1.stddevPop, '_2.stddevPop, '_3.stddevPop, '_4.stddevPop, '_5.stddevPop,
'_6.stddevPop, '_7.stddevPop,
'_1.stddevSamp, '_2.stddevSamp, '_3.stddevSamp, '_4.stddevSamp, '_5.stddevSamp,
'_6.stddevSamp, '_7.stddevSamp,
'_1.varPop, '_2.varPop, '_3.varPop, '_4.varPop, '_5.varPop,
'_6.varPop, '_7.varPop,
'_1.varSamp, '_2.varSamp, '_3.varSamp, '_4.varSamp, '_5.varSamp,
'_6.varSamp, '_7.varSamp)
val expected =
"0,0,0," +
"0,0.5,0.5,0.5," +
"1,1,1," +
"1,0.70710677,0.7071067811865476,0.7071067811865476," +
"0,0,0," +
"0,0.25,0.25,0.25," +
"1,1,1," +
"1,0.5,0.5,0.5"
val results = res.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testComplexAggregate(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val top10Fun = new Top10
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('b, top10Fun('b.cast(Types.INT), 'a.cast(Types.FLOAT)))
val expected =
"1,[(1,1.0), null, null, null, null, null, null, null, null, null]\\n" +
"2,[(2,3.0), (2,2.0), null, null, null, null, null, null, null, null]\\n" +
"3,[(3,6.0), (3,5.0), (3,4.0), null, null, null, null, null, null, null]\\n" +
"4,[(4,10.0), (4,9.0), (4,8.0), (4,7.0), null, null, null, null, null, null]\\n" +
"5,[(5,15.0), (5,14.0), (5,13.0), (5,12.0), (5,11.0), null, null, null, null, null]\\n" +
"6,[(6,21.0), (6,20.0), (6,19.0), (6,18.0), (6,17.0), (6,16.0), null, null, null, null]"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testCollect(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('b, 'a.collect)
val expected =
"1,{1=1}\\n2,{2=1, 3=1}\\n3,{4=1, 5=1, 6=1}\\n4,{8=1, 9=1, 10=1, 7=1}\\n" +
"5,{11=1, 12=1, 13=1, 14=1, 15=1}\\n6,{16=1, 17=1, 18=1, 19=1, 20=1, 21=1}"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
}
case class WC(word: String, frequency: Long)
| jinglining/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/batch/table/AggregateITCase.scala | Scala | apache-2.0 | 16,617 |
/*
* Copyright (c) 2015-6 Alexandre Archambault
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import scala.annotation.{ Annotation => saAnnotation }
import org.junit.Test
import shapeless.test.{illTyped, typed}
object AnnotationTestsDefinitions {
case class First() extends saAnnotation
case class Second(i: Int, s: String) extends saAnnotation
case class Third(c: Char) extends saAnnotation
case class Other() extends saAnnotation
case class Last(b: Boolean) extends saAnnotation
case class Unused() extends saAnnotation
@Other case class CC(
@First i: Int,
s: String,
@Second(2, "b") ob: Option[Boolean]
)
@Last(true) trait Something
sealed trait Base
@First case class BaseI(i: Int) extends Base
@Second(3, "e") @Third('c') case class BaseS(s: String) extends Base
sealed trait Base2
case class BaseI2(i: Int) extends Base2 @First
case class BaseS2(s: String) extends Base2 @Second(3, "e") @Third('c')
trait Dummy
case class CC2(
i: Int @First,
s: String,
ob: Option[Boolean] @Second(2, "b")
)
case class CC3(
@First i: Int,
s: String,
@Second(2, "b") @Third('c') ob: Option[Boolean]
)
case class CC4(
i: Int @First,
s: String,
ob: Option[Boolean] @Second(2, "b") @Third('c')
)
type PosInt = Int @First
type Email = String @Third('c')
case class User(age: PosInt, email: Email)
}
class AnnotationTests {
import AnnotationTestsDefinitions._
@Test
def simpleAnnotation: Unit = {
{
val other = Annotation[Other, CC].apply()
assert(other == Other())
val last = Annotation[Last, Something].apply()
assert(last == Last(true))
}
{
val other: Other = Annotation[Other, CC].apply()
assert(other == Other())
val last: Last = Annotation[Last, Something].apply()
assert(last == Last(true))
}
}
@Test
def optionalAnnotation: Unit = {
{
val other = Annotation[Option[Other], CC].apply()
assert(other == Some(Other()))
val last = Annotation[Option[Last], Something].apply()
assert(last == Some(Last(true)))
}
{
val other: Option[Other] = Annotation[Option[Other], Something].apply()
assert(other == None)
val last: Option[Last] = Annotation[Option[Last], CC].apply()
assert(last == None)
}
}
@Test
def invalidAnnotation: Unit = {
illTyped(" Annotation[Other, Dummy] ", "could not find implicit value for parameter annotation: .*")
illTyped(" Annotation[Dummy, CC] ", "could not find implicit value for parameter annotation: .*")
}
@Test
def simpleAnnotations: Unit = {
{
val first: Some[First] :: None.type :: None.type :: HNil = Annotations[First, CC].apply()
assert(first == Some(First()) :: None :: None :: HNil)
val second: None.type :: None.type :: Some[Second] :: HNil = Annotations[Second, CC].apply()
assert(second == None :: None :: Some(Second(2, "b")) :: HNil)
val unused: None.type :: None.type :: None.type :: HNil = Annotations[Unused, CC].apply()
assert(unused == None :: None :: None :: HNil)
val firstSum: Some[First] :: None.type :: HNil = Annotations[First, Base].apply()
assert(firstSum == Some(First()) :: None :: HNil)
val secondSum: None.type :: Some[Second] :: HNil = Annotations[Second, Base].apply()
assert(secondSum == None :: Some(Second(3, "e")) :: HNil)
}
{
val first = Annotations[First, CC].apply()
assert(first == Some(First()) :: None :: None :: HNil)
val second = Annotations[Second, CC].apply()
assert(second == None :: None :: Some(Second(2, "b")) :: HNil)
val unused = Annotations[Unused, CC].apply()
assert(unused == None :: None :: None :: HNil)
val firstSum = Annotations[First, Base].apply()
assert(firstSum == Some(First()) :: None :: HNil)
val secondSum = Annotations[Second, Base].apply()
assert(secondSum == None :: Some(Second(3, "e")) :: HNil)
}
}
@Test
def invalidAnnotations: Unit = {
illTyped(" Annotations[Dummy, CC] ", "could not find implicit value for parameter annotations: .*")
illTyped(" Annotations[Dummy, Base] ", "could not find implicit value for parameter annotations: .*")
illTyped(" Annotations[Second, Dummy] ", "could not find implicit value for parameter annotations: .*")
}
@Test
def typeAnnotations: Unit = {
{
val first: Some[First] :: None.type :: None.type :: HNil = TypeAnnotations[First, CC2].apply()
assert(first == Some(First()) :: None :: None :: HNil)
val second: None.type :: None.type :: Some[Second] :: HNil = TypeAnnotations[Second, CC2].apply()
assert(second == None :: None :: Some(Second(2, "b")) :: HNil)
val unused: None.type :: None.type :: None.type :: HNil = TypeAnnotations[Unused, CC2].apply()
assert(unused == None :: None :: None :: HNil)
}
{
val first = TypeAnnotations[First, CC2].apply()
assert(first == Some(First()) :: None :: None :: HNil)
val second = TypeAnnotations[Second, CC2].apply()
assert(second == None :: None :: Some(Second(2, "b")) :: HNil)
val unused = TypeAnnotations[Unused, CC2].apply()
assert(unused == None :: None :: None :: HNil)
}
}
@Test
def invalidTypeAnnotations: Unit = {
illTyped(" TypeAnnotations[Dummy, CC2] ", "could not find implicit value for parameter annotations: .*")
illTyped(" TypeAnnotations[Dummy, Base] ", "could not find implicit value for parameter annotations: .*")
illTyped(" TypeAnnotations[Second, Dummy] ", "could not find implicit value for parameter annotations: .*")
}
@Test
def allAnnotations: Unit = {
val cc = AllAnnotations[CC3].apply()
typed[(First :: HNil) :: HNil :: (Second :: Third :: HNil) :: HNil](cc)
assert(cc == (First() :: HNil) :: HNil :: (Second(2, "b") :: Third('c') :: HNil) :: HNil)
val st = AllAnnotations[Base].apply()
typed[(First :: HNil) :: (Second :: Third :: HNil) :: HNil](st)
}
@Test
def allTypeAnnotations: Unit = {
val st = AllTypeAnnotations[Base2].apply() // sealed trait
typed[(First :: HNil) :: (Second :: Third :: HNil) :: HNil](st)
val cc = AllTypeAnnotations[CC4].apply() // case class
typed[(First :: HNil) :: HNil :: (Second :: Third :: HNil) :: HNil](cc)
assert(cc == (First() :: HNil) :: HNil :: (Second(2, "b") :: Third('c') :: HNil) :: HNil)
val user = AllTypeAnnotations[User].apply() // type refs
typed[(First :: HNil) :: (Third :: HNil) :: HNil](user)
assert(user == (First() :: HNil) :: (Third('c') :: HNil) :: HNil)
}
}
| isaka/shapeless | core/src/test/scala/shapeless/annotation.scala | Scala | apache-2.0 | 7,192 |
/**
* Copyright 2017 https://github.com/sndnv
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package noisecluster.test.specs.transport.aeron
import akka.actor.ActorSystem
import io.aeron.Aeron
import io.aeron.driver.MediaDriver
import noisecluster.jvm.test.utils._
import noisecluster.jvm.transport.aeron.{Source, Target, Defaults}
import org.scalatest.{FlatSpec, Matchers}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Random
class BasicAeronSpec extends FlatSpec with Matchers {
private var testDataSent = 0L
private var testDataReceived = 0L
private def testDataHandler: (Array[Byte], Int) => Unit = (_: Array[Byte], length: Int) => {
testDataReceived += length
}
private implicit val ec: ExecutionContext = ExecutionContext.Implicits.global
private implicit val loggingSystem = ActorSystem("testLoggingSystem")
private val channel = "aeron:ipc"
private val stream = 42
private val driver = MediaDriver.launch(Defaults.getNewDriverContext)
private implicit val aeron = Aeron.connect(Defaults.getNewSystemContext)
private val source: Source = Source(stream, channel, Defaults.BufferSize)
private val target: Target = Target(stream, channel, Defaults.IdleStrategy, Defaults.FragmentLimit)
private val testByteArraySize = 1000
private var targetFuture = Future {
target.start(testDataHandler)
}
waitUntil(what = "target becomes active", waitTimeMs = 500, waitAttempts = 10) {
target.isActive
}
"A source and a target" should "successfully exchange data" in {
val bytes = Array.ofDim[Byte](testByteArraySize)
Random.nextBytes(bytes)
source.send(bytes)
testDataSent += testByteArraySize
waitUntil(what = "data is received by target", waitTimeMs = 500, waitAttempts = 10) {
testDataSent == testDataReceived
}
testDataSent should be(testDataReceived)
testDataReceived should be(testByteArraySize)
}
"A target" should "successfully stop accepting data" in {
target.isActive should be(true)
target.stop()
waitUntil(what = "target becomes inactive", waitTimeMs = 500, waitAttempts = 10) {
!target.isActive
}
target.isActive should be(false)
assertThrows[IllegalStateException] {
target.stop()
}
}
it should "successfully restart and accept data" in {
target.isActive should be(false)
targetFuture = Future {
target.start(testDataHandler)
}
waitUntil(what = "target becomes active", waitTimeMs = 500, waitAttempts = 10) {
target.isActive
}
target.isActive should be(true)
val bytes = Array.ofDim[Byte](testByteArraySize)
Random.nextBytes(bytes)
source.send(bytes)
testDataSent += testByteArraySize
waitUntil(what = "data is received by target", waitTimeMs = 500, waitAttempts = 10) {
testDataSent == testDataReceived
}
testDataSent should be(testDataReceived)
testDataReceived should be(testByteArraySize * 2)
}
it should "fail to close an active connection" in {
assertThrows[IllegalStateException] {
target.close()
}
}
it should "successfully stop and close its connection" in {
target.isActive should be(true)
target.stop()
target.close()
target.isActive should be(false)
}
"A system" should "successfully terminate" in {
aeron.close()
driver.close()
}
}
| sndnv/noisecluster | noisecluster-jvm/src/test/scala/noisecluster/test/specs/transport/aeron/BasicAeronSpec.scala | Scala | apache-2.0 | 3,884 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.bson.codecs.macrocodecs
import scala.collection.JavaConverters._
import scala.collection.mutable
import org.bson._
import org.bson.codecs.configuration.{ CodecRegistries, CodecRegistry }
import org.bson.codecs.{ Codec, DecoderContext, Encoder, EncoderContext }
import org.mongodb.scala.bson.BsonNull
/**
 * Base trait implemented by the macro-generated case class codecs.
* @tparam T the case class type for the codec
* @since 2.0
*/
trait MacroCodec[T] extends Codec[T] {
/**
 * Creates a `Map[String, Class[_]]` mapping each case class name to its type.
*/
val caseClassesMap: Map[String, Class[_]]
/**
 * Creates a `Map[Class[_], Boolean]` mapping field types to a boolean indicating whether they are a case class.
*/
val classToCaseClassMap: Map[Class[_], Boolean]
/**
* A nested map of case class name to a Map of the given field names and a list of the field types.
*/
val classFieldTypeArgsMap: Map[String, Map[String, List[Class[_]]]]
/**
* The case class type for the codec.
*/
val encoderClass: Class[T]
/**
* The `CodecRegistry` for use with the codec.
*/
val codecRegistry: CodecRegistry
/**
* Creates a new instance of the case class with the provided data
*
* @param className the name of the class to be instantiated
* @param fieldsData the Map of data for the class
* @return the new instance of the class
*/
def getInstance(className: String, fieldsData: Map[String, Any]): T
/**
* The method that writes the data for the case class
*
* @param className the name of the current case class being written
* @param writer the `BsonWriter`
* @param value the value to the case class
* @param encoderContext the `EncoderContext`
*/
def writeCaseClassData(className: String, writer: BsonWriter, value: T, encoderContext: EncoderContext): Unit
/**
* The field used to save the class name when saving sealed case classes.
*/
val classFieldName = "_t"
lazy val hasClassFieldName: Boolean = caseClassesMapInv.keySet != Set(encoderClass)
lazy val caseClassesMapInv: Map[Class[_], String] = caseClassesMap.map(_.swap)
protected val registry: CodecRegistry =
CodecRegistries.fromRegistries(List(codecRegistry, CodecRegistries.fromCodecs(this)).asJava)
protected val bsonNull = BsonNull()
override def encode(writer: BsonWriter, value: T, encoderContext: EncoderContext): Unit = {
if (value == null) { // scalastyle:ignore
throw new BsonInvalidOperationException(s"Invalid value for $encoderClass found a `null` value.")
}
writeValue(writer, value, encoderContext)
}
override def decode(reader: BsonReader, decoderContext: DecoderContext): T = {
val className = getClassName(reader, decoderContext)
val fieldTypeArgsMap = classFieldTypeArgsMap(className)
val map = mutable.Map[String, Any]()
reader.readStartDocument()
while (reader.readBsonType ne BsonType.END_OF_DOCUMENT) {
val name = reader.readName
val typeArgs = if (name == classFieldName) List(classOf[String]) else fieldTypeArgsMap.getOrElse(name, List.empty)
if (typeArgs.isEmpty) {
reader.skipValue()
} else {
map += (name -> readValue(reader, decoderContext, typeArgs.head, typeArgs.tail, fieldTypeArgsMap))
}
}
reader.readEndDocument()
getInstance(className, map.toMap)
}
override def getEncoderClass: Class[T] = encoderClass
protected def getClassName(reader: BsonReader, decoderContext: DecoderContext): String = {
if (hasClassFieldName) {
// Find the class name
@scala.annotation.tailrec
def readOptionalClassName(): Option[String] = {
if (reader.readBsonType == BsonType.END_OF_DOCUMENT) {
None
} else if (reader.readName == classFieldName) {
Some(codecRegistry.get(classOf[String]).decode(reader, decoderContext))
} else {
reader.skipValue()
readOptionalClassName()
}
}
val mark: BsonReaderMark = reader.getMark()
reader.readStartDocument()
val optionalClassName: Option[String] = readOptionalClassName()
mark.reset()
val className = optionalClassName.getOrElse {
throw new BsonInvalidOperationException(s"Could not decode sealed case class. Missing '$classFieldName' field.")
}
if (!caseClassesMap.contains(className)) {
throw new BsonInvalidOperationException(s"Could not decode sealed case class, unknown class $className.")
}
className
} else {
caseClassesMap.head._1
}
}
protected def writeClassFieldName(writer: BsonWriter, className: String, encoderContext: EncoderContext): Unit = {
if (hasClassFieldName) {
writer.writeName(classFieldName)
this.writeValue(writer, className, encoderContext)
}
}
protected def writeFieldValue[V](
fieldName: String,
writer: BsonWriter,
value: V,
encoderContext: EncoderContext
): Unit = {
if (value == null) { // scalastyle:ignore
throw new BsonInvalidOperationException(s"Invalid value for $fieldName found a `null` value.")
}
writeValue(writer, value, encoderContext)
}
protected def writeValue[V](writer: BsonWriter, value: V, encoderContext: EncoderContext): Unit = {
val clazz = value.getClass
caseClassesMapInv.get(clazz) match {
case Some(className) =>
writeCaseClassData(className: String, writer: BsonWriter, value.asInstanceOf[T], encoderContext: EncoderContext)
case None =>
val codec = registry.get(clazz).asInstanceOf[Encoder[V]]
encoderContext.encodeWithChildContext(codec, writer, value)
}
}
protected def readValue[V](
reader: BsonReader,
decoderContext: DecoderContext,
clazz: Class[V],
typeArgs: List[Class[_]],
fieldTypeArgsMap: Map[String, List[Class[_]]]
): V = {
val currentType = reader.getCurrentBsonType
currentType match {
case BsonType.DOCUMENT => readDocument(reader, decoderContext, clazz, typeArgs, fieldTypeArgsMap)
case BsonType.ARRAY => readArray(reader, decoderContext, clazz, typeArgs, fieldTypeArgsMap)
case BsonType.NULL =>
reader.readNull()
null.asInstanceOf[V] // scalastyle:ignore
case _ => registry.get(clazz).decode(reader, decoderContext)
}
}
protected def readArray[V](
reader: BsonReader,
decoderContext: DecoderContext,
clazz: Class[V],
typeArgs: List[Class[_]],
fieldTypeArgsMap: Map[String, List[Class[_]]]
): V = {
if (typeArgs.isEmpty) {
throw new BsonInvalidOperationException(
s"Invalid Bson format for '${clazz.getSimpleName}'. Found a list but there is no type data."
)
}
reader.readStartArray()
val list = mutable.ListBuffer[Any]()
while (reader.readBsonType ne BsonType.END_OF_DOCUMENT) {
list.append(readValue(reader, decoderContext, typeArgs.head, typeArgs.tail, fieldTypeArgsMap))
}
reader.readEndArray()
if (classOf[Set[_]].isAssignableFrom(clazz)) {
list.toSet.asInstanceOf[V]
} else if (classOf[Vector[_]].isAssignableFrom(clazz)) {
list.toVector.asInstanceOf[V]
} else if (classOf[Stream[_]].isAssignableFrom(clazz)) {
list.toStream.asInstanceOf[V]
} else {
list.toList.asInstanceOf[V]
}
}
protected def readDocument[V](
reader: BsonReader,
decoderContext: DecoderContext,
clazz: Class[V],
typeArgs: List[Class[_]],
fieldTypeArgsMap: Map[String, List[Class[_]]]
): V = {
if (classToCaseClassMap.getOrElse(clazz, false) || typeArgs.isEmpty) {
registry.get(clazz).decode(reader, decoderContext)
} else {
val map = mutable.Map[String, Any]()
reader.readStartDocument()
while (reader.readBsonType ne BsonType.END_OF_DOCUMENT) {
val name = reader.readName
val fieldClazzTypeArgs = fieldTypeArgsMap.getOrElse(name, typeArgs)
if (fieldClazzTypeArgs.isEmpty) {
reader.skipValue()
} else {
map += (name -> readValue(
reader,
decoderContext,
fieldClazzTypeArgs.head,
fieldClazzTypeArgs.tail,
fieldTypeArgsMap
))
}
}
reader.readEndDocument()
map.toMap.asInstanceOf[V]
}
}
}
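// Hedged usage sketch (added for illustration; not part of the original driver source).
// It shows how a Codec[T] built on MacroCodec is typically driven through the plain org.bson
// reader/writer API. The `codec` instance is assumed to be materialised elsewhere (e.g. by the
// driver's case-class macro helpers); only classes already imported above are used.
private[macrocodecs] object MacroCodecRoundTripSketch {
  def roundTrip[T](codec: Codec[T], value: T): T = {
    val document = new BsonDocument()
    // encode the case class into an in-memory BsonDocument
    codec.encode(new BsonDocumentWriter(document), value, EncoderContext.builder().build())
    // decode it back, exercising getClassName / getInstance along the way
    codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build())
  }
}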
| rozza/mongo-java-driver | bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/macrocodecs/MacroCodec.scala | Scala | apache-2.0 | 8,909 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dbis.pig.op
import dbis.pig.schema._
import dbis.pig.expr.Predicate
/**
* @brief Matcher represents the complex event processing operator of Pig.
*/
/**
 * A pattern class represents the complex pattern that the matcher operator's engine should detect.
*/
sealed abstract class Pattern
/**
 * An event class represents a simple event that the engine must detect in order to
 * recognize the complex pattern.
*/
sealed abstract class Event
/**
 * A class that represents a simple pattern, which consists only of a simple event name.
 * @param name the name of the simple event
*/
case class SimplePattern(name: String) extends Pattern
/**
 * A class that represents a sequence pattern (i.e., a complex one), which is composed of other
 * patterns. These patterns can themselves be simple or complex.
 * @param patterns the list of patterns that the sequence pattern consists of
*/
case class SeqPattern(patterns: List[Pattern]) extends Pattern
/**
 * A class that represents a conjunction pattern (i.e., a complex one), which is composed of other
 * patterns. These patterns can themselves be simple or complex.
 * @param patterns the list of patterns that the conjunction pattern consists of
*/
case class ConjPattern(patterns: List[Pattern]) extends Pattern
/**
 * A class that represents the negation pattern. It receives a single pattern as its parameter
 * and negates it.
 * @param patterns the pattern to be negated
*/
case class NegPattern(patterns: Pattern) extends Pattern
/**
 * A class that represents a disjunction pattern (i.e., a complex one), which is composed of other
 * patterns. These patterns can themselves be simple or complex.
 * @param patterns the list of patterns that the disjunction pattern consists of
*/
case class DisjPattern(patterns: List[Pattern]) extends Pattern
/**
 * A simple event binds a particular simple pattern to the predicate that defines it.
* @param simplePattern a simple pattern
* @param predicate the predicate of the simple pattern
*/
case class SimpleEvent(simplePattern: Pattern, predicate: Predicate) extends Event
/**
 * A class that holds all the definitions of the simple patterns used by the operator. Through
 * these definitions, the simple patterns (i.e., events) can be detected.
*/
case class CompEvent(complex: List[SimpleEvent]) extends Event
case class Matcher(
private val out: Pipe, in: Pipe,
pattern: Pattern,
events: CompEvent,
mode: String = "skip_till_next_match",
within: Tuple2[Int, String] = (0, "SECONDS")
) extends PigOperator(out) {
/**
* construct the schema of this operator. In general, this operator will not
* change the schema of the previous operator in the chain. It will discard some tuples
* without changing their structures.
*/
override def constructSchema: Option[Schema] = {
/*
* Either the schema was defined or it is None.
*/
schema
}
/**
* Returns the lineage string for this operator.
*
* @return a string representation of the operator.
*/
override def lineageString: String = {
s"""Matcher""" + super.lineageString
}
}
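/**
 * Hedged usage sketch (added for illustration; not part of the original operator source).
 * It builds a SEQ(A, B) pattern whose simple events "A" and "B" are defined by caller-supplied
 * predicates. The event names, the 30-second window and the match mode are hypothetical values;
 * the pipes and predicates are assumed to come from the surrounding dataflow plan.
 */
object MatcherUsageSketch {
  def seqOfTwo(out: Pipe, in: Pipe, predA: Predicate, predB: Predicate): Matcher = {
    val a = SimplePattern("A")
    val b = SimplePattern("B")
    // the complex pattern: event A followed by event B
    val pattern = SeqPattern(List(a, b))
    // the definitions that let the engine detect the simple events
    val events = CompEvent(List(SimpleEvent(a, predA), SimpleEvent(b, predB)))
    Matcher(out, in, pattern, events, mode = "skip_till_next_match", within = (30, "SECONDS"))
  }
}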
| ksattler/piglet | src/main/scala/dbis/pig/op/Matcher.scala | Scala | apache-2.0 | 3,935 |
package net.paploo.diestats.statistics.util
import net.paploo.diestats.test.SpecTest
class MonoidTest extends SpecTest {
describe("reduce") {
it("should reduce a string") {
val monoid = implicitly[Monoid[String]]
monoid.reduce(Seq("alpha", "beta", "", "gamma")) should === ("alphabetagamma")
}
it("should reduce numbers as summation") {
val monoid = implicitly[Monoid[Int]]
monoid.reduce(Seq(5, -1, 0, 2)) should === (6)
}
}
}
| paploo/DieStats | src/test/scala/net/paploo/diestats/statistics/util/MonoidTest.scala | Scala | bsd-3-clause | 478 |
package gridscale.slurm
import gridscale.effectaside._
import gridscale._
import gridscale.cluster._
import gridscale.local._
object SlurmExampleLocal extends App {
val headNode = LocalHost()
val jobDescription = SLURMJobDescription(command = "/bin/echo hello from $(hostname)", workDirectory = "/homes/jpassera/test_gridscale", partition = Some("short"))
def res(implicit system: Effect[System], ssh: Effect[Local]) = {
val job = submit(headNode, jobDescription)
val s = waitUntilEnded(() ⇒ state(headNode, job))
val out = stdOut(headNode, job)
clean(headNode, job)
(s, out)
}
ClusterInterpreter { intp ⇒
import intp._
println(res)
}
}
| openmole/gridscale | examples/slurm/src/main/scala/gridscale/slurm/SlurmExampleLocal.scala | Scala | agpl-3.0 | 688 |
package highchair.specs
import org.specs._
import com.google.appengine.api.datastore.DatastoreServiceFactory
import com.google.appengine.tools.development.testing._
class DataStoreSpec extends Specification {
val helper = new LocalServiceTestHelper(new LocalDatastoreServiceTestConfig)
implicit val ds = DatastoreServiceFactory.getDatastoreService
implicit val ads = DatastoreServiceFactory.getAsyncDatastoreService
doBeforeSpec { helper.setUp }
doAfterSpec { helper.tearDown }
}
| chrislewis/highchair | spec/src/main/scala/DataStoreSpec.scala | Scala | mit | 498 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.frontend.v2_3.symbols
abstract class CypherType {
def parentType: CypherType
val isAbstract: Boolean = false
def coercibleTo: Set[CypherType] = Set.empty
def parents: Seq[CypherType] = parents(Vector.empty)
private def parents(accumulator: Seq[CypherType]): Seq[CypherType] =
if (this.parentType == this)
accumulator
else
this.parentType.parents(accumulator :+ this.parentType)
/*
Determines if the class or interface represented by this
{@code CypherType} object is either the same as, or is a
supertype of, the class or interface represented by the
specified {@code CypherType} parameter.
*/
def isAssignableFrom(other: CypherType): Boolean =
if (other == this)
true
else if (other.parentType == other)
false
else
isAssignableFrom(other.parentType)
def legacyIteratedType: CypherType = this
def leastUpperBound(other: CypherType): CypherType =
if (this.isAssignableFrom(other)) this
else if (other.isAssignableFrom(this)) other
else parentType leastUpperBound other.parentType
def greatestLowerBound(other: CypherType): Option[CypherType] =
if (this.isAssignableFrom(other)) Some(other)
else if (other.isAssignableFrom(this)) Some(this)
else None
lazy val covariant: TypeSpec = TypeSpec.all constrain this
lazy val invariant: TypeSpec = TypeSpec.exact(this)
lazy val contravariant: TypeSpec = TypeSpec.all leastUpperBounds this
def rewrite(f: CypherType => CypherType) = f(this)
}
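/*
 * Hedged illustration (added; not part of the original file): the subtyping helpers above are
 * usually combined like this when merging two inferred types. The concrete CypherType instances
 * (e.g. an integer type vs. a number type) are assumed to be supplied by the caller.
 */
object CypherTypeMergeSketch {
  def widen(a: CypherType, b: CypherType): CypherType =
    if (a.isAssignableFrom(b)) a      // a is already a supertype of b
    else if (b.isAssignableFrom(a)) b // b is already a supertype of a
    else a leastUpperBound b          // otherwise walk up the parent chains
}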
| HuangLS/neo4j | community/cypher/frontend-2.3/src/main/scala/org/neo4j/cypher/internal/frontend/v2_3/symbols/CypherType.scala | Scala | apache-2.0 | 2,335 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.avro
import java.nio.charset.StandardCharsets
import java.util
import java.util.{Date, UUID}
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.jts.geom.Geometry
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.AbstractSimpleFeature.AbstractImmutableSimpleFeature
import org.locationtech.geomesa.features.{ScalaSimpleFeature, SerializationOption}
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.AttributeOptions
import org.locationtech.geomesa.utils.geotools.{ImmutableFeatureId, SimpleFeatureTypes}
import org.specs2.matcher.MatchResult
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class AvroFeatureSerializerTest extends Specification with LazyLogging {
import SerializationOption._
val options = Seq(
Set.empty[SerializationOption],
// Set(Immutable),
Set(WithUserData)
// Set(Lazy),
// Set(Immutable, WithUserData),
// Set(Lazy, Immutable),
// Set(Lazy, WithUserData),
// Set(Lazy, Immutable, WithUserData)
)
"new AvroFeatureSerializer" should {
def arrayEquals(a: Any, b: Any): MatchResult[Boolean] = {
val aBytes = a.asInstanceOf[Array[Byte]]
val bBytes = b.asInstanceOf[Array[Byte]]
util.Arrays.equals(aBytes, bBytes) must beTrue
}
"correctly deserialize basic features" in {
val spec = "a:Integer,b:Float,c:Double,d:Long,e:UUID,f:String,g:Boolean,dtg:Date,*geom:Point:srid=4326,bytes:Bytes"
val sft = SimpleFeatureTypes.createType("testType", spec)
val sf = new ScalaSimpleFeature(sft, "fakeid")
val userData = Map("key.one" -> java.lang.Boolean.TRUE, "key.two" -> "value.two")
sf.setAttribute("a", "1")
sf.setAttribute("b", "1.0")
sf.setAttribute("c", "5.37")
sf.setAttribute("d", "-100")
sf.setAttribute("e", UUID.randomUUID())
sf.setAttribute("f", "mystring")
sf.setAttribute("g", java.lang.Boolean.FALSE)
sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z")
sf.setAttribute("geom", "POINT(45.0 49.0)")
sf.setAttribute("bytes", "\\u0000FOOBARBAZ\\u0000\\u4444123".getBytes(StandardCharsets.UTF_16BE))
sf.getUserData.putAll(userData)
forall(options) { opts =>
val serializer = new AvroFeatureSerializer(sft, opts)
val serialized = serializer.serialize(sf)
val deserialized = serializer.deserialize(serialized)
deserialized.getID mustEqual sf.getID
deserialized.getAttributes.dropRight(1) mustEqual sf.getAttributes.dropRight(1)
arrayEquals(sf.getAttributes.last, "\\u0000FOOBARBAZ\\u0000\\u4444123".getBytes(StandardCharsets.UTF_16BE))
arrayEquals(deserialized.getAttributes.last, sf.getAttributes.last)
if (opts.withUserData) {
deserialized.getUserData.toMap mustEqual userData
} else {
deserialized.getUserData must beEmpty
}
if (opts.immutable) {
deserialized must beAnInstanceOf[AbstractImmutableSimpleFeature]
deserialized.getIdentifier must beAnInstanceOf[ImmutableFeatureId]
deserialized.setAttribute(0, 2) must throwAn[UnsupportedOperationException]
deserialized.setAttribute("a", 2) must throwAn[UnsupportedOperationException]
deserialized.setAttributes(Array.empty[AnyRef]) must throwAn[UnsupportedOperationException]
deserialized.setAttributes(Seq.empty[AnyRef]) must throwAn[UnsupportedOperationException]
deserialized.getUserData.put("foo", "bar") must throwAn[UnsupportedOperationException]
} else {
deserialized.getUserData.put("foo", "bar")
deserialized.getUserData.get("foo") mustEqual "bar"
}
}
}
"correctly serialize and deserialize different geometries" in {
val spec = "a:LineString,b:Polygon,c:MultiPoint,d:MultiLineString,e:MultiPolygon," +
"f:GeometryCollection,dtg:Date,*geom:Point:srid=4326"
val sftWkb = SimpleFeatureTypes.createType("testTypeWkb", spec)
// use a different name to avoid cached serializers
val sftTwkb = SimpleFeatureTypes.createType("testTypeTwkb", spec)
sftTwkb.getAttributeDescriptors.foreach(_.getUserData.put(AttributeOptions.OptPrecision, "6"))
val sf = new ScalaSimpleFeature(sftWkb, "fakeid")
sf.setAttribute("a", "LINESTRING(0 2, 2 0, 8 6)")
sf.setAttribute("b", "POLYGON((20 10, 30 0, 40 10, 30 20, 20 10))")
sf.setAttribute("c", "MULTIPOINT(0 0, 2 2)")
sf.setAttribute("d", "MULTILINESTRING((0 2, 2 0, 8 6),(0 2, 2 0, 8 6))")
sf.setAttribute("e", "MULTIPOLYGON(((-1 0, 0 1, 1 0, 0 -1, -1 0)), ((-2 6, 1 6, 1 3, -2 3, -2 6)), " +
"((-1 5, 2 5, 2 2, -1 2, -1 5)))")
sf.setAttribute("f", "MULTIPOINT(0 0, 2 2)")
sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z")
sf.setAttribute("geom", "POINT(55.0 49.0)")
forall(Seq(sftWkb, sftTwkb)) { sft =>
forall(options) { opts =>
val serializer = new AvroFeatureSerializer(sft, opts)
val serialized = serializer.serialize(sf)
val deserialized = serializer.deserialize(serialized)
deserialized must not(beNull)
deserialized.getType mustEqual sft
deserialized.getAttributes mustEqual sf.getAttributes
}
}
}
"correctly serialize and deserialize geometries with n dimensions" in {
val spec = "a:LineString,b:Polygon,c:MultiPoint,d:MultiLineString,e:MultiPolygon," +
"f:GeometryCollection,*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType("testType", spec)
val sf = new ScalaSimpleFeature(sft, "fakeid")
sf.setAttribute("a", "LINESTRING(0 2 0, 2 0 1, 8 6 2)")
sf.setAttribute("b", "POLYGON((20 10 0, 30 0 10, 40 10 10, 30 20 0, 20 10 0))")
sf.setAttribute("c", "MULTIPOINT(0 0 0, 2 2 2)")
sf.setAttribute("d", "MULTILINESTRING((0 2 0, 2 0 1, 8 6 2),(0 2 0, 2 0 0, 8 6 0))")
sf.setAttribute("e", "MULTIPOLYGON(((-1 0 0, 0 1 0, 1 0 0, 0 -1 0, -1 0 0)), ((-2 6 2, 1 6 3, 1 3 3, -2 3 3, -2 6 2)), " +
"((-1 5 0, 2 5 0, 2 2 0, -1 2 0, -1 5 0)))")
sf.setAttribute("f", "MULTIPOINT(0 0 2, 2 2 0)")
sf.setAttribute("geom", "POINT(55.0 49.0 37.0)")
forall(options) { opts =>
val serializer = new AvroFeatureSerializer(sft, opts)
val serialized = serializer.serialize(sf)
val deserialized = serializer.deserialize(serialized)
deserialized must not(beNull)
deserialized.getType mustEqual sf.getType
deserialized.getAttributes mustEqual sf.getAttributes
forall(deserialized.getAttributes.zip(sf.getAttributes)) { case (left, right) =>
forall(left.asInstanceOf[Geometry].getCoordinates.zip(right.asInstanceOf[Geometry].getCoordinates)) {
case (c1, c2) => c1.equals3D(c2) must beTrue
}
}
}
}.pendingUntilFixed()
"correctly serialize and deserialize collection types" in {
val spec = "a:Integer,m:Map[String,Double],l:List[Date],dtg:Date,*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType("testType", spec)
val sf = new ScalaSimpleFeature(sft, "fakeid")
sf.setAttribute("a", "1")
sf.setAttribute("m", Map("test1" -> 1.0, "test2" -> 2.0))
sf.setAttribute("l", List(new Date(100), new Date(200)))
sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z")
sf.setAttribute("geom", "POINT(45.0 49.0)")
forall(options) { opts =>
val serializer = new AvroFeatureSerializer(sft, opts)
val serialized = serializer.serialize(sf)
val deserialized = serializer.deserialize(serialized)
deserialized must not(beNull)
deserialized.getType mustEqual sf.getType
deserialized.getAttributes mustEqual sf.getAttributes
}
}
"serialize maps and lists of bytes" >> {
val spec = "m1:Map[String,Bytes],l:List[Bytes],dtg:Date,*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType("testType", spec)
val sf = new ScalaSimpleFeature(sft, "fakeid")
sf.setAttribute("m1", Map("a" -> Array(0.toByte, 23.toByte)))
sf.setAttribute("l", List[Array[Byte]](Array(0.toByte, 23.toByte), Array(1.toByte)))
sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z")
sf.setAttribute("geom", "POINT(45.0 49.0)")
forall(options) { opts =>
val serializer = new AvroFeatureSerializer(sft, opts)
val serialized = serializer.serialize(sf)
val deserialized = serializer.deserialize(serialized)
deserialized must not(beNull)
deserialized.getType mustEqual sf.getType
import org.locationtech.geomesa.utils.geotools.Conversions._
arrayEquals(deserialized.get[java.util.Map[String,_]]("m1")("a"), sf.get[java.util.Map[String,_]]("m1")("a"))
arrayEquals(deserialized.get[java.util.List[_]]("l")(0), sf.get[java.util.List[_]]("l")(0))
}
}
"correctly serialize and deserialize null values" in {
val spec = "a:Integer,b:Float,c:Double,d:Long,e:UUID,f:String,g:Boolean,l:List,m:Map," +
"dtg:Date,*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType("testType", spec)
val sf = new ScalaSimpleFeature(sft, "fakeid")
forall(options) { opts =>
val serializer = new AvroFeatureSerializer(sft, opts)
val serialized = serializer.serialize(sf)
val deserialized = serializer.deserialize(serialized)
deserialized must not(beNull)
deserialized.getType mustEqual sf.getType
deserialized.getAttributes.foreach(_ must beNull)
deserialized.getAttributes mustEqual sf.getAttributes
}
}
"correctly serialize and deserialize sub-arrays" in {
val spec = "a:Integer,b:Float,c:Double,d:Long,e:UUID,f:String,g:Boolean,dtg:Date,*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType("testType", spec)
val sf = new ScalaSimpleFeature(sft, "fakeid")
val userData = Map("key.one" -> java.lang.Boolean.TRUE, "key.two" -> "value.two")
sf.setAttribute("a", "1")
sf.setAttribute("b", "1.0")
sf.setAttribute("c", "5.37")
sf.setAttribute("d", "-100")
sf.setAttribute("e", UUID.randomUUID())
sf.setAttribute("f", "mystring")
sf.setAttribute("g", java.lang.Boolean.FALSE)
sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z")
sf.setAttribute("geom", "POINT(45.0 49.0)")
sf.getUserData.putAll(userData)
forall(options) { opts =>
val serializer = new AvroFeatureSerializer(sft, opts)
val serialized = serializer.serialize(sf)
val extra = Array.fill[Byte](128)(-1)
val bytes = Seq((serialized ++ extra, 0), (extra ++ serialized, extra.length), (extra ++ serialized ++ extra, extra.length))
forall(bytes) { case (array, offset) =>
val deserialized = serializer.deserialize(array, offset, serialized.length)
deserialized must not(beNull)
deserialized.getType mustEqual sf.getType
deserialized.getAttributes mustEqual sf.getAttributes
if (opts.withUserData) {
deserialized.getUserData.toMap mustEqual userData
} else {
deserialized.getUserData must beEmpty
}
if (opts.immutable) {
deserialized must beAnInstanceOf[AbstractImmutableSimpleFeature]
deserialized.getIdentifier must beAnInstanceOf[ImmutableFeatureId]
deserialized.setAttribute(0, 2) must throwAn[UnsupportedOperationException]
deserialized.setAttribute("a", 2) must throwAn[UnsupportedOperationException]
deserialized.setAttributes(Array.empty[AnyRef]) must throwAn[UnsupportedOperationException]
deserialized.setAttributes(Seq.empty[AnyRef]) must throwAn[UnsupportedOperationException]
deserialized.getUserData.put("foo", "bar") must throwAn[UnsupportedOperationException]
} else {
deserialized.getUserData.put("foo", "bar")
deserialized.getUserData.get("foo") mustEqual "bar"
}
}
}
}
"correctly project features" in {
val sft = SimpleFeatureTypes.createType("fullType", "name:String,*geom:Point,dtg:Date")
val projectedSft = SimpleFeatureTypes.createType("projectedType", "*geom:Point")
val sf = new ScalaSimpleFeature(sft, "testFeature")
sf.setAttribute("name", "foo")
sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z")
sf.setAttribute("geom", "POINT(45.0 49.0)")
val serializer = new AvroFeatureSerializer(sft)
val deserializer = new ProjectingAvroFeatureDeserializer(sft, projectedSft)
val serialized = serializer.serialize(sf)
val deserialized = deserializer.deserialize(serialized)
deserialized.getID mustEqual sf.getID
deserialized.getDefaultGeometry mustEqual sf.getDefaultGeometry
deserialized.getAttributeCount mustEqual 1
}
"correctly project features to larger sfts" in {
val sft = SimpleFeatureTypes.createType("fullType", "name:String,*geom:Point,dtg:Date")
val projectedSft = SimpleFeatureTypes.createType("projectedType",
"name1:String,name2:String,*geom:Point,otherDate:Date")
val sf = new ScalaSimpleFeature(sft, "testFeature")
sf.setAttribute("name", "foo")
sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z")
sf.setAttribute("geom", "POINT(45.0 49.0)")
val serializer = new AvroFeatureSerializer(sft)
val deserializer = new ProjectingAvroFeatureDeserializer(sft, projectedSft)
val serialized = serializer.serialize(sf)
val deserialized = deserializer.deserialize(serialized)
deserialized.getID mustEqual sf.getID
deserialized.getDefaultGeometry mustEqual sf.getDefaultGeometry
deserialized.getAttributeCount mustEqual 4
}
}
}
| ccri/geomesa | geomesa-features/geomesa-feature-avro/src/test/scala/org/locationtech/geomesa/features/avro/AvroFeatureSerializerTest.scala | Scala | apache-2.0 | 14,449 |
package at.logic.gapt.grammars
import at.logic.gapt.expr._
import at.logic.gapt.proofs.Context
import org.specs2.mutable.Specification
class LggTest extends Specification {
"leastGeneralGeneralization" should {
"compute lgg of first-order terms" in {
val ( lgg, substs ) = leastGeneralGeneralization( le"f c c", le"f d d" )
val Seq( x ) = freeVariables( lgg ).toSeq
lgg must_== le"f $x $x"
}
"compute lgg of many-sorted terms" in {
implicit var ctx = Context.empty
ctx += TBase( "Data" )
ctx += TBase( "Tree" )
ctx += hoc"Node: Data>Tree>Tree > Tree"
ctx += hoc"a: Data"
ctx += hoc"t: Tree"
ctx += hoc"s: Tree"
val ( lgg, _ ) = leastGeneralGeneralization( le"Node a t t", le"Node a s s" )
val Seq( x ) = freeVariables( lgg ).toSeq
lgg must_== le"Node a $x $x"
}
"terms with free variables" in {
val a = le"f(x1, c)"
val b = le"f(x1, d)"
val ( lgg, substs ) = leastGeneralGeneralization( a, b )
substs( a )( lgg ) must_== a
substs( b )( lgg ) must_== b
}
}
"leastGeneralGeneralization1" should {
"terms with free variables" in {
val a = le"f(x, c)"
val b = le"f(x, d)"
val ( lgg, substs ) = leastGeneralGeneralization( a, b )
substs( a )( lgg ) must_== a
substs( b )( lgg ) must_== b
}
}
}
| gebner/gapt | tests/src/test/scala/at/logic/gapt/grammars/LggTest.scala | Scala | gpl-3.0 | 1,373 |
package io.pipeline.prediction.jvm
import scala.collection.JavaConverters.asScalaBufferConverter
import scala.collection.JavaConverters.mapAsJavaMapConverter
import org.jpmml.evaluator.Evaluator
import com.netflix.hystrix.HystrixCommand
import com.netflix.hystrix.HystrixCommandGroupKey
import com.netflix.hystrix.HystrixCommandKey
import com.netflix.hystrix.HystrixCommandProperties
import com.netflix.hystrix.HystrixThreadPoolKey
import com.netflix.hystrix.HystrixThreadPoolProperties
class PMMLEvaluationCommand(commandName: String,
namespace: String,
pmmlName: String,
version: String,
modelEvaluator: Evaluator,
inputs: Map[String, Any],
fallback: String,
timeout: Int,
concurrencyPoolSize: Int,
rejectionThreshold: Int)
extends HystrixCommand[String](
HystrixCommand.Setter
.withGroupKey(HystrixCommandGroupKey.Factory.asKey(commandName))
.andCommandKey(HystrixCommandKey.Factory.asKey(commandName))
.andThreadPoolKey(HystrixThreadPoolKey.Factory.asKey(commandName))
.andCommandPropertiesDefaults(
HystrixCommandProperties.Setter()
.withExecutionTimeoutInMilliseconds(timeout)
.withExecutionIsolationStrategy(HystrixCommandProperties.ExecutionIsolationStrategy.SEMAPHORE)
.withExecutionIsolationSemaphoreMaxConcurrentRequests(concurrencyPoolSize)
.withFallbackIsolationSemaphoreMaxConcurrentRequests(rejectionThreshold)
)
.andThreadPoolPropertiesDefaults(
HystrixThreadPoolProperties.Setter()
.withCoreSize(concurrencyPoolSize)
.withQueueSizeRejectionThreshold(rejectionThreshold)
)
)
{
def run(): String = {
try {
val inputFields = modelEvaluator.getInputFields().asScala
val arguments =
( for(inputField <- inputFields)
// The raw value is passed through:
// 1) outlier treatment,
// 2) missing value treatment,
// 3) invalid value treatment
// 4) type conversion
yield (inputField.getName -> inputField.prepare(inputs(inputField.getName.getValue)))
).toMap.asJava
val results = modelEvaluator.evaluate(arguments)
val targetField = modelEvaluator.getTargetFields().asScala(0)
val targetValue = results.get(targetField.getName)
s"""[{'${targetField.getName}': '${targetValue}'}]"""
} catch {
case e: Throwable => {
// System.out.println(e)
throw e
}
}
}
override def getFallback(): String = {
// System.out.println("PMML Evaluator is Down! Fallback!!")
s"""${fallback}"""
}
}
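// Hedged usage sketch (added for illustration; not part of the original source). The command is
// meant to be instantiated per request and run through Hystrix's synchronous execute(); the
// command name, namespace, model name, fallback payload and thresholds below are hypothetical,
// and the Evaluator plus input map are assumed to be prepared by the caller.
object PMMLEvaluationCommandSketch {
  def evaluateOnce(modelEvaluator: Evaluator, inputs: Map[String, Any]): String =
    new PMMLEvaluationCommand(
      commandName = "pmml_evaluate",
      namespace = "default",
      pmmlName = "census",
      version = "v1",
      modelEvaluator = modelEvaluator,
      inputs = inputs,
      fallback = "[]",
      timeout = 100,
      concurrencyPoolSize = 10,
      rejectionThreshold = 20
    ).execute() // blocks until the evaluation finishes or the fallback is returned
}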
| BrentDorsey/pipeline | prediction.ml/jvm/src/main/scala/io/pipeline/prediction/jvm/PMMLEvaluationCommand.scala | Scala | apache-2.0 | 2,880 |
package net.machinemuse.powersuits.common
import cpw.mods.fml.common.network.IGuiHandler
import net.machinemuse.general.gui.CosmeticGui
import net.machinemuse.general.gui.GuiFieldTinker
import net.machinemuse.general.gui.GuiTinkerTable
import net.machinemuse.general.gui.KeyConfigGui
import net.minecraft.client.Minecraft
import net.minecraft.client.entity.EntityClientPlayerMP
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.stats.AchievementList
import net.minecraft.world.World
import net.machinemuse.numina.scala.OptionCast
import cpw.mods.fml.relauncher.{Side, SideOnly}
/**
* Gui handler for this mod. Mainly just takes an ID according to what was
* passed to player.OpenGUI, and opens the corresponding GUI.
*
* @author MachineMuse
*/
object MPSGuiHandler extends IGuiHandler {
override def getServerGuiElement(ID: Int, player: EntityPlayer, world: World, x: Int, y: Int, z: Int): AnyRef = {
ID match {
case _ => null
}
}
@SideOnly(Side.CLIENT)
override def getClientGuiElement(ID: Int, player: EntityPlayer, world: World, x: Int, y: Int, z: Int): AnyRef = {
Minecraft.getMinecraft.thePlayer.addStat(AchievementList.openInventory, 1)
OptionCast[EntityClientPlayerMP](player) map (p =>
ID match {
case 0 => new GuiTinkerTable(p, x, y, z)
case 1 => new KeyConfigGui(p, x, y, z)
case 2 => new GuiFieldTinker(p)
case 3 => new CosmeticGui(p, x, y, z)
case _ => None
}) getOrElse null
}
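/**
 * Hedged illustration (added; not part of the original handler): the IDs matched in
 * getClientGuiElement above correspond to the modGuiId passed to EntityPlayer.openGui.
 * The mod-instance argument and the use of ID 0 (tinker table) are assumptions for the sketch.
 */
object MPSGuiHandlerUsageSketch {
  def openTinkerTable(modInstance: AnyRef, player: EntityPlayer, world: World, x: Int, y: Int, z: Int): Unit =
    player.openGui(modInstance, 0, world, x, y, z) // 0 -> GuiTinkerTable on the client side
}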
} | QMXTech/MachineMusePowersuits | src/main/scala/net/machinemuse/powersuits/common/MPSGuiHandler.scala | Scala | bsd-3-clause | 1,502 |
package de.fosd.typechef.featureexpr.sat
/**
* CNFHelper provides several auxiliary functions to determine whether an expression is
* in normal form and to access parts of that normal form
*/
object CNFHelper {
//for testing
def isCNF(expr: SATFeatureExpr) = isTrueFalse(expr) || isClause(expr) || (expr match {
case And(clauses) => clauses.forall(isClause(_))
case e => false
})
def isClauseOrTF(expr: SATFeatureExpr) = isTrueFalse(expr) || isClause(expr)
def isClause(expr: SATFeatureExpr) = isLiteral(expr) || (expr match {
case Or(literals) => literals.forall(isLiteral(_))
case _ => false
})
def isLiteral(expr: SATFeatureExpr) = expr match {
case x: DefinedExpr => true
case Not(DefinedExpr(_)) => true
case _ => false
}
def isLiteralExternal(expr: SATFeatureExpr) = expr match {
case x: DefinedExternal => true
case Not(x: DefinedExternal) => true
case _ => false
}
def isTrueFalse(expr: SATFeatureExpr) = expr match {
case True => true
case False => true
case _ => false
}
def getCNFClauses(cnfExpr: SATFeatureExpr): Traversable[SATFeatureExpr /*Clause*/ ] = cnfExpr match {
case And(clauses) => clauses
case e => Set(e)
}
def getLiterals(orClause: SATFeatureExpr): Traversable[SATFeatureExpr /*Literal*/ ] = orClause match {
case Or(literals) => literals
case e => Set(e)
}
def getDefinedExprs(orClause: SATFeatureExpr): Set[DefinedExpr] = orClause match {
case Or(literals) => literals.map(getDefinedExpr(_)).foldLeft[Set[DefinedExpr]](Set())(_ + _)
case e => Set(getDefinedExpr(e))
}
def getDefinedExpr(literal: SATFeatureExpr): DefinedExpr = literal match {
case x: DefinedExpr => x
case Not(x: DefinedExpr) => x
case _ => throw new NoLiteralException(literal)
}
}
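/**
 * Hedged illustration (added; not part of the original file): a typical way the predicates above
 * are combined to classify an expression. The SATFeatureExpr argument is assumed to be built
 * elsewhere (e.g. by the feature-expression factory).
 */
object CNFHelperClassificationSketch {
  def classify(expr: SATFeatureExpr): String =
    if (CNFHelper.isTrueFalse(expr)) "constant (True/False)"
    else if (CNFHelper.isLiteral(expr)) "literal"
    else if (CNFHelper.isClause(expr)) "clause (disjunction of literals)"
    else if (CNFHelper.isCNF(expr)) "conjunction of clauses"
    else "not in conjunctive normal form"
}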
| ckaestne/TypeChef | FeatureExprLib/src/main/scala/de/fosd/typechef/featureexpr/sat/CNFHelper.scala | Scala | lgpl-3.0 | 1,944 |
/*
* Copyright 2011-2015 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package mongodb
package record
import BsonDSL._
import json.JObject
import field._
import net.liftweb.record.field._
import java.util.{Calendar, Date, UUID}
import java.util.regex.Pattern
import org.bson.types.ObjectId
import org.specs2.mutable.Specification
package queryexamplesfixtures {
class Person private () extends MongoRecord[Person] with ObjectIdPk[Person] {
def meta = Person
object name extends StringField(this, 100)
object birthDate extends DateField(this)
object childId extends UUIDField(this)
object petId extends ObjectIdField(this) {
override def optional_? = true
}
}
object Person extends Person with MongoMetaRecord[Person] {
// index name
createIndex(("name" -> 1))
// an implicit formats value already exists in this scope
def findAllBornAfter(dt: Date) = findAll(("birthDate" -> ("$gt" -> dt)))
}
}
class QueryExamplesSpec extends Specification with MongoTestKit {
"QueryExamples Specification".title
import queryexamplesfixtures._
"Query examples" in {
checkMongoIsRunning
val fredsBirthDate = Calendar.getInstance
fredsBirthDate.set(1970, 1, 1, 19, 0)
val wilmasBirthDate = Calendar.getInstance
wilmasBirthDate.set(1971, 8, 30, 19, 0)
val barneysBirthDate = Calendar.getInstance
barneysBirthDate.set(1972, 8, 30, 19, 0)
val bettysBirthDate = Calendar.getInstance
bettysBirthDate.set(1973, 8, 30, 19, 0)
val dinoId = ObjectId.get
val pebblesId = UUID.randomUUID
val bammbammId = UUID.randomUUID
val fred = Person.createRecord
.name("Flinstone, Fred")
.birthDate(fredsBirthDate.getTime)
.childId(pebblesId)
.petId(dinoId)
.save()
val wilma = Person.createRecord
.name("Flinstone, Wilma")
.birthDate(wilmasBirthDate.getTime)
.childId(pebblesId)
.petId(dinoId)
.save()
val barney = Person.createRecord
.name("Rubble, Barney")
.birthDate(barneysBirthDate.getTime)
.childId(bammbammId)
.save()
val betty = Person.createRecord
.name("Rubble, Betty")
.birthDate(bettysBirthDate.getTime)
.childId(bammbammId)
.save()
val flinstonesIds = List(fred.id.get, wilma.id.get)
val rubblesIds = List(barney.id.get, betty.id.get)
// query for Bamm-Bamm's parents (UUID)
val pebblesParents = Person.findAll(("childId" -> bammbammId))
pebblesParents.length must_== 2
pebblesParents.map(_.id.get).filterNot(rubblesIds.contains(_)) must_== List()
// query for Bamm-Bamm's and Pebbles' parents using List[UUID]
val pebblesAndBammBammsParents = Person.findAll(("childId" -> ("$in" -> List(pebblesId, bammbammId))))
pebblesAndBammBammsParents.length must_== 4
// query for Dino's owners (ObjectId)
val dinosOwners = Person.findAll(("petId" -> dinoId))
dinosOwners.length must_== 2
dinosOwners.map(_.id.get).filterNot(flinstonesIds.contains(_)) must_== List()
// query for the Rubbles using a Regex
val rubbles = Person.findAll(("name" -> "^Rubble".r))
rubbles.length must_== 2
rubbles.map(_.id.get).filterNot(rubblesIds.contains(_)) must_== List()
// query for the Flinstones using a Pattern
val flinstones = Person.findAll(("name" -> Pattern.compile("^flinst", Pattern.CASE_INSENSITIVE)))
flinstones.length must_== 2
flinstones.map(_.id.get).filterNot(flinstonesIds.contains(_)) must_== List()
// query for the Flinstones using a List[ObjectId]
val flinstones2 = Person.findAll(("_id" -> ("$in" -> flinstonesIds)))
flinstones2.length must_== 2
flinstones2.map(_.id.get).filterNot(flinstonesIds.contains(_)) must_== List()
// query using Dates
implicit val formats = Person.formats // this is needed for Dates
val qryDate = Calendar.getInstance
qryDate.set(1971, 1, 1, 19, 0)
val people = Person.findAll(("birthDate" -> ("$gt" -> qryDate.getTime)))
people.length must_== 3
people.map(_.id.get).filterNot(List(wilma.id.get, barney.id.get, betty.id.get).contains(_)) must_== List()
// you do not need to define the implicit formats val if you write your query in the MongoMetaRecord object.
val people2 = Person.findAllBornAfter(qryDate.getTime)
people2.length must_== 3
people2.map(_.id.get).filterNot(List(wilma.id.get, barney.id.get, betty.id.get).contains(_)) must_== List()
// query with Sort
val people3 = Person.findAll(JObject(Nil), ("birthDate" -> -1))
people3.length must_== 4
people3.map(_.id.get) must_== List(betty.id.get, barney.id.get, wilma.id.get, fred.id.get)
val people4 = Person.findAll(JObject(Nil), ("birthDate" -> 1))
people4.length must_== 4
people4.map(_.id.get) must_== List(fred.id.get, wilma.id.get, barney.id.get, betty.id.get)
}
}
| lift/framework | persistence/mongodb-record/src/test/scala/net/liftweb/mongodb/record/QueryExamplesSpec.scala | Scala | apache-2.0 | 5,416 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.schedulers
import scala.concurrent.duration.TimeUnit
import monix.execution.{Cancelable, Features, Scheduler, UncaughtExceptionReporter, ExecutionModel => ExecModel}
import scala.concurrent.ExecutionContext
/** The `TracingScheduler` is a [[monix.execution.Scheduler Scheduler]]
* implementation that wraps another `Scheduler` reference, but
* that propagates the [[monix.execution.misc.Local.Context Local.Context]]
* on async execution.
*
* @param underlying the [[monix.execution.Scheduler Scheduler]]
* in charge of the actual execution and scheduling
*/
final class TracingScheduler private (underlying: Scheduler) extends TracingScheduler.Base(underlying) {
override def withExecutionModel(em: ExecModel): TracingScheduler =
new TracingScheduler(underlying.withExecutionModel(em))
override def withUncaughtExceptionReporter(r: UncaughtExceptionReporter): Scheduler =
new TracingScheduler(underlying.withUncaughtExceptionReporter(r))
}
object TracingScheduler {
/** Builds a [[TracingScheduler]] instance, wrapping the
* `underlying` scheduler given.
*/
def apply(underlying: ExecutionContext): TracingScheduler =
underlying match {
case ref: TracingScheduler => ref
case ref: Scheduler => new TracingScheduler(ref)
case _ => new TracingScheduler(Scheduler(underlying))
}
/** Common implementation between [[TracingScheduler]]
* and [[TracingSchedulerService]].
*/
private[execution] abstract class Base(underlying: Scheduler) extends Scheduler with BatchingScheduler {
override final def executeAsync(r: Runnable): Unit =
underlying.execute(new TracingRunnable(r))
override final def scheduleOnce(initialDelay: Long, unit: TimeUnit, r: Runnable): Cancelable =
underlying.scheduleOnce(initialDelay, unit, new TracingRunnable(r))
override final def scheduleWithFixedDelay(initialDelay: Long, delay: Long, unit: TimeUnit, r: Runnable) =
underlying.scheduleWithFixedDelay(initialDelay, delay, unit, new TracingRunnable(r))
override final def scheduleAtFixedRate(initialDelay: Long, period: Long, unit: TimeUnit, r: Runnable) =
underlying.scheduleAtFixedRate(initialDelay, period, unit, new TracingRunnable(r))
override final def reportFailure(t: Throwable): Unit =
underlying.reportFailure(t)
override final def clockRealTime(unit: TimeUnit): Long =
underlying.clockRealTime(unit)
override def clockMonotonic(unit: TimeUnit): Long =
underlying.clockMonotonic(unit)
override final def executionModel: ExecModel =
underlying.executionModel
override final val features: Features =
underlying.features + Scheduler.TRACING
}
}
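// Illustrative usage sketch, not part of the original source. Wrapping an existing
// Scheduler makes every scheduled task a TracingRunnable, so the Local.Context captured
// at call time is restored when the task actually runs:
//
//   import monix.execution.Scheduler
//
//   val traced: TracingScheduler = TracingScheduler(Scheduler.global)
//   traced.execute(new Runnable {
//     def run(): Unit = println("runs with the caller's Local.Context")
//   })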
| monixio/monix | monix-execution/shared/src/main/scala/monix/execution/schedulers/TracingScheduler.scala | Scala | apache-2.0 | 3,402 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.utils
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.api.java.typeutils.{ObjectArrayTypeInfo, RowTypeInfo, TupleTypeInfo}
import org.apache.flink.table.api.Types
import org.apache.flink.table.functions.AggregateFunction
import org.apache.flink.types.Row
import java.lang.{Float => JFloat, Integer => JInt, Long => JLong}
import java.util
/**
 * User-defined aggregate function that keeps the 10 Int IDs with the highest Float values.
 * An Array[Tuple2[Int, Float]], ordered by value from highest to lowest, is used as the
 * accumulator and stores the top 10 entries.
*
* The result is emitted as Array as well.
*/
class Top10 extends AggregateFunction[Array[JTuple2[JInt, JFloat]], Array[JTuple2[JInt, JFloat]]] {
  override def createAccumulator(): Array[JTuple2[JInt, JFloat]] = {
new Array[JTuple2[JInt, JFloat]](10)
}
/**
* Adds a new entry and count to the top 10 entries if necessary.
*
* @param acc The current top 10
* @param id The ID
* @param value The value for the ID
*/
  def accumulate(acc: Array[JTuple2[JInt, JFloat]], id: Int, value: Float): Unit = {
    var i = 9
    var skipped = 0
    // skip positions without records, i.e. continue until the first entry in the top10 list
    while (i >= 0 && acc(i) == null) {
      i -= 1
    }
// backward linear search for insert position
while (i >= 0 && value > acc(i).f1) {
// check next entry
skipped += 1
i -= 1
}
// set if necessary
if (i < 9) {
      // move entries with lower values down one position; cap the copy length so the shift
      // never writes past the end of the array (the smallest entry falls off a full list)
      if (i < 8 && skipped > 0) {
        System.arraycopy(acc, i + 1, acc, i + 2, math.min(skipped, 8 - i))
      }
// add ID to top10 list
acc(i + 1) = JTuple2.of(id, value)
}
}
override def getValue(acc: Array[JTuple2[JInt, JFloat]]): Array[JTuple2[JInt, JFloat]] = acc
def resetAccumulator(acc: Array[JTuple2[JInt, JFloat]]): Unit = {
util.Arrays.fill(acc.asInstanceOf[Array[Object]], null)
}
def merge(
acc: Array[JTuple2[JInt, JFloat]],
its: java.lang.Iterable[Array[JTuple2[JInt, JFloat]]]): Unit = {
val it = its.iterator()
while (it.hasNext) {
val acc2 = it.next()
var i = 0
var i2 = 0
while (i < 10 && i2 < 10 && acc2(i2) != null) {
if (acc(i) == null) {
// copy to empty place
acc(i) = acc2(i2)
i += 1
i2 += 1
} else if (acc(i).f1.asInstanceOf[Float] >= acc2(i2).f1.asInstanceOf[Float]) {
// forward to next
i += 1
} else {
// shift and copy
System.arraycopy(acc, i, acc, i + 1, 9 - i)
acc(i) = acc2(i2)
i += 1
i2 += 1
}
}
}
}
override def getAccumulatorType: TypeInformation[Array[JTuple2[JInt, JFloat]]] = {
ObjectArrayTypeInfo.getInfoFor(new TupleTypeInfo[JTuple2[JInt, JFloat]](Types.INT, Types.FLOAT))
}
override def getResultType: TypeInformation[Array[JTuple2[JInt, JFloat]]] = {
ObjectArrayTypeInfo.getInfoFor(new TupleTypeInfo[JTuple2[JInt, JFloat]](Types.INT, Types.FLOAT))
}
}
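// Illustrative walk-through, not part of the original source: how the accumulator evolves.
// Entries are kept sorted by value in descending order and unused slots stay null:
//
//   val top10 = new Top10
//   val acc = top10.createAccumulator()   // Array(null, null, ..., null)
//   top10.accumulate(acc, 1, 0.5f)        // [(1, 0.5), null, ...]
//   top10.accumulate(acc, 2, 0.9f)        // [(2, 0.9), (1, 0.5), null, ...]
//   top10.accumulate(acc, 3, 0.7f)        // [(2, 0.9), (3, 0.7), (1, 0.5), null, ...]
//   top10.getValue(acc)                   // the same array, highest value first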
case class NonMergableCountAcc(var count: Long)
class NonMergableCount extends AggregateFunction[Long, NonMergableCountAcc] {
def accumulate(acc: NonMergableCountAcc, value: Any): Unit = {
if (null != value) {
acc.count = acc.count + 1
}
}
def resetAccumulator(acc: NonMergableCountAcc): Unit = {
acc.count = 0
}
override def createAccumulator(): NonMergableCountAcc = NonMergableCountAcc(0)
override def getValue(acc: NonMergableCountAcc): Long = acc.count
}
case class CountMinMaxAcc(var count: Long, var min: Int, var max: Int)
class CountMinMax extends AggregateFunction[Row, CountMinMaxAcc] {
def accumulate(acc: CountMinMaxAcc, value: Int): Unit = {
if (acc.count == 0 || value < acc.min) {
acc.min = value
}
if (acc.count == 0 || value > acc.max) {
acc.max = value
}
acc.count += 1
}
def resetAccumulator(acc: CountMinMaxAcc): Unit = {
acc.count = 0
acc.min = 0
acc.max = 0
}
override def createAccumulator(): CountMinMaxAcc = CountMinMaxAcc(0L, 0, 0)
override def getValue(acc: CountMinMaxAcc): Row = {
val min: Int = if (acc.count > 0) {
acc.min
} else {
null.asInstanceOf[Int]
}
val max: Int = if (acc.count > 0) {
acc.max
} else {
null.asInstanceOf[Int]
}
Row.of(JLong.valueOf(acc.count), JInt.valueOf(min), JInt.valueOf(max))
}
override def getResultType: TypeInformation[Row] = {
new RowTypeInfo(Types.LONG, Types.INT, Types.INT)
}
}
| bowenli86/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/utils/UserDefinedAggFunctions.scala | Scala | apache-2.0 | 5,550 |
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.util
import com.johnsnowlabs.nlp.util.io.ResourceHelper
import org.apache.hadoop.fs.FileSystem
object ConfigHelper {
private lazy val sparkSession = ResourceHelper.spark
// Configures s3 bucket where pretrained models are stored
val pretrainedS3BucketKey = "spark.jsl.settings.pretrained.s3_bucket"
// Configures s3 bucket where community pretrained models are stored
val pretrainedCommunityS3BucketKey = "spark.jsl.settings.pretrained.s3_community_bucket"
// Configures s3 path where pretrained models are stored
val pretrainedS3PathKey = "spark.jsl.settings.pretrained.s3_path"
// Configures cache folder where to cache pretrained models
val pretrainedCacheFolder = "spark.jsl.settings.pretrained.cache_folder"
// Configures log folder where to store annotator logs using OutputHelper
val annotatorLogFolder = "spark.jsl.settings.annotator.log_folder"
// Stores credentials for AWS S3 private models
val awsCredentials = "spark.jsl.settings.pretrained.credentials"
val accessKeyId: String = awsCredentials + ".access_key_id"
val secretAccessKey: String = awsCredentials + ".secret_access_key"
val sessionToken: String = awsCredentials + ".session_token"
val awsProfileName: String = awsCredentials + ".aws_profile_name"
val awsRegion: String = awsCredentials + ".aws.region"
val s3SocketTimeout = "spark.jsl.settings.pretrained.s3_socket_timeout"
// Stores info for AWS S3 logging output when training models
val awsLogCredentials = "spark.jsl.settings.aws.credentials"
val awsExternalAccessKeyId: String = awsLogCredentials + ".access_key_id"
val awsExternalSecretAccessKey: String = awsLogCredentials + ".secret_access_key"
val awsExternalSessionToken: String = awsLogCredentials + ".session_token"
val awsExternalProfileName: String = awsLogCredentials + ".aws_profile_name"
val awsExternalS3BucketKey = "spark.jsl.settings.aws.s3_bucket"
val awsExternalRegion = "spark.jsl.settings.aws.region"
val storageTmpDir = "spark.jsl.settings.storage.cluster_tmp_dir"
val serializationMode = "spark.jsl.settings.annotatorSerializationFormat"
val useBroadcast = "spark.jsl.settings.useBroadcastForFeatures"
def getConfigValueOrElse(property: String, defaultValue: String): String = {
sparkSession.conf.get(property, defaultValue)
}
def getFileSystem: FileSystem = {
FileSystem.get(sparkSession.sparkContext.hadoopConfiguration)
}
def getHadoopTmpDir: String = {
sparkSession.sparkContext.hadoopConfiguration.get("hadoop.tmp.dir")
}
}
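// Illustrative usage sketch, not part of the original source; the fallback path below is an
// arbitrary example value:
//
//   val cacheFolder: String =
//     ConfigHelper.getConfigValueOrElse(ConfigHelper.pretrainedCacheFolder, "/tmp/cache_pretrained")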
| JohnSnowLabs/spark-nlp | src/main/scala/com/johnsnowlabs/util/ConfigHelper.scala | Scala | apache-2.0 | 3,159 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.libs.ws.ahc
import akka.util.ByteString
import play.shaded.ahc.org.asynchttpclient.{ Response => AHCResponse }
import play.api.libs.json.JsValue
import play.api.libs.ws._
import scala.xml.Elem
/**
* A WS HTTP Response backed by an AsyncHttpClient response.
*
* @param underlying
*/
case class AhcWSResponse(underlying: StandaloneAhcWSResponse) extends WSResponse {
def this(ahcResponse: AHCResponse) = {
this(StandaloneAhcWSResponse(ahcResponse))
}
/**
* The response body as String.
*/
override def body: String = underlying.body
/**
* The response body as Xml.
*/
override lazy val xml: Elem = underlying.xml
/**
* Return the current headers of the request being constructed
*/
override def allHeaders: Map[String, Seq[String]] = underlying.allHeaders
/**
* Get the underlying response object, i.e. play.shaded.ahc.org.asynchttpclient.Response
*
* {{{
* val ahcResponse = response.underlying[play.shaded.ahc.org.asynchttpclient.Response]
* }}}
*/
override def underlying[T]: T = underlying.underlying[T]
/**
* The response status code.
*/
override def status: Int = underlying.status
/**
* The response status message.
*/
override def statusText: String = underlying.statusText
/**
* Get a response header.
*/
override def header(key: String): Option[String] = underlying.header(key)
/**
* Get all the cookies.
*/
override def cookies: Seq[WSCookie] = underlying.cookies
/**
* Get only one cookie, using the cookie name.
*/
override def cookie(name: String): Option[WSCookie] = underlying.cookie(name)
/**
* The response body as Json.
*/
override def json: JsValue = underlying.json
/**
* The response body as a byte string.
*/
override def bodyAsBytes: ByteString = underlying.bodyAsBytes
}
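// Illustrative usage sketch, not part of the original source. A typical read of the wrapped
// response inside a WSClient call (`ws` is assumed to be an injected play.api.libs.ws.WSClient
// and an ExecutionContext is assumed to be in scope):
//
//   ws.url("https://example.com/api").get().map { response =>
//     response.status -> (response.json \ "name").asOpt[String]
//   }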
| hagl/playframework | framework/src/play-ahc-ws/src/main/scala/play/api/libs/ws/ahc/AhcWSResponse.scala | Scala | apache-2.0 | 1,937 |
/*
* Copyright (C) 2016 University of Basel, Graphics and Vision Research Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package scalismo.ui.rendering.actor
import scalismo.common.DiscreteField.ScalarVolumeMeshField
import scalismo.geometry._3D
import scalismo.mesh.TetrahedralMesh
import scalismo.ui.model._
import scalismo.ui.model.capabilities.Transformable
import scalismo.ui.model.properties._
import scalismo.ui.rendering.Caches
import scalismo.ui.rendering.Caches.FastCachingTetrahedralMesh
import scalismo.ui.rendering.actor.TetrahedralActor.TetrahedralRenderable
import scalismo.ui.rendering.actor.mixin._
import scalismo.ui.rendering.util.VtkUtil
import scalismo.ui.view.{ViewportPanel, ViewportPanel2D, ViewportPanel3D}
import scalismo.utils.TetrahedralMeshConversion
import vtk.vtkUnstructuredGrid
object TetrahedralMeshActor extends SimpleActorsFactory[TetrahedralMeshNode] {
override def actorsFor(renderable: TetrahedralMeshNode, viewport: ViewportPanel): Option[Actors] = {
viewport match {
case _: ViewportPanel3D => Some(new TetrahedralMeshActor3D(renderable))
case _2d: ViewportPanel2D => Some(new TetrahedralMeshActor2D(renderable, _2d))
}
}
}
object ScalarTetrahedralMeshFieldActor extends SimpleActorsFactory[ScalarTetrahedralMeshFieldNode] {
override def actorsFor(renderable: ScalarTetrahedralMeshFieldNode, viewport: ViewportPanel): Option[Actors] = {
viewport match {
case _: ViewportPanel3D => Some(new ScalarTetrahedralMeshFieldActor3D(renderable))
case _2d: ViewportPanel2D => Some(new ScalarTetrahedralMeshFieldActor2D(renderable, _2d))
}
}
}
object TetrahedralActor {
trait TetrahedralRenderable {
type MeshType
def opacity: OpacityProperty
def lineWidth: LineWidthProperty
def mesh: MeshType
def node: SceneNode
}
private[actor] object TetrahedralRenderable {
class TetrahedralMeshRenderable(override val node: TetrahedralMeshNode) extends TetrahedralRenderable {
type MeshType = TetrahedralMesh[_3D]
override def mesh: TetrahedralMesh[_3D] = node.transformedSource
override def opacity: OpacityProperty = node.opacity
override def lineWidth: LineWidthProperty = node.lineWidth
def color: ColorProperty = node.color
}
class ScalarTetrahedralMeshFieldRenderable(override val node: ScalarTetrahedralMeshFieldNode)
extends TetrahedralRenderable {
type MeshType = TetrahedralMesh[_3D]
override def opacity: OpacityProperty = node.opacity
override def lineWidth: LineWidthProperty = node.lineWidth
override def mesh: TetrahedralMesh[_3D] = field.mesh
def scalarRange: ScalarRangeProperty = node.scalarRange
def field: ScalarVolumeMeshField[Float] = node.transformedSource
}
def apply(source: TetrahedralMeshNode): TetrahedralMeshRenderable = new TetrahedralMeshRenderable(source)
def apply(source: ScalarTetrahedralMeshFieldNode): ScalarTetrahedralMeshFieldRenderable =
new ScalarTetrahedralMeshFieldRenderable(source)
}
}
trait TetrahedralActor[R <: TetrahedralRenderable] extends SingleDataSetActor with ActorOpacity with ActorSceneNode {
def renderable: R
override def opacity: OpacityProperty = renderable.opacity
override def sceneNode: SceneNode = renderable.node
protected def meshToUnstructuredGrid(template: Option[vtkUnstructuredGrid]): vtkUnstructuredGrid
protected var unstructuredgrid: vtkUnstructuredGrid = meshToUnstructuredGrid(None)
// this is invoked from within the rerender method, if the geometry has changed.
protected def onGeometryChanged(): Unit
protected def rerender(geometryChanged: Boolean): Unit = {
if (geometryChanged) {
unstructuredgrid = meshToUnstructuredGrid(None)
onGeometryChanged()
}
actorChanged(geometryChanged)
}
protected def onInstantiated(): Unit = {}
//FIXME: pick control -- this should probably go into a trait or something.
renderable.node match {
case p: HasPickable =>
SetPickable(if (p.pickable.value) 1 else 0)
listenTo(p.pickable)
reactions += {
case NodeProperty.event.PropertyChanged(s) if s == p.pickable =>
SetPickable(if (p.pickable.value) 1 else 0)
}
case _ =>
}
onInstantiated()
rerender(geometryChanged = true)
listenTo(renderable.node)
reactions += {
case Transformable.event.GeometryChanged(_) => rerender(geometryChanged = true)
}
}
trait TetrahedralMeshActor extends TetrahedralActor[TetrahedralRenderable.TetrahedralMeshRenderable] with ActorColor {
override def renderable: TetrahedralRenderable.TetrahedralMeshRenderable
override def color: ColorProperty = renderable.color
override protected def meshToUnstructuredGrid(template: Option[vtkUnstructuredGrid]): vtkUnstructuredGrid = {
Caches.TetrahedralMeshCache.getOrCreate(
FastCachingTetrahedralMesh(renderable.mesh),
TetrahedralMeshConversion.tetrahedralMeshToVTKUnstructuredGrid(renderable.mesh, template)
)
}
}
trait TetrahedralMeshScalarFieldActor
extends TetrahedralActor[TetrahedralRenderable.ScalarTetrahedralMeshFieldRenderable]
with ActorScalarRange {
override def renderable: TetrahedralRenderable.ScalarTetrahedralMeshFieldRenderable
override def scalarRange: ScalarRangeProperty = renderable.scalarRange
override protected def meshToUnstructuredGrid(template: Option[vtkUnstructuredGrid]): vtkUnstructuredGrid = {
Caches.ScalarTetrahedralMeshFieldCache
.getOrCreate(renderable.field,
TetrahedralMeshConversion.scalarVolumeMeshFieldToVtkUnstructuredGrid(renderable.field, template))
}
}
abstract class TetrahedralActor3D[R <: TetrahedralRenderable](override val renderable: R) extends TetrahedralActor[R] {
override protected def onInstantiated(): Unit = {
mapper.SetInputData(unstructuredgrid)
}
override protected def onGeometryChanged(): Unit = {
mapper.SetInputData(unstructuredgrid)
}
}
abstract class TetrahedralActor2D[R <: TetrahedralRenderable](override val renderable: R, viewport: ViewportPanel2D)
extends SlicingActor(viewport)
with TetrahedralActor[R]
with ActorLineWidth {
override def lineWidth: LineWidthProperty = renderable.lineWidth
override protected def onSlicingPositionChanged(): Unit = rerender(geometryChanged = false)
override protected def onGeometryChanged(): Unit = {
planeCutter.SetInputData(unstructuredgrid)
planeCutter.Modified()
}
override protected def sourceBoundingBox: BoundingBox = VtkUtil.bounds2BoundingBox(unstructuredgrid.GetBounds())
}
class TetrahedralMeshActor3D(node: TetrahedralMeshNode)
extends TetrahedralActor3D(TetrahedralRenderable(node))
with TetrahedralMeshActor
class TetrahedralMeshActor2D(node: TetrahedralMeshNode, viewport: ViewportPanel2D)
extends TetrahedralActor2D(TetrahedralRenderable(node), viewport)
with TetrahedralMeshActor
class ScalarTetrahedralMeshFieldActor3D(node: ScalarTetrahedralMeshFieldNode)
extends TetrahedralActor3D(TetrahedralRenderable(node))
with TetrahedralMeshScalarFieldActor
class ScalarTetrahedralMeshFieldActor2D(node: ScalarTetrahedralMeshFieldNode, viewport: ViewportPanel2D)
extends TetrahedralActor2D(TetrahedralRenderable(node), viewport)
with TetrahedralMeshScalarFieldActor
| unibas-gravis/scalismo-ui | src/main/scala/scalismo/ui/rendering/actor/TetrahedralActor.scala | Scala | gpl-3.0 | 8,699 |
package io.getquill.context.spark
import io.getquill.Spec
case class Person(name: String, age: Int)
case class Couple(her: String, him: String)
class PeopleJdbcSpec extends Spec {
val context = io.getquill.context.sql.testContext
import testContext._
import sqlContext.implicits._
val couples = liftQuery {
Seq(
Couple("Alex", "Bert"),
Couple("Cora", "Drew"),
Couple("Edna", "Fred")
).toDS
}
val people = liftQuery {
Seq(
Person("Alex", 60),
Person("Bert", 55),
Person("Cora", 33),
Person("Drew", 31),
Person("Edna", 21),
Person("Fred", 60)
).toDS
}
"Example 1 - differences" in {
val q =
quote {
for {
c <- couples.distinct
w <- people.distinct
m <- people.distinct if (c.her == w.name && c.him == m.name && w.age > m.age)
} yield {
(w.name, w.age - m.age)
}
}
testContext.run(q).collect.toList.sorted mustEqual
List(("Alex", 5), ("Cora", 2))
}
"Example 1 - differences with explicit join" in {
val q =
quote {
for {
c <- couples
w <- people.join(w => c.her == w.name)
m <- people.join(m => c.him == m.name) if (w.age > m.age)
} yield {
(w.name, w.age - m.age)
}
}
testContext.run(q).collect.toList.sorted mustEqual
List(("Alex", 5), ("Cora", 2))
}
"Example 2 - range simple" in {
val rangeSimple = quote {
(a: Int, b: Int) =>
for {
u <- people if (a <= u.age && u.age < b)
} yield {
u
}
}
testContext.run(rangeSimple(30, 40)).collect.toList mustEqual
List(Person("Cora", 33), Person("Drew", 31))
}
val satisfies =
quote {
(p: Int => Boolean) =>
for {
u <- people if (p(u.age))
} yield {
u
}
}
"Example 3 - satisfies" in {
testContext.run(satisfies((x: Int) => 20 <= x && x < 30)).collect.toList mustEqual
List(Person("Edna", 21))
}
"Example 4 - satisfies" in {
testContext.run(satisfies((x: Int) => x % 2 == 0)).collect.toList mustEqual
List(Person("Alex", 60), Person("Fred", 60))
}
"Example 5 - compose" in {
val q = {
val range = quote {
(a: Int, b: Int) =>
for {
u <- people if (a <= u.age && u.age < b)
} yield {
u
}
}
val ageFromName = quote {
(s: String) =>
for {
u <- people if (s == u.name)
} yield {
u.age
}
}
quote {
(s: String, t: String) =>
for {
a <- ageFromName(s)
b <- ageFromName(t)
r <- range(a, b)
} yield {
r
}
}
}
testContext.run(q("Drew", "Bert")).collect.toList mustEqual
List(Person("Cora", 33), Person("Drew", 31))
}
"Distinct" - {
"simple distinct" in {
val q =
quote {
couples.distinct
}
testContext.run(q).collect.toList mustEqual
List(Couple("Alex", "Bert"), Couple("Cora", "Drew"), Couple("Edna", "Fred"))
}
"complex distinct" in {
val q =
quote {
for {
c <- couples.distinct
w <- people.distinct.join(w => c.her == w.name)
m <- people.distinct.join(m => c.him == m.name) if (w.age > m.age)
} yield {
(w.name, w.age - m.age)
}
}
testContext.run(q).collect.toList.sorted mustEqual
List(("Alex", 5), ("Cora", 2))
}
"should throw Exception" in {
val q =
""" quote {
for {
c <- couples
w <- people.join(w => c.her == w.name).distinct
m <- people.join(m => c.him == m.name) if (w.age > m.age)
} yield {
(w.name, w.age - m.age)
}
}""" mustNot compile
}
}
"Nested" - {
"simple nested" in {
val q =
quote {
couples.nested
}
testContext.run(q.dynamic).collect.toList mustEqual
List(Couple("Alex", "Bert"), Couple("Cora", "Drew"), Couple("Edna", "Fred"))
}
"complex distinct" in {
val q =
quote {
for {
c <- couples.nested
w <- people.nested.join(w => c.her == w.name)
m <- people.nested.join(m => c.him == m.name) if (w.age > m.age)
} yield {
(w.name, w.age - m.age)
}
}
testContext.run(q).collect.toList.sorted mustEqual
List(("Alex", 5), ("Cora", 2))
}
"should throw Exception" in {
val q =
""" quote {
for {
c <- couples
w <- people.join(w => c.her == w.name).nested
m <- people.join(m => c.him == m.name) if (w.age > m.age)
} yield {
(w.name, w.age - m.age)
}
}""" mustNot compile
}
}
}
| getquill/quill | quill-spark/src/test/scala/io/getquill/context/spark/PeopleSparkSpec.scala | Scala | apache-2.0 | 5,007 |
package com.taig.tmpltr.engine.html.property
import com.taig.tmpltr.Property
trait a
{
class target( target: String ) extends Property( target )
object target
{
object blank extends target( "_blank" )
object parent extends target( "_parent" )
object self extends target( "_self" )
object top extends target( "_top" )
class frame( name: String ) extends target( name )
}
} | Taig/Play-Tmpltr | app/com/taig/tmpltr/engine/html/property/a.scala | Scala | mit | 387 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.sql.{Connection, Types}
import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcUtils}
import org.apache.spark.sql.types._
private object PostgresDialect extends JdbcDialect {
override def canHandle(url: String): Boolean = url.startsWith("jdbc:postgresql")
override def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
if (sqlType == Types.REAL) {
Some(FloatType)
} else if (sqlType == Types.SMALLINT) {
Some(ShortType)
} else if (sqlType == Types.BIT && typeName.equals("bit") && size != 1) {
Some(BinaryType)
} else if (sqlType == Types.OTHER) {
Some(StringType)
} else if (sqlType == Types.ARRAY) {
val scale = md.build.getLong("scale").toInt
// postgres array type names start with underscore
toCatalystType(typeName.drop(1), size, scale).map(ArrayType(_))
} else None
}
private def toCatalystType(
typeName: String,
precision: Int,
scale: Int): Option[DataType] = typeName match {
case "bool" => Some(BooleanType)
case "bit" => Some(BinaryType)
case "int2" => Some(ShortType)
case "int4" => Some(IntegerType)
case "int8" | "oid" => Some(LongType)
case "float4" => Some(FloatType)
case "money" | "float8" => Some(DoubleType)
case "text" | "varchar" | "char" | "cidr" | "inet" | "json" | "jsonb" | "uuid" =>
Some(StringType)
case "bytea" => Some(BinaryType)
case "timestamp" | "timestamptz" | "time" | "timetz" => Some(TimestampType)
case "date" => Some(DateType)
case "numeric" | "decimal" if precision > 0 => Some(DecimalType.bounded(precision, scale))
case "numeric" | "decimal" =>
// SPARK-26538: handle numeric without explicit precision and scale.
      Some(DecimalType.SYSTEM_DEFAULT)
case _ => None
}
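  // Illustrative example, not part of the original source: a Postgres "integer[]" column is
  // reported as sqlType = Types.ARRAY with typeName "_int4"; the leading underscore is dropped,
  // toCatalystType("int4", 0, 0) yields Some(IntegerType), and getCatalystType therefore
  // returns Some(ArrayType(IntegerType)).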
override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {
case StringType => Some(JdbcType("TEXT", Types.CHAR))
case BinaryType => Some(JdbcType("BYTEA", Types.BINARY))
case BooleanType => Some(JdbcType("BOOLEAN", Types.BOOLEAN))
case FloatType => Some(JdbcType("FLOAT4", Types.FLOAT))
case DoubleType => Some(JdbcType("FLOAT8", Types.DOUBLE))
case ShortType | ByteType => Some(JdbcType("SMALLINT", Types.SMALLINT))
case t: DecimalType => Some(
JdbcType(s"NUMERIC(${t.precision},${t.scale})", java.sql.Types.NUMERIC))
case ArrayType(et, _) if et.isInstanceOf[AtomicType] =>
getJDBCType(et).map(_.databaseTypeDefinition)
.orElse(JdbcUtils.getCommonJDBCType(et).map(_.databaseTypeDefinition))
.map(typeName => JdbcType(s"$typeName[]", java.sql.Types.ARRAY))
case _ => None
}
override def getTableExistsQuery(table: String): String = {
s"SELECT 1 FROM $table LIMIT 1"
}
override def isCascadingTruncateTable(): Option[Boolean] = Some(false)
/**
* The SQL query used to truncate a table. For Postgres, the default behaviour is to
* also truncate any descendant tables. As this is a (possibly unwanted) side-effect,
* the Postgres dialect adds 'ONLY' to truncate only the table in question
* @param table The table to truncate
* @param cascade Whether or not to cascade the truncation. Default value is the value of
* isCascadingTruncateTable(). Cascading a truncation will truncate tables
* with a foreign key relationship to the target table. However, it will not
* truncate tables with an inheritance relationship to the target table, as
* the truncate query always includes "ONLY" to prevent this behaviour.
* @return The SQL query to use for truncating a table
*/
override def getTruncateQuery(
table: String,
cascade: Option[Boolean] = isCascadingTruncateTable): String = {
cascade match {
case Some(true) => s"TRUNCATE TABLE ONLY $table CASCADE"
case _ => s"TRUNCATE TABLE ONLY $table"
}
}
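  // Illustrative examples, not part of the original source (the table name is hypothetical):
  //   getTruncateQuery("public.users")             == "TRUNCATE TABLE ONLY public.users"
  //   getTruncateQuery("public.users", Some(true)) == "TRUNCATE TABLE ONLY public.users CASCADE"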
override def beforeFetch(connection: Connection, properties: Map[String, String]): Unit = {
super.beforeFetch(connection, properties)
    // According to the PostgreSQL JDBC documentation, a non-zero fetch size only takes effect
    // when autocommit is disabled; otherwise the driver eagerly fetches and caches the whole
    // result set. Disabling autocommit here therefore lets the driver stream rows instead of
    // caching them all.
    //
    // See: https://jdbc.postgresql.org/documentation/head/query.html#query-with-cursor
    //
if (properties.getOrElse(JDBCOptions.JDBC_BATCH_FETCH_SIZE, "0").toInt > 0) {
connection.setAutoCommit(false)
}
}
}
| pgandhi999/spark | sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala | Scala | apache-2.0 | 5,448 |
package ilc
package language
package bacchus
import org.scalatest.FunSuite
import org.scalatest.Matchers
class BacchusPrettySuite
extends FunSuite
with Matchers
{
object Lang extends Evaluation with feature.base.Pretty
import Lang._
test("values have short descriptions") {
NatValue(9).toString should be("9")
MapValue(1 -> 4).toString should be("Map(1 -> 4)")
SumValue(Left(5)).toString should be("Inj1(5)")
SumValue(Right(2)).toString should be("Inj2(2)")
MaybeValue(None).toString should be("Nope")
MaybeValue(Some(5)).toString should be("Just(5)")
}
}
| inc-lc/ilc-scala | src/test/scala/ilc/language/bacchus/BacchusPrettySuite.scala | Scala | mit | 594 |
/*
* Copyright 2013 TeamNexus
*
* TeamNexus Licenses this file to you under the MIT License (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://opensource.org/licenses/mit-license.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License
*/
package com.nexus.webserver.handlers
import com.nexus.webserver.{SslContextProvider, TWebServerHandler}
import io.netty.channel.{SimpleChannelInboundHandler, ChannelHandlerContext}
import io.netty.handler.codec.http.{HttpHeaders, FullHttpRequest}
import io.netty.handler.codec.http.websocketx._
import com.nexus.webserver.netty.{WebSocketHandler, RoutedHandler}
/**
* No description given
*
* @author jk-5
*/
class WebServerHandlerWebsocket extends SimpleChannelInboundHandler[FullHttpRequest] with RoutedHandler {
  def channelRead0(ctx: ChannelHandlerContext, msg: FullHttpRequest): Unit = {
    println(this.getURLData.getURL)
    val factory = new WebSocketServerHandshakerFactory(
      "%s://".format(if (SslContextProvider.isValid) "wss" else "ws") +
        msg.headers().get(HttpHeaders.Names.HOST) + "/websocket", null, false)
val handshaker = factory.newHandshaker(msg)
if(handshaker == null) WebSocketServerHandshakerFactory.sendUnsupportedWebSocketVersionResponse(ctx.channel())
else{
handshaker.handshake(ctx.channel(), msg)
ctx.pipeline().get("websocketHandler").asInstanceOf[WebSocketHandler].setHandshaker(handshaker)
}
}
}
| crvidya/nexus-scala | src/main/scala/com/nexus/webserver/handlers/WebServerHandlerWebsocket.scala | Scala | mit | 1,762 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.errors
import java.io.{FileNotFoundException, IOException}
import java.lang.reflect.InvocationTargetException
import java.net.{URISyntaxException, URL}
import java.sql.{SQLException, SQLFeatureNotSupportedException}
import java.text.{ParseException => JavaParseException}
import java.time.{DateTimeException, LocalDate}
import java.time.format.DateTimeParseException
import java.time.temporal.ChronoField
import java.util.ConcurrentModificationException
import java.util.concurrent.TimeoutException
import com.fasterxml.jackson.core.{JsonParser, JsonToken}
import org.apache.hadoop.fs.{FileAlreadyExistsException, FileStatus, Path}
import org.apache.hadoop.fs.permission.FsPermission
import org.codehaus.commons.compiler.CompileException
import org.codehaus.janino.InternalCompilerException
import org.apache.spark.{Partition, SparkArithmeticException, SparkArrayIndexOutOfBoundsException,
  SparkClassNotFoundException, SparkConcurrentModificationException, SparkDateTimeException,
  SparkException, SparkFileAlreadyExistsException, SparkFileNotFoundException,
  SparkIllegalArgumentException, SparkIllegalStateException, SparkIndexOutOfBoundsException,
  SparkNoSuchElementException, SparkNoSuchMethodException, SparkNumberFormatException,
  SparkRuntimeException, SparkSecurityException, SparkSQLException,
  SparkSQLFeatureNotSupportedException, SparkUnsupportedOperationException, SparkUpgradeException}
import org.apache.spark.executor.CommitDeniedException
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.memory.SparkOutOfMemoryError
import org.apache.spark.sql.catalyst.ScalaReflection.Schema
import org.apache.spark.sql.catalyst.WalkedTypePath
import org.apache.spark.sql.catalyst.analysis.UnresolvedGenerator
import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, CatalogTable}
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression}
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.catalyst.plans.JoinType
import org.apache.spark.sql.catalyst.plans.logical.{DomainJoin, LogicalPlan}
import org.apache.spark.sql.catalyst.plans.logical.statsEstimation.ValueInterval
import org.apache.spark.sql.catalyst.trees.TreeNode
import org.apache.spark.sql.catalyst.util.{sideBySide, BadRecordException, FailFastMode}
import org.apache.spark.sql.connector.catalog.{CatalogNotFoundException, Identifier, Table, TableProvider}
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
import org.apache.spark.sql.connector.expressions.Transform
import org.apache.spark.sql.execution.QueryExecutionException
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf.GLOBAL_TEMP_DATABASE
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.CircularBuffer
/**
* Object for grouping error messages from (most) exceptions thrown during query execution.
* This does not include exceptions thrown during the eager execution of commands, which are
* grouped into [[QueryCompilationErrors]].
*/
object QueryExecutionErrors {
def logicalHintOperatorNotRemovedDuringAnalysisError(): Throwable = {
new SparkIllegalStateException(errorClass = "INTERNAL_ERROR",
messageParameters = Array(
"Internal error: logical hint operator should have been removed during analysis"))
}
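  // Illustrative usage sketch, not part of the original source: call sites construct and throw
  // these errors instead of building exceptions inline, e.g. (hypothetical guard):
  //
  //   if (divisor == 0) throw QueryExecutionErrors.divideByZeroError()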
def cannotEvaluateExpressionError(expression: Expression): Throwable = {
new SparkUnsupportedOperationException(errorClass = "INTERNAL_ERROR",
messageParameters = Array(s"Cannot evaluate expression: $expression"))
}
def cannotGenerateCodeForExpressionError(expression: Expression): Throwable = {
new SparkUnsupportedOperationException(errorClass = "INTERNAL_ERROR",
messageParameters = Array(s"Cannot generate code for expression: $expression"))
}
def cannotTerminateGeneratorError(generator: UnresolvedGenerator): Throwable = {
new SparkUnsupportedOperationException(errorClass = "INTERNAL_ERROR",
messageParameters = Array(s"Cannot terminate expression: $generator"))
}
def castingCauseOverflowError(t: Any, dataType: DataType): ArithmeticException = {
new SparkArithmeticException(errorClass = "CAST_CAUSES_OVERFLOW",
messageParameters = Array(t.toString, dataType.catalogString, SQLConf.ANSI_ENABLED.key))
}
def cannotChangeDecimalPrecisionError(
value: Decimal, decimalPrecision: Int, decimalScale: Int): ArithmeticException = {
new SparkArithmeticException(errorClass = "CANNOT_CHANGE_DECIMAL_PRECISION",
messageParameters = Array(value.toDebugString,
decimalPrecision.toString, decimalScale.toString, SQLConf.ANSI_ENABLED.key))
}
def invalidInputSyntaxForNumericError(e: NumberFormatException): NumberFormatException = {
new NumberFormatException(s"${e.getMessage}. To return NULL instead, use 'try_cast'. " +
s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error.")
}
def invalidInputSyntaxForNumericError(s: UTF8String): NumberFormatException = {
new SparkNumberFormatException(errorClass = "INVALID_INPUT_SYNTAX_FOR_NUMERIC_TYPE",
messageParameters = Array(s.toString, SQLConf.ANSI_ENABLED.key))
}
def cannotCastFromNullTypeError(to: DataType): Throwable = {
new SparkException(errorClass = "CANNOT_CAST_DATATYPE",
messageParameters = Array(NullType.typeName, to.typeName), null)
}
def cannotCastError(from: DataType, to: DataType): Throwable = {
new SparkException(errorClass = "CANNOT_CAST_DATATYPE",
messageParameters = Array(from.typeName, to.typeName), null)
}
def cannotParseDecimalError(): Throwable = {
new SparkIllegalStateException(errorClass = "CANNOT_PARSE_DECIMAL",
messageParameters = Array.empty)
}
def dataTypeUnsupportedError(dataType: String, failure: String): Throwable = {
new SparkIllegalArgumentException(errorClass = "UNSUPPORTED_DATATYPE",
messageParameters = Array(dataType + failure))
}
def failedExecuteUserDefinedFunctionError(funcCls: String, inputTypes: String,
outputType: String, e: Throwable): Throwable = {
new SparkException(errorClass = "FAILED_EXECUTE_UDF",
messageParameters = Array(funcCls, inputTypes, outputType), e)
}
def divideByZeroError(): ArithmeticException = {
new SparkArithmeticException(
errorClass = "DIVIDE_BY_ZERO", messageParameters = Array(SQLConf.ANSI_ENABLED.key))
}
def invalidArrayIndexError(index: Int, numElements: Int): ArrayIndexOutOfBoundsException = {
invalidArrayIndexErrorInternal(index, numElements, SQLConf.ANSI_STRICT_INDEX_OPERATOR.key)
}
def invalidInputIndexError(index: Int, numElements: Int): ArrayIndexOutOfBoundsException = {
invalidArrayIndexErrorInternal(index, numElements, SQLConf.ANSI_ENABLED.key)
}
private def invalidArrayIndexErrorInternal(
index: Int,
numElements: Int,
key: String): ArrayIndexOutOfBoundsException = {
new SparkArrayIndexOutOfBoundsException(errorClass = "INVALID_ARRAY_INDEX",
messageParameters = Array(index.toString, numElements.toString, key))
}
def invalidElementAtIndexError(
index: Int,
numElements: Int): ArrayIndexOutOfBoundsException = {
new SparkArrayIndexOutOfBoundsException(errorClass = "INVALID_ARRAY_INDEX_IN_ELEMENT_AT",
messageParameters = Array(index.toString, numElements.toString, SQLConf.ANSI_ENABLED.key))
}
def mapKeyNotExistError(key: Any, isElementAtFunction: Boolean): NoSuchElementException = {
if (isElementAtFunction) {
new SparkNoSuchElementException(errorClass = "MAP_KEY_DOES_NOT_EXIST_IN_ELEMENT_AT",
messageParameters = Array(key.toString, SQLConf.ANSI_ENABLED.key))
} else {
new SparkNoSuchElementException(errorClass = "MAP_KEY_DOES_NOT_EXIST",
messageParameters = Array(key.toString, SQLConf.ANSI_STRICT_INDEX_OPERATOR.key))
}
}
def inputTypeUnsupportedError(dataType: DataType): Throwable = {
new IllegalArgumentException(s"Unsupported input type ${dataType.catalogString}")
}
def invalidFractionOfSecondError(): DateTimeException = {
new SparkDateTimeException(errorClass = "INVALID_FRACTION_OF_SECOND",
Array(SQLConf.ANSI_ENABLED.key))
}
def ansiDateTimeParseError(e: DateTimeParseException): DateTimeParseException = {
val newMessage = s"${e.getMessage}. " +
s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error."
new DateTimeParseException(newMessage, e.getParsedString, e.getErrorIndex, e.getCause)
}
def ansiDateTimeError(e: DateTimeException): DateTimeException = {
val newMessage = s"${e.getMessage}. " +
s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error."
new DateTimeException(newMessage, e.getCause)
}
def ansiParseError(e: JavaParseException): JavaParseException = {
val newMessage = s"${e.getMessage}. " +
s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error."
new JavaParseException(newMessage, e.getErrorOffset)
}
def ansiIllegalArgumentError(message: String): IllegalArgumentException = {
val newMessage = s"$message. If necessary set ${SQLConf.ANSI_ENABLED.key} " +
s"to false to bypass this error."
new IllegalArgumentException(newMessage)
}
def ansiIllegalArgumentError(e: IllegalArgumentException): IllegalArgumentException = {
ansiIllegalArgumentError(e.getMessage)
}
def overflowInSumOfDecimalError(): ArithmeticException = {
arithmeticOverflowError("Overflow in sum of decimals")
}
def overflowInIntegralDivideError(): ArithmeticException = {
arithmeticOverflowError("Overflow in integral divide", "try_divide")
}
def mapSizeExceedArraySizeWhenZipMapError(size: Int): RuntimeException = {
new RuntimeException(s"Unsuccessful try to zip maps with $size " +
"unique keys due to exceeding the array size limit " +
s"${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
}
def copyNullFieldNotAllowedError(): Throwable = {
new IllegalStateException("Do not attempt to copy a null field")
}
def literalTypeUnsupportedError(v: Any): RuntimeException = {
new SparkRuntimeException(
errorClass = "UNSUPPORTED_FEATURE",
messageParameters = Array(s"literal for '${v.toString}' of ${v.getClass.toString}."))
}
def pivotColumnUnsupportedError(v: Any, dataType: DataType): RuntimeException = {
new SparkRuntimeException(
errorClass = "UNSUPPORTED_FEATURE",
messageParameters = Array(
s"pivoting by the value '${v.toString}' of the column data type" +
s" '${dataType.catalogString}'."))
}
def noDefaultForDataTypeError(dataType: DataType): RuntimeException = {
new RuntimeException(s"no default for type $dataType")
}
def doGenCodeOfAliasShouldNotBeCalledError(): Throwable = {
new IllegalStateException("Alias.doGenCode should not be called.")
}
def orderedOperationUnsupportedByDataTypeError(dataType: DataType): Throwable = {
new IllegalArgumentException(s"Type $dataType does not support ordered operations")
}
def regexGroupIndexLessThanZeroError(): Throwable = {
new IllegalArgumentException("The specified group index cannot be less than zero")
}
def regexGroupIndexExceedGroupCountError(
groupCount: Int, groupIndex: Int): Throwable = {
new IllegalArgumentException(
s"Regex group count is $groupCount, but the specified group index is $groupIndex")
}
def invalidUrlError(url: UTF8String, e: URISyntaxException): Throwable = {
new IllegalArgumentException(s"Find an invalid url string ${url.toString}. " +
s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error.", e)
}
def dataTypeOperationUnsupportedError(): Throwable = {
new UnsupportedOperationException("dataType")
}
def mergeUnsupportedByWindowFunctionError(): Throwable = {
new UnsupportedOperationException("Window Functions do not support merging.")
}
def dataTypeUnexpectedError(dataType: DataType): Throwable = {
new UnsupportedOperationException(s"Unexpected data type ${dataType.catalogString}")
}
def typeUnsupportedError(dataType: DataType): Throwable = {
new IllegalArgumentException(s"Unexpected type $dataType")
}
def negativeValueUnexpectedError(frequencyExpression : Expression): Throwable = {
new SparkException(s"Negative values found in ${frequencyExpression.sql}")
}
def addNewFunctionMismatchedWithFunctionError(funcName: String): Throwable = {
new IllegalArgumentException(s"$funcName is not matched at addNewFunction")
}
def cannotGenerateCodeForUncomparableTypeError(
codeType: String, dataType: DataType): Throwable = {
new IllegalArgumentException(
s"cannot generate $codeType code for un-comparable type: ${dataType.catalogString}")
}
def cannotGenerateCodeForUnsupportedTypeError(dataType: DataType): Throwable = {
new IllegalArgumentException(s"cannot generate code for unsupported type: $dataType")
}
def cannotInterpolateClassIntoCodeBlockError(arg: Any): Throwable = {
new IllegalArgumentException(
s"Can not interpolate ${arg.getClass.getName} into code block.")
}
def customCollectionClsNotResolvedError(): Throwable = {
new UnsupportedOperationException("not resolved")
}
def classUnsupportedByMapObjectsError(cls: Class[_]): RuntimeException = {
new RuntimeException(s"class `${cls.getName}` is not supported by `MapObjects` as " +
"resulting collection.")
}
def nullAsMapKeyNotAllowedError(): RuntimeException = {
new RuntimeException("Cannot use null as map key!")
}
def methodNotDeclaredError(name: String): Throwable = {
new SparkNoSuchMethodException(errorClass = "INTERNAL_ERROR",
messageParameters = Array(
s"""A method named "$name" is not declared in any enclosing class nor any supertype"""))
}
def constructorNotFoundError(cls: String): Throwable = {
new RuntimeException(s"Couldn't find a valid constructor on $cls")
}
def primaryConstructorNotFoundError(cls: Class[_]): Throwable = {
new RuntimeException(s"Couldn't find a primary constructor on $cls")
}
def unsupportedNaturalJoinTypeError(joinType: JoinType): Throwable = {
new RuntimeException("Unsupported natural join type " + joinType)
}
def notExpectedUnresolvedEncoderError(attr: AttributeReference): Throwable = {
new RuntimeException(s"Unresolved encoder expected, but $attr was found.")
}
def unsupportedEncoderError(): Throwable = {
new RuntimeException("Only expression encoders are supported for now.")
}
def notOverrideExpectedMethodsError(className: String, m1: String, m2: String): Throwable = {
new RuntimeException(s"$className must override either $m1 or $m2")
}
def failToConvertValueToJsonError(value: AnyRef, cls: Class[_], dataType: DataType): Throwable = {
new RuntimeException(s"Failed to convert value $value (class of $cls) " +
s"with the type of $dataType to JSON.")
}
def unexpectedOperatorInCorrelatedSubquery(op: LogicalPlan, pos: String = ""): Throwable = {
new RuntimeException(s"Unexpected operator $op in correlated subquery" + pos)
}
def unreachableError(err: String = ""): Throwable = {
new RuntimeException("This line should be unreachable" + err)
}
def unsupportedRoundingMode(roundMode: BigDecimal.RoundingMode.Value): Throwable = {
new RuntimeException(s"Not supported rounding mode: $roundMode")
}
def resolveCannotHandleNestedSchema(plan: LogicalPlan): Throwable = {
new RuntimeException(s"Can not handle nested schema yet... plan $plan")
}
def inputExternalRowCannotBeNullError(): RuntimeException = {
new RuntimeException("The input external row cannot be null.")
}
def fieldCannotBeNullMsg(index: Int, fieldName: String): String = {
s"The ${index}th field '$fieldName' of input row cannot be null."
}
def fieldCannotBeNullError(index: Int, fieldName: String): RuntimeException = {
new RuntimeException(fieldCannotBeNullMsg(index, fieldName))
}
def unableToCreateDatabaseAsFailedToCreateDirectoryError(
dbDefinition: CatalogDatabase, e: IOException): Throwable = {
new SparkException(s"Unable to create database ${dbDefinition.name} as failed " +
s"to create its directory ${dbDefinition.locationUri}", e)
}
def unableToDropDatabaseAsFailedToDeleteDirectoryError(
dbDefinition: CatalogDatabase, e: IOException): Throwable = {
new SparkException(s"Unable to drop database ${dbDefinition.name} as failed " +
s"to delete its directory ${dbDefinition.locationUri}", e)
}
def unableToCreateTableAsFailedToCreateDirectoryError(
table: String, defaultTableLocation: Path, e: IOException): Throwable = {
new SparkException(s"Unable to create table $table as failed " +
s"to create its directory $defaultTableLocation", e)
}
def unableToDeletePartitionPathError(partitionPath: Path, e: IOException): Throwable = {
new SparkException(s"Unable to delete partition path $partitionPath", e)
}
def unableToDropTableAsFailedToDeleteDirectoryError(
table: String, dir: Path, e: IOException): Throwable = {
new SparkException(s"Unable to drop table $table as failed " +
s"to delete its directory $dir", e)
}
def unableToRenameTableAsFailedToRenameDirectoryError(
oldName: String, newName: String, oldDir: Path, e: IOException): Throwable = {
new SparkException(s"Unable to rename table $oldName to $newName as failed " +
s"to rename its directory $oldDir", e)
}
def unableToCreatePartitionPathError(partitionPath: Path, e: IOException): Throwable = {
new SparkException(s"Unable to create partition path $partitionPath", e)
}
def unableToRenamePartitionPathError(oldPartPath: Path, e: IOException): Throwable = {
new SparkException(s"Unable to rename partition path $oldPartPath", e)
}
def methodNotImplementedError(methodName: String): Throwable = {
new UnsupportedOperationException(s"$methodName is not implemented")
}
def tableStatsNotSpecifiedError(): Throwable = {
new IllegalStateException("table stats must be specified.")
}
def arithmeticOverflowError(e: ArithmeticException): ArithmeticException = {
new ArithmeticException(s"${e.getMessage}. If necessary set ${SQLConf.ANSI_ENABLED.key} " +
s"to false to bypass this error.")
}
def arithmeticOverflowError(message: String, hint: String = ""): ArithmeticException = {
val alternative = if (hint.nonEmpty) s" To return NULL instead, use '$hint'." else ""
new ArithmeticException(s"$message.$alternative If necessary set " +
s"${SQLConf.ANSI_ENABLED.key} to false (except for ANSI interval type) to bypass this error.")
}
def unaryMinusCauseOverflowError(originValue: AnyVal): ArithmeticException = {
arithmeticOverflowError(s"- $originValue caused overflow")
}
def binaryArithmeticCauseOverflowError(
eval1: Short, symbol: String, eval2: Short): ArithmeticException = {
arithmeticOverflowError(s"$eval1 $symbol $eval2 caused overflow")
}
def failedSplitSubExpressionMsg(length: Int): String = {
"Failed to split subexpression code into small functions because " +
s"the parameter length of at least one split function went over the JVM limit: $length"
}
def failedSplitSubExpressionError(length: Int): Throwable = {
new IllegalStateException(failedSplitSubExpressionMsg(length))
}
def failedToCompileMsg(e: Exception): String = {
s"failed to compile: $e"
}
def internalCompilerError(e: InternalCompilerException): Throwable = {
new InternalCompilerException(failedToCompileMsg(e), e)
}
def compilerError(e: CompileException): Throwable = {
new CompileException(failedToCompileMsg(e), e.getLocation)
}
def unsupportedTableChangeError(e: IllegalArgumentException): Throwable = {
new SparkException(s"Unsupported table change: ${e.getMessage}", e)
}
def notADatasourceRDDPartitionError(split: Partition): Throwable = {
new SparkException(s"[BUG] Not a DataSourceRDDPartition: $split")
}
def dataPathNotSpecifiedError(): Throwable = {
new IllegalArgumentException("'path' is not specified")
}
def createStreamingSourceNotSpecifySchemaError(): Throwable = {
new IllegalArgumentException(
s"""
|Schema must be specified when creating a streaming source DataFrame. If some
|files already exist in the directory, then depending on the file format you
|may be able to create a static DataFrame on that directory with
|'spark.read.load(directory)' and infer schema from it.
""".stripMargin)
}
def streamedOperatorUnsupportedByDataSourceError(
className: String, operator: String): Throwable = {
new UnsupportedOperationException(
s"Data source $className does not support streamed $operator")
}
def multiplePathsSpecifiedError(allPaths: Seq[String]): Throwable = {
new IllegalArgumentException("Expected exactly one path to be specified, but " +
s"got: ${allPaths.mkString(", ")}")
}
def failedToFindDataSourceError(provider: String, error: Throwable): Throwable = {
new ClassNotFoundException(
s"""
|Failed to find data source: $provider. Please find packages at
|http://spark.apache.org/third-party-projects.html
""".stripMargin, error)
}
def removedClassInSpark2Error(className: String, e: Throwable): Throwable = {
new ClassNotFoundException(s"$className was removed in Spark 2.0. " +
"Please check if your library is compatible with Spark 2.0", e)
}
def incompatibleDataSourceRegisterError(e: Throwable): Throwable = {
new SparkClassNotFoundException("INCOMPATIBLE_DATASOURCE_REGISTER", Array(e.getMessage), e)
}
def unrecognizedFileFormatError(format: String): Throwable = {
new IllegalStateException(s"unrecognized format $format")
}
def sparkUpgradeInReadingDatesError(
format: String, config: String, option: String): SparkUpgradeException = {
new SparkUpgradeException(
errorClass = "INCONSISTENT_BEHAVIOR_CROSS_VERSION",
messageParameters = Array(
"3.0",
s"""
|reading dates before 1582-10-15 or timestamps before 1900-01-01T00:00:00Z
|from $format files can be ambiguous, as the files may be written by
|Spark 2.x or legacy versions of Hive, which uses a legacy hybrid calendar
|that is different from Spark 3.0+'s Proleptic Gregorian calendar.
|See more details in SPARK-31404. You can set the SQL config '$config' or
|the datasource option '$option' to 'LEGACY' to rebase the datetime values
|w.r.t. the calendar difference during reading. To read the datetime values
|as it is, set the SQL config '$config' or the datasource option '$option'
|to 'CORRECTED'.
|""".stripMargin),
cause = null
)
}
def sparkUpgradeInWritingDatesError(format: String, config: String): SparkUpgradeException = {
new SparkUpgradeException(
errorClass = "INCONSISTENT_BEHAVIOR_CROSS_VERSION",
messageParameters = Array(
"3.0",
s"""
|writing dates before 1582-10-15 or timestamps before 1900-01-01T00:00:00Z
|into $format files can be dangerous, as the files may be read by Spark 2.x
|or legacy versions of Hive later, which uses a legacy hybrid calendar that
|is different from Spark 3.0+'s Proleptic Gregorian calendar. See more
|details in SPARK-31404. You can set $config to 'LEGACY' to rebase the
|datetime values w.r.t. the calendar difference during writing, to get maximum
|interoperability. Or set $config to 'CORRECTED' to write the datetime values
|as it is, if you are 100% sure that the written files will only be read by
|Spark 3.0+ or other systems that use Proleptic Gregorian calendar.
|""".stripMargin),
cause = null
)
}
def buildReaderUnsupportedForFileFormatError(format: String): Throwable = {
new UnsupportedOperationException(s"buildReader is not supported for $format")
}
def jobAbortedError(cause: Throwable): Throwable = {
new SparkException("Job aborted.", cause)
}
def taskFailedWhileWritingRowsError(cause: Throwable): Throwable = {
new SparkException("Task failed while writing rows.", cause)
}
def readCurrentFileNotFoundError(e: FileNotFoundException): Throwable = {
new FileNotFoundException(
s"""
|${e.getMessage}\n
|It is possible the underlying files have been updated. You can explicitly invalidate
|the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by
|recreating the Dataset/DataFrame involved.
""".stripMargin)
}
def unsupportedSaveModeError(saveMode: String, pathExists: Boolean): Throwable = {
new IllegalStateException(s"unsupported save mode $saveMode ($pathExists)")
}
def cannotClearOutputDirectoryError(staticPrefixPath: Path): Throwable = {
new IOException(s"Unable to clear output directory $staticPrefixPath prior to writing to it")
}
def cannotClearPartitionDirectoryError(path: Path): Throwable = {
new IOException(s"Unable to clear partition directory $path prior to writing to it")
}
def failedToCastValueToDataTypeForPartitionColumnError(
value: String, dataType: DataType, columnName: String): Throwable = {
new RuntimeException(s"Failed to cast value `$value` to " +
s"`$dataType` for partition column `$columnName`")
}
def endOfStreamError(): Throwable = {
new NoSuchElementException("End of stream")
}
def fallbackV1RelationReportsInconsistentSchemaError(
v2Schema: StructType, v1Schema: StructType): Throwable = {
new IllegalArgumentException(
"The fallback v1 relation reports inconsistent schema:\n" +
"Schema of v2 scan: " + v2Schema + "\n" +
"Schema of v1 relation: " + v1Schema)
}
def noRecordsFromEmptyDataReaderError(): Throwable = {
new IOException("No records should be returned from EmptyDataReader")
}
def fileNotFoundError(e: FileNotFoundException): Throwable = {
new FileNotFoundException(
e.getMessage + "\n" +
"It is possible the underlying files have been updated. " +
"You can explicitly invalidate the cache in Spark by " +
"recreating the Dataset/DataFrame involved.")
}
def unsupportedSchemaColumnConvertError(
filePath: String,
column: String,
logicalType: String,
physicalType: String,
e: Exception): Throwable = {
val message = "Parquet column cannot be converted in " +
s"file $filePath. Column: $column, " +
s"Expected: $logicalType, Found: $physicalType"
new QueryExecutionException(message, e)
}
def cannotReadFilesError(
e: Throwable,
path: String): Throwable = {
val message = s"Encountered error while reading file $path. Details: "
new QueryExecutionException(message, e)
}
def cannotCreateColumnarReaderError(): Throwable = {
new UnsupportedOperationException("Cannot create columnar reader.")
}
def invalidNamespaceNameError(namespace: Array[String]): Throwable = {
new IllegalArgumentException(s"Invalid namespace name: ${namespace.quoted}")
}
def unsupportedPartitionTransformError(transform: Transform): Throwable = {
new UnsupportedOperationException(
s"Unsupported partition transform: $transform")
}
def missingDatabaseLocationError(): Throwable = {
new IllegalArgumentException("Missing database location")
}
def cannotRemoveReservedPropertyError(property: String): Throwable = {
new UnsupportedOperationException(s"Cannot remove reserved property: $property")
}
def namespaceNotEmptyError(namespace: Array[String]): Throwable = {
new IllegalStateException(s"Namespace ${namespace.quoted} is not empty")
}
def writingJobFailedError(cause: Throwable): Throwable = {
new SparkException("Writing job failed.", cause)
}
def writingJobAbortedError(e: Throwable): Throwable = {
new SparkException(
errorClass = "WRITING_JOB_ABORTED",
messageParameters = Array.empty,
cause = e)
}
def commitDeniedError(
partId: Int, taskId: Long, attemptId: Int, stageId: Int, stageAttempt: Int): Throwable = {
val message = s"Commit denied for partition $partId (task $taskId, attempt $attemptId, " +
s"stage $stageId.$stageAttempt)"
new CommitDeniedException(message, stageId, partId, attemptId)
}
def unsupportedTableWritesError(ident: Identifier): Throwable = {
new SparkException(
s"Table implementation does not support writes: ${ident.quoted}")
}
def cannotCreateJDBCTableWithPartitionsError(): Throwable = {
new UnsupportedOperationException("Cannot create JDBC table with partition")
}
def unsupportedUserSpecifiedSchemaError(): Throwable = {
new UnsupportedOperationException("user-specified schema")
}
def writeUnsupportedForBinaryFileDataSourceError(): Throwable = {
new UnsupportedOperationException("Write is not supported for binary file data source")
}
def fileLengthExceedsMaxLengthError(status: FileStatus, maxLength: Int): Throwable = {
new SparkException(
s"The length of ${status.getPath} is ${status.getLen}, " +
s"which exceeds the max length allowed: ${maxLength}.")
}
def unsupportedFieldNameError(fieldName: String): Throwable = {
new RuntimeException(s"Unsupported field name: ${fieldName}")
}
def cannotSpecifyBothJdbcTableNameAndQueryError(
jdbcTableName: String, jdbcQueryString: String): Throwable = {
new IllegalArgumentException(
s"Both '$jdbcTableName' and '$jdbcQueryString' can not be specified at the same time.")
}
def missingJdbcTableNameAndQueryError(
jdbcTableName: String, jdbcQueryString: String): Throwable = {
new IllegalArgumentException(
s"Option '$jdbcTableName' or '$jdbcQueryString' is required."
)
}
def emptyOptionError(optionName: String): Throwable = {
new IllegalArgumentException(s"Option `$optionName` can not be empty.")
}
def invalidJdbcTxnIsolationLevelError(jdbcTxnIsolationLevel: String, value: String): Throwable = {
new IllegalArgumentException(
s"Invalid value `$value` for parameter `$jdbcTxnIsolationLevel`. This can be " +
"`NONE`, `READ_UNCOMMITTED`, `READ_COMMITTED`, `REPEATABLE_READ` or `SERIALIZABLE`.")
}
def cannotGetJdbcTypeError(dt: DataType): Throwable = {
new IllegalArgumentException(s"Can't get JDBC type for ${dt.catalogString}")
}
def unrecognizedSqlTypeError(sqlType: Int): Throwable = {
new SparkSQLException(errorClass = "UNRECOGNIZED_SQL_TYPE", Array(sqlType.toString))
}
def unsupportedJdbcTypeError(content: String): Throwable = {
new SQLException(s"Unsupported type $content")
}
def unsupportedArrayElementTypeBasedOnBinaryError(dt: DataType): Throwable = {
new IllegalArgumentException(s"Unsupported array element " +
s"type ${dt.catalogString} based on binary")
}
def nestedArraysUnsupportedError(): Throwable = {
new IllegalArgumentException("Nested arrays unsupported")
}
def cannotTranslateNonNullValueForFieldError(pos: Int): Throwable = {
new IllegalArgumentException(s"Can't translate non-null value for field $pos")
}
def invalidJdbcNumPartitionsError(n: Int, jdbcNumPartitions: String): Throwable = {
new IllegalArgumentException(
s"Invalid value `$n` for parameter `$jdbcNumPartitions` in table writing " +
"via JDBC. The minimum value is 1.")
}
def transactionUnsupportedByJdbcServerError(): Throwable = {
new SparkSQLFeatureNotSupportedException(
errorClass = "UNSUPPORTED_FEATURE",
messageParameters = Array("the target JDBC server does not support transaction and " +
"can only support ALTER TABLE with a single action."))
}
def dataTypeUnsupportedYetError(dataType: DataType): Throwable = {
new UnsupportedOperationException(s"$dataType is not supported yet.")
}
def unsupportedOperationForDataTypeError(dataType: DataType): Throwable = {
new UnsupportedOperationException(s"DataType: ${dataType.catalogString}")
}
def inputFilterNotFullyConvertibleError(owner: String): Throwable = {
new SparkException(s"The input filter of $owner should be fully convertible.")
}
def cannotReadFooterForFileError(file: Path, e: IOException): Throwable = {
new SparkException(s"Could not read footer for file: $file", e)
}
def cannotReadFooterForFileError(file: FileStatus, e: RuntimeException): Throwable = {
new IOException(s"Could not read footer for file: $file", e)
}
def foundDuplicateFieldInCaseInsensitiveModeError(
requiredFieldName: String, matchedOrcFields: String): Throwable = {
new RuntimeException(
s"""
|Found duplicate field(s) "$requiredFieldName": $matchedOrcFields
|in case-insensitive mode
""".stripMargin.replaceAll("\n", " "))
}
def foundDuplicateFieldInFieldIdLookupModeError(
requiredId: Int, matchedFields: String): Throwable = {
new RuntimeException(
s"""
|Found duplicate field(s) "$requiredId": $matchedFields
|in id mapping mode
""".stripMargin.replaceAll("\n", " "))
}
def failedToMergeIncompatibleSchemasError(
left: StructType, right: StructType, e: Throwable): Throwable = {
new SparkException(s"Failed to merge incompatible schemas $left and $right", e)
}
def ddlUnsupportedTemporarilyError(ddl: String): Throwable = {
new UnsupportedOperationException(s"$ddl is not supported temporarily.")
}
def operatingOnCanonicalizationPlanError(): Throwable = {
new IllegalStateException("operating on canonicalization plan")
}
def executeBroadcastTimeoutError(timeout: Long, ex: Option[TimeoutException]): Throwable = {
new SparkException(
s"""
|Could not execute broadcast in $timeout secs. You can increase the timeout
|for broadcasts via ${SQLConf.BROADCAST_TIMEOUT.key} or disable broadcast join
|by setting ${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key} to -1
""".stripMargin.replaceAll("\n", " "), ex.getOrElse(null))
}
def cannotCompareCostWithTargetCostError(cost: String): Throwable = {
new IllegalArgumentException(s"Could not compare cost with $cost")
}
def unsupportedDataTypeError(dt: String): Throwable = {
new UnsupportedOperationException(s"Unsupported data type: ${dt}")
}
def notSupportTypeError(dataType: DataType): Throwable = {
new Exception(s"not support type: $dataType")
}
def notSupportNonPrimitiveTypeError(): Throwable = {
new RuntimeException("Not support non-primitive type now")
}
def unsupportedTypeError(dataType: DataType): Throwable = {
new Exception(s"Unsupported type: ${dataType.catalogString}")
}
def useDictionaryEncodingWhenDictionaryOverflowError(): Throwable = {
new IllegalStateException(
"Dictionary encoding should not be used because of dictionary overflow.")
}
def endOfIteratorError(): Throwable = {
new NoSuchElementException("End of the iterator")
}
def cannotAllocateMemoryToGrowBytesToBytesMapError(): Throwable = {
new IOException("Could not allocate memory to grow BytesToBytesMap")
}
def cannotAcquireMemoryToBuildLongHashedRelationError(size: Long, got: Long): Throwable = {
new SparkException(s"Can't acquire $size bytes memory to build hash relation, " +
s"got $got bytes")
}
def cannotAcquireMemoryToBuildUnsafeHashedRelationError(): Throwable = {
new SparkOutOfMemoryError("There is not enough memory to build hash map")
}
def rowLargerThan256MUnsupportedError(): Throwable = {
new UnsupportedOperationException("Does not support row that is larger than 256M")
}
def cannotBuildHashedRelationWithUniqueKeysExceededError(): Throwable = {
new UnsupportedOperationException(
"Cannot build HashedRelation with more than 1/3 billions unique keys")
}
def cannotBuildHashedRelationLargerThan8GError(): Throwable = {
new UnsupportedOperationException(
"Can not build a HashedRelation that is larger than 8G")
}
def failedToPushRowIntoRowQueueError(rowQueue: String): Throwable = {
new SparkException(s"failed to push a row into $rowQueue")
}
def unexpectedWindowFunctionFrameError(frame: String): Throwable = {
new RuntimeException(s"Unexpected window function frame $frame.")
}
def cannotParseStatisticAsPercentileError(
stats: String, e: NumberFormatException): Throwable = {
new IllegalArgumentException(s"Unable to parse $stats as a percentile", e)
}
def statisticNotRecognizedError(stats: String): Throwable = {
new IllegalArgumentException(s"$stats is not a recognised statistic")
}
def unknownColumnError(unknownColumn: String): Throwable = {
new IllegalArgumentException(s"Unknown column: $unknownColumn")
}
def unexpectedAccumulableUpdateValueError(o: Any): Throwable = {
new IllegalArgumentException(s"Unexpected: $o")
}
def unscaledValueTooLargeForPrecisionError(): Throwable = {
new ArithmeticException("Unscaled value too large for precision. " +
s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error.")
}
def decimalPrecisionExceedsMaxPrecisionError(precision: Int, maxPrecision: Int): Throwable = {
new ArithmeticException(
s"Decimal precision $precision exceeds max precision $maxPrecision")
}
def outOfDecimalTypeRangeError(str: UTF8String): Throwable = {
new ArithmeticException(s"out of decimal type range: $str")
}
def unsupportedArrayTypeError(clazz: Class[_]): Throwable = {
new RuntimeException(s"Do not support array of type $clazz.")
}
def unsupportedJavaTypeError(clazz: Class[_]): Throwable = {
new RuntimeException(s"Do not support type $clazz.")
}
def failedParsingStructTypeError(raw: String): Throwable = {
new RuntimeException(s"Failed parsing ${StructType.simpleString}: $raw")
}
def failedMergingFieldsError(leftName: String, rightName: String, e: Throwable): Throwable = {
new SparkException(s"Failed to merge fields '$leftName' and '$rightName'. ${e.getMessage}")
}
def cannotMergeDecimalTypesWithIncompatiblePrecisionAndScaleError(
leftPrecision: Int, rightPrecision: Int, leftScale: Int, rightScale: Int): Throwable = {
new SparkException("Failed to merge decimal types with incompatible " +
s"precision $leftPrecision and $rightPrecision & scale $leftScale and $rightScale")
}
def cannotMergeDecimalTypesWithIncompatiblePrecisionError(
leftPrecision: Int, rightPrecision: Int): Throwable = {
new SparkException("Failed to merge decimal types with incompatible " +
s"precision $leftPrecision and $rightPrecision")
}
def cannotMergeDecimalTypesWithIncompatibleScaleError(
leftScale: Int, rightScale: Int): Throwable = {
new SparkException("Failed to merge decimal types with incompatible " +
s"scale $leftScale and $rightScale")
}
def cannotMergeIncompatibleDataTypesError(left: DataType, right: DataType): Throwable = {
new SparkException(s"Failed to merge incompatible data types ${left.catalogString}" +
s" and ${right.catalogString}")
}
def exceedMapSizeLimitError(size: Int): Throwable = {
new RuntimeException(s"Unsuccessful attempt to build maps with $size elements " +
s"due to exceeding the map size limit ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
}
def duplicateMapKeyFoundError(key: Any): Throwable = {
new RuntimeException(s"Duplicate map key $key was found, please check the input " +
"data. If you want to remove the duplicated keys, you can set " +
s"${SQLConf.MAP_KEY_DEDUP_POLICY.key} to ${SQLConf.MapKeyDedupPolicy.LAST_WIN} so that " +
"the key inserted at last takes precedence.")
}
def mapDataKeyArrayLengthDiffersFromValueArrayLengthError(): Throwable = {
new RuntimeException("The key array and value array of MapData must have the same length.")
}
def fieldDiffersFromDerivedLocalDateError(
field: ChronoField, actual: Int, expected: Int, candidate: LocalDate): Throwable = {
new DateTimeException(s"Conflict found: Field $field $actual differs from" +
s" $field $expected derived from $candidate")
}
def failToParseDateTimeInNewParserError(s: String, e: Throwable): Throwable = {
new SparkUpgradeException("3.0", s"Fail to parse '$s' in the new parser. You can " +
s"set ${SQLConf.LEGACY_TIME_PARSER_POLICY.key} to LEGACY to restore the behavior " +
s"before Spark 3.0, or set to CORRECTED and treat it as an invalid datetime string.", e)
}
def failToFormatDateTimeInNewFormatterError(
resultCandidate: String, e: Throwable): Throwable = {
new SparkUpgradeException("3.0",
s"""
      |Failed to format it to '$resultCandidate' in the new formatter. You can set
|${SQLConf.LEGACY_TIME_PARSER_POLICY.key} to LEGACY to restore the behavior before
|Spark 3.0, or set to CORRECTED and treat it as an invalid datetime string.
""".stripMargin.replaceAll("\n", " "), e)
}
def failToRecognizePatternAfterUpgradeError(pattern: String, e: Throwable): Throwable = {
new SparkUpgradeException("3.0", s"Fail to recognize '$pattern' pattern in the" +
s" DateTimeFormatter. 1) You can set ${SQLConf.LEGACY_TIME_PARSER_POLICY.key} to LEGACY" +
s" to restore the behavior before Spark 3.0. 2) You can form a valid datetime pattern" +
s" with the guide from https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html",
e)
}
def failToRecognizePatternError(pattern: String, e: Throwable): Throwable = {
new RuntimeException(s"Fail to recognize '$pattern' pattern in the" +
" DateTimeFormatter. You can form a valid datetime pattern" +
" with the guide from https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html",
e)
}
def cannotCastToDateTimeError(value: Any, to: DataType): Throwable = {
new DateTimeException(s"Cannot cast $value to $to. To return NULL instead, use 'try_cast'. " +
s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error.")
}
def registeringStreamingQueryListenerError(e: Exception): Throwable = {
new SparkException("Exception when registering StreamingQueryListener", e)
}
def concurrentQueryInstanceError(): Throwable = {
new SparkConcurrentModificationException("CONCURRENT_QUERY", Array.empty)
}
def cannotParseJsonArraysAsStructsError(): Throwable = {
new RuntimeException("Parsing JSON arrays as structs is forbidden.")
}
def cannotParseStringAsDataTypeError(parser: JsonParser, token: JsonToken, dataType: DataType)
: Throwable = {
new RuntimeException(
s"Cannot parse field name ${parser.getCurrentName}, " +
s"field value ${parser.getText}, " +
s"[$token] as target spark data type [$dataType].")
}
def cannotParseStringAsDataTypeError(pattern: String, value: String, dataType: DataType)
: Throwable = {
new RuntimeException(
s"Cannot parse field value ${value} for pattern ${pattern} " +
s"as target spark data type [$dataType].")
}
def failToParseEmptyStringForDataTypeError(dataType: DataType): Throwable = {
new RuntimeException(
s"Failed to parse an empty string for data type ${dataType.catalogString}")
}
def failToParseValueForDataTypeError(parser: JsonParser, token: JsonToken, dataType: DataType)
: Throwable = {
new RuntimeException(
s"Failed to parse field name ${parser.getCurrentName}, " +
s"field value ${parser.getText}, " +
s"[$token] to target spark data type [$dataType].")
}
def rootConverterReturnNullError(): Throwable = {
new RuntimeException("Root converter returned null")
}
def cannotHaveCircularReferencesInBeanClassError(clazz: Class[_]): Throwable = {
new UnsupportedOperationException(
"Cannot have circular references in bean class, but got the circular reference " +
s"of class $clazz")
}
def cannotHaveCircularReferencesInClassError(t: String): Throwable = {
new UnsupportedOperationException(
s"cannot have circular references in class, but got the circular reference of class $t")
}
def cannotUseInvalidJavaIdentifierAsFieldNameError(
fieldName: String, walkedTypePath: WalkedTypePath): Throwable = {
new UnsupportedOperationException(s"`$fieldName` is not a valid identifier of " +
s"Java and cannot be used as field name\n$walkedTypePath")
}
def cannotFindEncoderForTypeError(
tpe: String, walkedTypePath: WalkedTypePath): Throwable = {
new UnsupportedOperationException(s"No Encoder found for $tpe\n$walkedTypePath")
}
def attributesForTypeUnsupportedError(schema: Schema): Throwable = {
new UnsupportedOperationException(s"Attributes for type $schema is not supported")
}
def schemaForTypeUnsupportedError(tpe: String): Throwable = {
new UnsupportedOperationException(s"Schema for type $tpe is not supported")
}
def cannotFindConstructorForTypeError(tpe: String): Throwable = {
new UnsupportedOperationException(
s"""
|Unable to find constructor for $tpe.
        |This could happen if $tpe is an interface, or a trait without a companion object
|constructor.
""".stripMargin.replaceAll("\n", " "))
}
def paramExceedOneCharError(paramName: String): Throwable = {
new RuntimeException(s"$paramName cannot be more than one character")
}
def paramIsNotIntegerError(paramName: String, value: String): Throwable = {
new RuntimeException(s"$paramName should be an integer. Found $value")
}
def paramIsNotBooleanValueError(paramName: String): Throwable = {
new Exception(s"$paramName flag can be true or false")
}
def foundNullValueForNotNullableFieldError(name: String): Throwable = {
new RuntimeException(s"null value found but field $name is not nullable.")
}
def malformedCSVRecordError(): Throwable = {
new RuntimeException("Malformed CSV record")
}
def elementsOfTupleExceedLimitError(): Throwable = {
new UnsupportedOperationException("Due to Scala's limited support of tuple, " +
"tuple with more than 22 elements are not supported.")
}
def expressionDecodingError(e: Exception, expressions: Seq[Expression]): Throwable = {
new RuntimeException(s"Error while decoding: $e\n" +
s"${expressions.map(_.simpleString(SQLConf.get.maxToStringFields)).mkString("\n")}", e)
}
def expressionEncodingError(e: Exception, expressions: Seq[Expression]): Throwable = {
new RuntimeException(s"Error while encoding: $e\n" +
s"${expressions.map(_.simpleString(SQLConf.get.maxToStringFields)).mkString("\n")}", e)
}
def classHasUnexpectedSerializerError(clsName: String, objSerializer: Expression): Throwable = {
new RuntimeException(s"class $clsName has unexpected serializer: $objSerializer")
}
def cannotGetOuterPointerForInnerClassError(innerCls: Class[_]): Throwable = {
new RuntimeException(s"Failed to get outer pointer for ${innerCls.getName}")
}
def userDefinedTypeNotAnnotatedAndRegisteredError(udt: UserDefinedType[_]): Throwable = {
new SparkException(s"${udt.userClass.getName} is not annotated with " +
"SQLUserDefinedType nor registered with UDTRegistration.}")
}
def invalidInputSyntaxForBooleanError(s: UTF8String): UnsupportedOperationException = {
new UnsupportedOperationException(s"invalid input syntax for type boolean: $s. " +
s"To return NULL instead, use 'try_cast'. If necessary set ${SQLConf.ANSI_ENABLED.key} " +
"to false to bypass this error.")
}
def unsupportedOperandTypeForSizeFunctionError(dataType: DataType): Throwable = {
new UnsupportedOperationException(
s"The size function doesn't support the operand type ${dataType.getClass.getCanonicalName}")
}
def unexpectedValueForStartInFunctionError(prettyName: String): RuntimeException = {
new RuntimeException(
s"Unexpected value for start in function $prettyName: SQL array indices start at 1.")
}
def unexpectedValueForLengthInFunctionError(prettyName: String): RuntimeException = {
new RuntimeException(s"Unexpected value for length in function $prettyName: " +
"length must be greater than or equal to 0.")
}
def sqlArrayIndexNotStartAtOneError(): ArrayIndexOutOfBoundsException = {
new ArrayIndexOutOfBoundsException("SQL array indices start at 1")
}
def concatArraysWithElementsExceedLimitError(numberOfElements: Long): Throwable = {
new RuntimeException(
s"""
        |Unsuccessful attempt to concat arrays with $numberOfElements
|elements due to exceeding the array size limit
|${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.
""".stripMargin.replaceAll("\n", " "))
}
def flattenArraysWithElementsExceedLimitError(numberOfElements: Long): Throwable = {
new RuntimeException(
s"""
        |Unsuccessful attempt to flatten an array of arrays with $numberOfElements
|elements due to exceeding the array size limit
|${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.
""".stripMargin.replaceAll("\n", " "))
}
def createArrayWithElementsExceedLimitError(count: Any): RuntimeException = {
new RuntimeException(
s"""
        |Unsuccessful attempt to create array with $count elements
|due to exceeding the array size limit
|${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.
""".stripMargin.replaceAll("\n", " "))
}
def unionArrayWithElementsExceedLimitError(length: Int): Throwable = {
new RuntimeException(
s"""
        |Unsuccessful attempt to union arrays with $length
|elements due to exceeding the array size limit
|${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.
""".stripMargin.replaceAll("\n", " "))
}
def initialTypeNotTargetDataTypeError(dataType: DataType, target: String): Throwable = {
new UnsupportedOperationException(s"Initial type ${dataType.catalogString} must be a $target")
}
def initialTypeNotTargetDataTypesError(dataType: DataType): Throwable = {
new UnsupportedOperationException(
s"Initial type ${dataType.catalogString} must be " +
s"an ${ArrayType.simpleString}, a ${StructType.simpleString} or a ${MapType.simpleString}")
}
def cannotConvertColumnToJSONError(name: String, dataType: DataType): Throwable = {
new UnsupportedOperationException(
s"Unable to convert column $name of type ${dataType.catalogString} to JSON.")
}
def malformedRecordsDetectedInSchemaInferenceError(e: Throwable): Throwable = {
new SparkException("Malformed records are detected in schema inference. " +
s"Parse Mode: ${FailFastMode.name}.", e)
}
def malformedJSONError(): Throwable = {
new SparkException("Malformed JSON")
}
def malformedRecordsDetectedInSchemaInferenceError(dataType: DataType): Throwable = {
new SparkException(
s"""
|Malformed records are detected in schema inference.
|Parse Mode: ${FailFastMode.name}. Reasons: Failed to infer a common schema.
|Struct types are expected, but `${dataType.catalogString}` was found.
""".stripMargin.replaceAll("\n", " "))
}
def cannotRewriteDomainJoinWithConditionsError(
conditions: Seq[Expression], d: DomainJoin): Throwable = {
new IllegalStateException(
s"Unable to rewrite domain join with conditions: $conditions\n$d")
}
def decorrelateInnerQueryThroughPlanUnsupportedError(plan: LogicalPlan): Throwable = {
new UnsupportedOperationException(
s"Decorrelate inner query through ${plan.nodeName} is not supported.")
}
def methodCalledInAnalyzerNotAllowedError(): Throwable = {
new RuntimeException("This method should not be called in the analyzer")
}
def cannotSafelyMergeSerdePropertiesError(
props1: Map[String, String],
props2: Map[String, String],
conflictKeys: Set[String]): Throwable = {
new UnsupportedOperationException(
s"""
|Cannot safely merge SERDEPROPERTIES:
|${props1.map { case (k, v) => s"$k=$v" }.mkString("{", ",", "}")}
|${props2.map { case (k, v) => s"$k=$v" }.mkString("{", ",", "}")}
|The conflict keys: ${conflictKeys.mkString(", ")}
|""".stripMargin)
}
def pairUnsupportedAtFunctionError(
r1: ValueInterval, r2: ValueInterval, function: String): Throwable = {
new UnsupportedOperationException(s"Not supported pair: $r1, $r2 at $function()")
}
def onceStrategyIdempotenceIsBrokenForBatchError[TreeType <: TreeNode[_]](
batchName: String, plan: TreeType, reOptimized: TreeType): Throwable = {
new RuntimeException(
s"""
|Once strategy's idempotence is broken for batch $batchName
|${sideBySide(plan.treeString, reOptimized.treeString).mkString("\n")}
""".stripMargin)
}
def structuralIntegrityOfInputPlanIsBrokenInClassError(className: String): Throwable = {
new RuntimeException("The structural integrity of the input plan is broken in " +
s"$className.")
}
def structuralIntegrityIsBrokenAfterApplyingRuleError(
ruleName: String, batchName: String): Throwable = {
new RuntimeException(s"After applying rule $ruleName in batch $batchName, " +
"the structural integrity of the plan is broken.")
}
def ruleIdNotFoundForRuleError(ruleName: String): Throwable = {
new NoSuchElementException(s"Rule id not found for $ruleName")
}
def cannotCreateArrayWithElementsExceedLimitError(
numElements: Long, additionalErrorMessage: String): Throwable = {
new RuntimeException(
s"""
|Cannot create array with $numElements
|elements of data due to exceeding the limit
|${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH} elements for ArrayData.
|$additionalErrorMessage
""".stripMargin.replaceAll("\n", " "))
}
def indexOutOfBoundsOfArrayDataError(idx: Int): Throwable = {
new SparkIndexOutOfBoundsException(errorClass = "INDEX_OUT_OF_BOUNDS", Array(idx.toString))
}
def malformedRecordsDetectedInRecordParsingError(e: BadRecordException): Throwable = {
new SparkException("Malformed records are detected in record parsing. " +
s"Parse Mode: ${FailFastMode.name}. To process malformed records as null " +
"result, try setting the option 'mode' as 'PERMISSIVE'.", e)
}
def remoteOperationsUnsupportedError(): Throwable = {
new RuntimeException("Remote operations not supported")
}
def invalidKerberosConfigForHiveServer2Error(): Throwable = {
new IOException(
"HiveServer2 Kerberos principal or keytab is not correctly configured")
}
def parentSparkUIToAttachTabNotFoundError(): Throwable = {
new SparkException("Parent SparkUI to attach this tab to not found!")
}
def inferSchemaUnsupportedForHiveError(): Throwable = {
new UnsupportedOperationException("inferSchema is not supported for hive data source.")
}
def requestedPartitionsMismatchTablePartitionsError(
table: CatalogTable, partition: Map[String, Option[String]]): Throwable = {
new SparkException(
s"""
|Requested partitioning does not match the ${table.identifier.table} table:
|Requested partitions: ${partition.keys.mkString(",")}
|Table partitions: ${table.partitionColumnNames.mkString(",")}
""".stripMargin)
}
def dynamicPartitionKeyNotAmongWrittenPartitionPathsError(key: String): Throwable = {
new SparkException(s"Dynamic partition key $key is not among written partition paths.")
}
def cannotRemovePartitionDirError(partitionPath: Path): Throwable = {
new RuntimeException(s"Cannot remove partition directory '$partitionPath'")
}
def cannotCreateStagingDirError(message: String, e: IOException): Throwable = {
new RuntimeException(s"Cannot create staging directory: $message", e)
}
def serDeInterfaceNotFoundError(e: NoClassDefFoundError): Throwable = {
new ClassNotFoundException("The SerDe interface removed since Hive 2.3(HIVE-15167)." +
" Please migrate your custom SerDes to Hive 2.3. See HIVE-15167 for more details.", e)
}
def convertHiveTableToCatalogTableError(
e: SparkException, dbName: String, tableName: String): Throwable = {
new SparkException(s"${e.getMessage}, db: $dbName, table: $tableName", e)
}
def cannotRecognizeHiveTypeError(
e: ParseException, fieldType: String, fieldName: String): Throwable = {
new SparkException(
s"Cannot recognize hive type string: $fieldType, column: $fieldName", e)
}
def getTablesByTypeUnsupportedByHiveVersionError(): Throwable = {
new UnsupportedOperationException("Hive 2.2 and lower versions don't support " +
"getTablesByType. Please use Hive 2.3 or higher version.")
}
def dropTableWithPurgeUnsupportedError(): Throwable = {
new UnsupportedOperationException("DROP TABLE ... PURGE")
}
def alterTableWithDropPartitionAndPurgeUnsupportedError(): Throwable = {
new UnsupportedOperationException("ALTER TABLE ... DROP PARTITION ... PURGE")
}
def invalidPartitionFilterError(): Throwable = {
new UnsupportedOperationException(
"""Partition filter cannot have both `"` and `'` characters""")
}
def getPartitionMetadataByFilterError(e: InvocationTargetException): Throwable = {
new RuntimeException(
s"""
|Caught Hive MetaException attempting to get partition metadata by filter
|from Hive. You can set the Spark configuration setting
|${SQLConf.HIVE_METASTORE_PARTITION_PRUNING_FALLBACK_ON_EXCEPTION.key} to true to work
      |around this problem; however, this will result in degraded performance. Please
|report a bug: https://issues.apache.org/jira/browse/SPARK
""".stripMargin.replaceAll("\n", " "), e)
}
def unsupportedHiveMetastoreVersionError(version: String, key: String): Throwable = {
new UnsupportedOperationException(s"Unsupported Hive Metastore version ($version). " +
s"Please set $key with a valid version.")
}
def loadHiveClientCausesNoClassDefFoundError(
cnf: NoClassDefFoundError,
execJars: Seq[URL],
key: String,
e: InvocationTargetException): Throwable = {
new ClassNotFoundException(
s"""
|$cnf when creating Hive client using classpath: ${execJars.mkString(", ")}\n
|Please make sure that jars for your version of hive and hadoop are included in the
|paths passed to $key.
""".stripMargin.replaceAll("\n", " "), e)
}
def cannotFetchTablesOfDatabaseError(dbName: String, e: Exception): Throwable = {
new SparkException(s"Unable to fetch tables of db $dbName", e)
}
def illegalLocationClauseForViewPartitionError(): Throwable = {
new SparkException("LOCATION clause illegal for view partition")
}
def renamePathAsExistsPathError(srcPath: Path, dstPath: Path): Throwable = {
new SparkFileAlreadyExistsException(errorClass = "FAILED_RENAME_PATH",
Array(srcPath.toString, dstPath.toString))
}
def renameAsExistsPathError(dstPath: Path): Throwable = {
new FileAlreadyExistsException(s"Failed to rename as $dstPath already exists")
}
def renameSrcPathNotFoundError(srcPath: Path): Throwable = {
new SparkFileNotFoundException(errorClass = "RENAME_SRC_PATH_NOT_FOUND",
Array(srcPath.toString))
}
def failedRenameTempFileError(srcPath: Path, dstPath: Path): Throwable = {
new IOException(s"Failed to rename temp file $srcPath to $dstPath as rename returned false")
}
def legacyMetadataPathExistsError(metadataPath: Path, legacyMetadataPath: Path): Throwable = {
new SparkException(
s"""
|Error: we detected a possible problem with the location of your "_spark_metadata"
|directory and you likely need to move it before restarting this query.
|
|Earlier version of Spark incorrectly escaped paths when writing out the
|"_spark_metadata" directory for structured streaming. While this was corrected in
|Spark 3.0, it appears that your query was started using an earlier version that
|incorrectly handled the "_spark_metadata" path.
|
|Correct "_spark_metadata" Directory: $metadataPath
|Incorrect "_spark_metadata" Directory: $legacyMetadataPath
|
|Please move the data from the incorrect directory to the correct one, delete the
|incorrect directory, and then restart this query. If you believe you are receiving
|this message in error, you can disable it with the SQL conf
|${SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED.key}.
""".stripMargin)
}
def partitionColumnNotFoundInSchemaError(col: String, schema: StructType): Throwable = {
new RuntimeException(s"Partition column $col not found in schema $schema")
}
def stateNotDefinedOrAlreadyRemovedError(): Throwable = {
new NoSuchElementException("State is either not defined or has already been removed")
}
def cannotSetTimeoutDurationError(): Throwable = {
new UnsupportedOperationException(
"Cannot set timeout duration without enabling processing time timeout in " +
"[map|flatMap]GroupsWithState")
}
def cannotGetEventTimeWatermarkError(): Throwable = {
new UnsupportedOperationException(
"Cannot get event time watermark timestamp without setting watermark before " +
"[map|flatMap]GroupsWithState")
}
def cannotSetTimeoutTimestampError(): Throwable = {
new UnsupportedOperationException(
"Cannot set timeout timestamp without enabling event time timeout in " +
"[map|flatMapGroupsWithState")
}
def batchMetadataFileNotFoundError(batchMetadataFile: Path): Throwable = {
new FileNotFoundException(s"Unable to find batch $batchMetadataFile")
}
def multiStreamingQueriesUsingPathConcurrentlyError(
path: String, e: FileAlreadyExistsException): Throwable = {
new ConcurrentModificationException(
s"Multiple streaming queries are concurrently using $path", e)
}
def addFilesWithAbsolutePathUnsupportedError(commitProtocol: String): Throwable = {
new UnsupportedOperationException(
s"$commitProtocol does not support adding files with an absolute path")
}
def microBatchUnsupportedByDataSourceError(srcName: String): Throwable = {
new UnsupportedOperationException(
s"Data source $srcName does not support microbatch processing.")
}
def cannotExecuteStreamingRelationExecError(): Throwable = {
new UnsupportedOperationException("StreamingRelationExec cannot be executed")
}
def invalidStreamingOutputModeError(outputMode: Option[OutputMode]): Throwable = {
new UnsupportedOperationException(s"Invalid output mode: $outputMode")
}
def catalogPluginClassNotFoundError(name: String): Throwable = {
new CatalogNotFoundException(
s"Catalog '$name' plugin class not found: spark.sql.catalog.$name is not defined")
}
def catalogPluginClassNotImplementedError(name: String, pluginClassName: String): Throwable = {
new SparkException(
s"Plugin class for catalog '$name' does not implement CatalogPlugin: $pluginClassName")
}
def catalogPluginClassNotFoundForCatalogError(
name: String,
pluginClassName: String): Throwable = {
new SparkException(s"Cannot find catalog plugin class for catalog '$name': $pluginClassName")
}
def catalogFailToFindPublicNoArgConstructorError(
name: String,
pluginClassName: String,
e: Exception): Throwable = {
new SparkException(
s"Failed to find public no-arg constructor for catalog '$name': $pluginClassName)", e)
}
def catalogFailToCallPublicNoArgConstructorError(
name: String,
pluginClassName: String,
e: Exception): Throwable = {
new SparkException(
s"Failed to call public no-arg constructor for catalog '$name': $pluginClassName)", e)
}
def cannotInstantiateAbstractCatalogPluginClassError(
name: String,
pluginClassName: String,
e: Exception): Throwable = {
new SparkException("Cannot instantiate abstract catalog plugin class for " +
s"catalog '$name': $pluginClassName", e.getCause)
}
def failedToInstantiateConstructorForCatalogError(
name: String,
pluginClassName: String,
e: Exception): Throwable = {
new SparkException("Failed during instantiating constructor for catalog " +
s"'$name': $pluginClassName", e.getCause)
}
def noSuchElementExceptionError(): Throwable = {
new NoSuchElementException
}
def noSuchElementExceptionError(key: String): Throwable = {
new NoSuchElementException(key)
}
def cannotMutateReadOnlySQLConfError(): Throwable = {
new UnsupportedOperationException("Cannot mutate ReadOnlySQLConf.")
}
def cannotCloneOrCopyReadOnlySQLConfError(): Throwable = {
new UnsupportedOperationException("Cannot clone/copy ReadOnlySQLConf.")
}
def cannotGetSQLConfInSchedulerEventLoopThreadError(): Throwable = {
new RuntimeException("Cannot get SQLConf inside scheduler event loop thread.")
}
def unsupportedOperationExceptionError(): Throwable = {
new UnsupportedOperationException
}
def nullLiteralsCannotBeCastedError(name: String): Throwable = {
new UnsupportedOperationException(s"null literals can't be casted to $name")
}
def notUserDefinedTypeError(name: String, userClass: String): Throwable = {
new SparkException(s"$name is not an UserDefinedType. Please make sure registering " +
s"an UserDefinedType for ${userClass}")
}
def cannotLoadUserDefinedTypeError(name: String, userClass: String): Throwable = {
new SparkException(s"Can not load in UserDefinedType ${name} for user class ${userClass}.")
}
def timeZoneIdNotSpecifiedForTimestampTypeError(): Throwable = {
new SparkUnsupportedOperationException(
errorClass = "UNSUPPORTED_OPERATION",
messageParameters = Array(
s"${TimestampType.catalogString} must supply timeZoneId parameter " +
s"while converting to ArrowType")
)
}
def notPublicClassError(name: String): Throwable = {
new UnsupportedOperationException(
s"$name is not a public class. Only public classes are supported.")
}
def primitiveTypesNotSupportedError(): Throwable = {
new UnsupportedOperationException("Primitive types are not supported.")
}
def fieldIndexOnRowWithoutSchemaError(): Throwable = {
new UnsupportedOperationException("fieldIndex on a Row without schema is undefined.")
}
def valueIsNullError(index: Int): Throwable = {
new NullPointerException(s"Value at index $index is null")
}
def onlySupportDataSourcesProvidingFileFormatError(providingClass: String): Throwable = {
new SparkException(s"Only Data Sources providing FileFormat are supported: $providingClass")
}
def failToSetOriginalPermissionBackError(
permission: FsPermission,
path: Path,
e: Throwable): Throwable = {
new SparkSecurityException(errorClass = "FAILED_SET_ORIGINAL_PERMISSION_BACK",
Array(permission.toString, path.toString, e.getMessage))
}
def failToSetOriginalACLBackError(aclEntries: String, path: Path, e: Throwable): Throwable = {
new SecurityException(s"Failed to set original ACL $aclEntries back to " +
s"the created path: $path. Exception: ${e.getMessage}")
}
def multiFailuresInStageMaterializationError(error: Throwable): Throwable = {
new SparkException("Multiple failures in stage materialization.", error)
}
def unrecognizedCompressionSchemaTypeIDError(typeId: Int): Throwable = {
new UnsupportedOperationException(s"Unrecognized compression scheme type ID: $typeId")
}
def getParentLoggerNotImplementedError(className: String): Throwable = {
new SQLFeatureNotSupportedException(s"$className.getParentLogger is not yet implemented.")
}
def cannotCreateParquetConverterForTypeError(t: DecimalType, parquetType: String): Throwable = {
new RuntimeException(
s"""
|Unable to create Parquet converter for ${t.typeName}
|whose Parquet type is $parquetType without decimal metadata. Please read this
|column/field as Spark BINARY type.
""".stripMargin.replaceAll("\n", " "))
}
def cannotCreateParquetConverterForDecimalTypeError(
t: DecimalType, parquetType: String): Throwable = {
new RuntimeException(
s"""
|Unable to create Parquet converter for decimal type ${t.json} whose Parquet type is
|$parquetType. Parquet DECIMAL type can only be backed by INT32, INT64,
|FIXED_LEN_BYTE_ARRAY, or BINARY.
""".stripMargin.replaceAll("\n", " "))
}
def cannotCreateParquetConverterForDataTypeError(
t: DataType, parquetType: String): Throwable = {
new RuntimeException(s"Unable to create Parquet converter for data type ${t.json} " +
s"whose Parquet type is $parquetType")
}
def cannotAddMultiPartitionsOnNonatomicPartitionTableError(tableName: String): Throwable = {
new UnsupportedOperationException(
s"Nonatomic partition table $tableName can not add multiple partitions.")
}
def userSpecifiedSchemaUnsupportedByDataSourceError(provider: TableProvider): Throwable = {
new UnsupportedOperationException(
s"${provider.getClass.getSimpleName} source does not support user-specified schema.")
}
def cannotDropMultiPartitionsOnNonatomicPartitionTableError(tableName: String): Throwable = {
new UnsupportedOperationException(
s"Nonatomic partition table $tableName can not drop multiple partitions.")
}
def truncateMultiPartitionUnsupportedError(tableName: String): Throwable = {
new UnsupportedOperationException(
s"The table $tableName does not support truncation of multiple partition.")
}
def overwriteTableByUnsupportedExpressionError(table: Table): Throwable = {
new SparkException(s"Table does not support overwrite by expression: $table")
}
def dynamicPartitionOverwriteUnsupportedByTableError(table: Table): Throwable = {
new SparkException(s"Table does not support dynamic partition overwrite: $table")
}
def failedMergingSchemaError(schema: StructType, e: SparkException): Throwable = {
new SparkException(s"Failed merging schema:\n${schema.treeString}", e)
}
def cannotBroadcastTableOverMaxTableRowsError(
maxBroadcastTableRows: Long, numRows: Long): Throwable = {
new SparkException(
s"Cannot broadcast the table over $maxBroadcastTableRows rows: $numRows rows")
}
def cannotBroadcastTableOverMaxTableBytesError(
maxBroadcastTableBytes: Long, dataSize: Long): Throwable = {
new SparkException("Cannot broadcast the table that is larger than" +
s" ${maxBroadcastTableBytes >> 30}GB: ${dataSize >> 30} GB")
}
def notEnoughMemoryToBuildAndBroadcastTableError(oe: OutOfMemoryError): Throwable = {
new OutOfMemoryError("Not enough memory to build and broadcast the table to all " +
"worker nodes. As a workaround, you can either disable broadcast by setting " +
s"${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key} to -1 or increase the spark " +
s"driver memory by setting ${SparkLauncher.DRIVER_MEMORY} to a higher value.")
.initCause(oe.getCause)
}
def executeCodePathUnsupportedError(execName: String): Throwable = {
new UnsupportedOperationException(s"$execName does not support the execute() code path.")
}
def cannotMergeClassWithOtherClassError(className: String, otherClass: String): Throwable = {
new UnsupportedOperationException(
s"Cannot merge $className with $otherClass")
}
def continuousProcessingUnsupportedByDataSourceError(sourceName: String): Throwable = {
new UnsupportedOperationException(
s"Data source $sourceName does not support continuous processing.")
}
def failedToReadDataError(failureReason: Throwable): Throwable = {
new SparkException("Data read failed", failureReason)
}
def failedToGenerateEpochMarkerError(failureReason: Throwable): Throwable = {
new SparkException("Epoch marker generation failed", failureReason)
}
def foreachWriterAbortedDueToTaskFailureError(): Throwable = {
new SparkException("Foreach writer has been aborted due to a task failure")
}
def integerOverflowError(message: String): Throwable = {
new ArithmeticException(s"Integer overflow. $message")
}
def failedToReadDeltaFileError(fileToRead: Path, clazz: String, keySize: Int): Throwable = {
new IOException(
s"Error reading delta file $fileToRead of $clazz: key size cannot be $keySize")
}
def failedToReadSnapshotFileError(fileToRead: Path, clazz: String, message: String): Throwable = {
new IOException(s"Error reading snapshot file $fileToRead of $clazz: $message")
}
def cannotPurgeAsBreakInternalStateError(): Throwable = {
new UnsupportedOperationException("Cannot purge as it might break internal state.")
}
def cleanUpSourceFilesUnsupportedError(): Throwable = {
new UnsupportedOperationException("Clean up source files is not supported when" +
" reading from the output directory of FileStreamSink.")
}
def latestOffsetNotCalledError(): Throwable = {
new UnsupportedOperationException(
"latestOffset(Offset, ReadLimit) should be called instead of this method")
}
def legacyCheckpointDirectoryExistsError(
checkpointPath: Path, legacyCheckpointDir: String): Throwable = {
new SparkException(
s"""
|Error: we detected a possible problem with the location of your checkpoint and you
|likely need to move it before restarting this query.
|
|Earlier version of Spark incorrectly escaped paths when writing out checkpoints for
|structured streaming. While this was corrected in Spark 3.0, it appears that your
|query was started using an earlier version that incorrectly handled the checkpoint
|path.
|
|Correct Checkpoint Directory: $checkpointPath
|Incorrect Checkpoint Directory: $legacyCheckpointDir
|
|Please move the data from the incorrect directory to the correct one, delete the
|incorrect directory, and then restart this query. If you believe you are receiving
|this message in error, you can disable it with the SQL conf
|${SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED.key}.
""".stripMargin)
}
def subprocessExitedError(
exitCode: Int, stderrBuffer: CircularBuffer, cause: Throwable): Throwable = {
new SparkException(s"Subprocess exited with status $exitCode. " +
s"Error: ${stderrBuffer.toString}", cause)
}
def outputDataTypeUnsupportedByNodeWithoutSerdeError(
nodeName: String, dt: DataType): Throwable = {
new SparkException(s"$nodeName without serde does not support " +
s"${dt.getClass.getSimpleName} as output data type")
}
def invalidStartIndexError(numRows: Int, startIndex: Int): Throwable = {
new ArrayIndexOutOfBoundsException(
"Invalid `startIndex` provided for generating iterator over the array. " +
s"Total elements: $numRows, requested `startIndex`: $startIndex")
}
def concurrentModificationOnExternalAppendOnlyUnsafeRowArrayError(
className: String): Throwable = {
new ConcurrentModificationException(
s"The backing $className has been modified since the creation of this Iterator")
}
def doExecuteBroadcastNotImplementedError(nodeName: String): Throwable = {
new UnsupportedOperationException(s"$nodeName does not implement doExecuteBroadcast")
}
def databaseNameConflictWithSystemPreservedDatabaseError(globalTempDB: String): Throwable = {
new SparkException(
s"""
|$globalTempDB is a system preserved database, please rename your existing database
|to resolve the name conflict, or set a different value for
|${GLOBAL_TEMP_DATABASE.key}, and launch your Spark application again.
""".stripMargin.split("\n").mkString(" "))
}
def commentOnTableUnsupportedError(): Throwable = {
new SQLFeatureNotSupportedException("comment on table is not supported")
}
def unsupportedUpdateColumnNullabilityError(): Throwable = {
new SQLFeatureNotSupportedException("UpdateColumnNullability is not supported")
}
def renameColumnUnsupportedForOlderMySQLError(): Throwable = {
new SQLFeatureNotSupportedException(
"Rename column is only supported for MySQL version 8.0 and above.")
}
def failedToExecuteQueryError(e: Throwable): QueryExecutionException = {
val message = "Hit an error when executing a query" +
(if (e.getMessage == null) "" else s": ${e.getMessage}")
new QueryExecutionException(message, e)
}
def nestedFieldUnsupportedError(colName: String): Throwable = {
new UnsupportedOperationException(s"Nested field $colName is not supported.")
}
def transformationsAndActionsNotInvokedByDriverError(): Throwable = {
new SparkException(
"""
|Dataset transformations and actions can only be invoked by the driver, not inside of
|other Dataset transformations; for example, dataset1.map(x => dataset2.values.count()
|* x) is invalid because the values transformation and count action cannot be
|performed inside of the dataset1.map transformation. For more information,
|see SPARK-28702.
""".stripMargin.split("\n").mkString(" "))
}
def repeatedPivotsUnsupportedError(): Throwable = {
new SparkUnsupportedOperationException(
errorClass = "UNSUPPORTED_FEATURE",
messageParameters = Array("Repeated pivots."))
}
def pivotNotAfterGroupByUnsupportedError(): Throwable = {
new SparkUnsupportedOperationException(
errorClass = "UNSUPPORTED_FEATURE",
messageParameters = Array("Pivot not after a groupBy."))
}
def invalidAesKeyLengthError(actualLength: Int): RuntimeException = {
new SparkRuntimeException(
errorClass = "INVALID_PARAMETER_VALUE",
messageParameters = Array(
"key",
"the aes_encrypt/aes_decrypt function",
s"expects a binary value with 16, 24 or 32 bytes, but got ${actualLength.toString} bytes."))
}
def aesModeUnsupportedError(mode: String, padding: String): RuntimeException = {
new SparkRuntimeException(
errorClass = "UNSUPPORTED_FEATURE",
messageParameters = Array(
s"AES-$mode with the padding $padding by the aes_encrypt/aes_decrypt function."))
}
def aesCryptoError(detailMessage: String): RuntimeException = {
new SparkRuntimeException(
errorClass = "INVALID_PARAMETER_VALUE",
messageParameters = Array(
"expr, key",
"the aes_encrypt/aes_decrypt function",
s"Detail message: $detailMessage"))
}
def hiveTableWithAnsiIntervalsError(tableName: String): Throwable = {
new UnsupportedOperationException(s"Hive table $tableName with ANSI intervals is not supported")
}
def cannotConvertOrcTimestampToTimestampNTZError(): Throwable = {
new SparkUnsupportedOperationException(
errorClass = "UNSUPPORTED_OPERATION",
messageParameters = Array("Unable to convert timestamp of Orc to data type 'timestamp_ntz'"))
}
def writePartitionExceedConfigSizeWhenDynamicPartitionError(
numWrittenParts: Int,
maxDynamicPartitions: Int,
maxDynamicPartitionsKey: String): Throwable = {
new SparkException(
s"Number of dynamic partitions created is $numWrittenParts" +
s", which is more than $maxDynamicPartitions" +
s". To solve this try to set $maxDynamicPartitionsKey" +
s" to at least $numWrittenParts.")
}
def invalidNumberFormatError(input: UTF8String, format: String): Throwable = {
new IllegalArgumentException(
s"The input string '$input' does not match the given number format: '$format'")
}
def multipleBucketTransformsError(): Throwable = {
new UnsupportedOperationException("Multiple bucket transforms are not supported.")
}
def unsupportedCreateNamespaceCommentError(): Throwable = {
new SQLFeatureNotSupportedException("Create namespace comment is not supported")
}
def unsupportedRemoveNamespaceCommentError(): Throwable = {
new SQLFeatureNotSupportedException("Remove namespace comment is not supported")
}
def unsupportedDropNamespaceRestrictError(): Throwable = {
new SQLFeatureNotSupportedException("Drop namespace restrict is not supported")
}
def invalidUnitInTimestampAdd(unit: String): Throwable = {
new SparkIllegalArgumentException(
errorClass = "INVALID_PARAMETER_VALUE",
messageParameters = Array("unit", "timestampadd", unit))
}
def invalidUnitInTimestampDiff(unit: String): Throwable = {
new SparkIllegalArgumentException(
errorClass = "INVALID_PARAMETER_VALUE",
messageParameters = Array("unit", "timestampdiff", unit))
}
}
| ueshin/apache-spark | sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala | Scala | apache-2.0 | 82,258 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xerial.silk.cui
import java.io.File
import java.net.URL
import java.util.jar.JarFile
import xerial.core.log.Logger
import xerial.silk.core.ClassBox
import xerial.silk.core.util.Path
import Path._
import scala.annotation.tailrec
/**
* ClassFinder finds a full class name from its partial class name
*
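 * A minimal usage sketch (illustrative only; it assumes the target class is
 * visible from the current context classloader):
 * {{{
 *   ClassFinder.findClass("ClassFinder")                  // => Some("xerial.silk.cui.ClassFinder")
 *   ClassFinder.findClass("xerial.silk.cui.ClassFinder")  // fully qualified names are matched directly
 * }}}
 *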
* @author Taro L. Saito
*/
object ClassFinder extends Logger {
def findClass(clName: String, classLoader: => ClassLoader = Thread.currentThread.getContextClassLoader): Option[String] = {
val cname = {
val pos = clName.lastIndexOf(".")
if (pos == -1)
clName
else
clName.substring(pos + 1)
}
import scala.collection.JavaConversions._
val classPathEntries = sys.props.getOrElse("java.class.path", "")
.split(File.pathSeparator)
.map { e => new File(e).toURI.toURL } ++
ClassBox.classPathEntries(classLoader)
trace(s"classpath entries:\\n${classPathEntries.mkString("\\n")}")
val isFullPath = clName.lastIndexOf(".") != -1
val clPath = s"${clName.replaceAll("\\\\.", "/")}.class"
val clFile = s"${cname}.class"
    def removeExt(s: String) = s.replaceAll("\\.class$", "")
def findTargetClassFile(resource: URL): Option[String] = {
if (ClassBox.isJarFile(resource)) {
// Find the target class from a jar file
val path = resource.getPath
val jarPath = path.replaceAll("%20", " ")
val jarFilePath = jarPath.replace("file:", "")
val jar = new JarFile(jarFilePath)
val entryName = if (isFullPath)
Option(jar.getEntry(s"/$clPath")).map(_.getName)
else {
jar.entries.collectFirst {
case e if e.getName.endsWith(clFile) =>
e.getName
}
}
entryName.map(name => removeExt(name))
}
else if (resource.getProtocol == "file") {
// Find the target class from a directory
@tailrec
def find(lst: List[File]): Option[File] = {
if (lst.isEmpty)
None
else {
val h = lst.head
if (h.isDirectory)
find(h.listFiles.toList ::: lst.tail)
else {
val fileName = h.getName
if (fileName.endsWith(".class") && fileName == clFile)
Some(h)
else
find(lst.tail)
}
}
}
val filePath = resource.getPath
val base = new File(filePath)
if (isFullPath) {
// Search the target file by directly specifying the file name
val f = new File(filePath, clPath)
if (f.exists())
Some(f.relativeTo(base).getPath)
else
None
}
else {
// Search directories recursively
find(List(base)).map { f => f.relativeTo(base).getPath }
}
}
else
None
}
val targetClassName = classPathEntries.toIterator.map(findTargetClassFile).collectFirst {
case Some(relativePathToClass) => {
        removeExt(relativePathToClass).replaceAll("\\/", ".")
}
}
targetClassName
}
}
| xerial/silk | silk-cui/src/main/scala/xerial/silk/cui/ClassFinder.scala | Scala | apache-2.0 | 3,705 |
package app
import service._
import util._
import util.Directory._
import util.ControlUtil._
import jp.sf.amateras.scalatra.forms._
import org.eclipse.jgit.api.Git
import org.scalatra.FlashMapSupport
import org.scalatra.i18n.Messages
import scala.Some
import java.util.ResourceBundle
class WikiController extends WikiControllerBase
with WikiService with RepositoryService with AccountService with ActivityService
with CollaboratorsAuthenticator with ReferrerAuthenticator with FileUploadControllerBase
trait WikiControllerBase extends ControllerBase with FlashMapSupport {
self: WikiService with RepositoryService with ActivityService
with CollaboratorsAuthenticator with ReferrerAuthenticator
with FileUploadControllerBase =>
case class WikiPageEditForm(pageName: String, content: String, message: Option[String], currentPageName: String, id: String)
case class WikiPageUploadForm(fileId: Option[String])
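  // Form definitions built with scalatra-forms. Note that pagename, unique,
  // conflictForNew and conflictForEdit are application-specific constraints,
  // not built-in validators of the library.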
val newForm = mapping(
"pageName" -> trim(label("Page name" , text(required, maxlength(40), pagename, unique))),
"content" -> trim(label("Content" , text(required, conflictForNew))),
"message" -> trim(label("Message" , optional(text()))),
"currentPageName" -> trim(label("Current page name" , text())),
"id" -> trim(label("Latest commit id" , text()))
)(WikiPageEditForm.apply)
val editForm = mapping(
"pageName" -> trim(label("Page name" , text(required, maxlength(40), pagename))),
"content" -> trim(label("Content" , text(required, conflictForEdit))),
"message" -> trim(label("Message" , optional(text()))),
"currentPageName" -> trim(label("Current page name" , text(required))),
"id" -> trim(label("Latest commit id" , text(required)))
)(WikiPageEditForm.apply)
val uploadForm = mapping(
"fileId" -> trim(label("File" , optional(text())))
)(WikiPageUploadForm.apply)
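  // Stores a previously uploaded temporary file (looked up by fileId) as an
  // attachment of the given wiki page; does nothing when no fileId was submitted.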
protected def upload(owner: String, repository: String, pageName: String, committer: model.Account, fileId: Option[String]): Unit =
fileId.map { fileId =>
val filename = getUploadedFilename(fileId).get
val file = getTemporaryFile(fileId)
storeAttachmentFile(owner, repository, pageName, committer, filename, FileUtil.getBytes(file))
}
get("/:owner/:repository/wiki")(referrersOnly { repository =>
getWikiPage(repository.owner, repository.name, "Home").map { page =>
wiki.html.page("Home", page, repository, hasWritePermission(repository.owner, repository.name, context.loginAccount))
} getOrElse redirect(s"/${repository.owner}/${repository.name}/wiki/Home/_edit")
})
get("/:owner/:repository/wiki/:page")(referrersOnly { repository =>
val pageName = StringUtil.urlDecode(params("page"))
getWikiPage(repository.owner, repository.name, pageName).map { page =>
wiki.html.page(pageName, page, repository, hasWritePermission(repository.owner, repository.name, context.loginAccount))
} getOrElse redirect(s"/${repository.owner}/${repository.name}/wiki/${StringUtil.urlEncode(pageName)}/_edit")
})
get("/:owner/:repository/wiki/:page/_history")(referrersOnly { repository =>
val pageName = StringUtil.urlDecode(params("page"))
using(Git.open(getWikiRepositoryDir(repository.owner, repository.name))){ git =>
JGitUtil.getCommitLog(git, "master", path = pageName + ".md") match {
case Right((logs, hasNext)) => wiki.html.history(Some(pageName), logs, repository)
case Left(_) => NotFound
}
}
})
get("/:owner/:repository/wiki/:page/_download/:file")(referrersOnly { repository =>
val pageName = StringUtil.urlDecode(params("page"))
val file = StringUtil.urlDecode(params("file"))
getWikiPageAttachment(repository.owner, repository.name, pageName, file).map { bytes =>
      contentType = FileUtil.getContentType(file, bytes)
bytes
} getOrElse {
NotFound
}
})
post("/:owner/:repository/wiki/:page/_upload", uploadForm)(collaboratorsOnly { (form, repository) =>
defining(context.loginAccount.get){ loginAccount =>
val pageName = StringUtil.urlDecode(params("page"))
upload(repository.owner, repository.name, pageName, loginAccount, form.fileId)
updateLastActivityDate(repository.owner, repository.name)
redirect(s"/${repository.owner}/${repository.name}/wiki/${StringUtil.urlEncode(pageName)}")
}
})
get("/:owner/:repository/wiki/:page/_compare/:commitId")(referrersOnly { repository =>
val pageName = StringUtil.urlDecode(params("page"))
    val Array(from, to) = params("commitId").split("\\.\\.\\.")
using(Git.open(getWikiRepositoryDir(repository.owner, repository.name))){ git =>
wiki.html.compare(Some(pageName), from, to, JGitUtil.getDiffs(git, from, to, true).filter(_.newPath == pageName + ".md"), repository,
hasWritePermission(repository.owner, repository.name, context.loginAccount), flash.get("info"))
}
})
get("/:owner/:repository/wiki/_compare/:commitId")(referrersOnly { repository =>
    val Array(from, to) = params("commitId").split("\\.\\.\\.")
using(Git.open(getWikiRepositoryDir(repository.owner, repository.name))){ git =>
wiki.html.compare(None, from, to, JGitUtil.getDiffs(git, from, to, true), repository,
hasWritePermission(repository.owner, repository.name, context.loginAccount), flash.get("info"))
}
})
get("/:owner/:repository/wiki/:page/_revert/:commitId")(collaboratorsOnly { repository =>
val pageName = StringUtil.urlDecode(params("page"))
    val Array(from, to) = params("commitId").split("\\.\\.\\.")
if(revertWikiPage(repository.owner, repository.name, from, to, context.loginAccount.get, Some(pageName))){
redirect(s"/${repository.owner}/${repository.name}/wiki/${StringUtil.urlEncode(pageName)}")
} else {
flash += "info" -> "This patch was not able to be reversed."
redirect(s"/${repository.owner}/${repository.name}/wiki/${StringUtil.urlEncode(pageName)}/_compare/${from}...${to}")
}
})
get("/:owner/:repository/wiki/_revert/:commitId")(collaboratorsOnly { repository =>
    val Array(from, to) = params("commitId").split("\\.\\.\\.")
if(revertWikiPage(repository.owner, repository.name, from, to, context.loginAccount.get, None)){
redirect(s"/${repository.owner}/${repository.name}/wiki/")
} else {
flash += "info" -> "This patch was not able to be reversed."
redirect(s"/${repository.owner}/${repository.name}/wiki/_compare/${from}...${to}")
}
})
get("/:owner/:repository/wiki/:page/_edit")(collaboratorsOnly { repository =>
val pageName = StringUtil.urlDecode(params("page"))
wiki.html.edit(pageName, getWikiPage(repository.owner, repository.name, pageName), repository)
})
post("/:owner/:repository/wiki/_edit", editForm)(collaboratorsOnly { (form, repository) =>
defining(context.loginAccount.get){ loginAccount =>
saveWikiPage(repository.owner, repository.name, form.currentPageName, form.pageName,
form.content, loginAccount, form.message.getOrElse(""), Some(form.id)).map { commitId =>
updateLastActivityDate(repository.owner, repository.name)
recordEditWikiPageActivity(repository.owner, repository.name, loginAccount.userName, form.pageName, commitId)
}
redirect(s"/${repository.owner}/${repository.name}/wiki/${StringUtil.urlEncode(form.pageName)}")
}
})
get("/:owner/:repository/wiki/_new")(collaboratorsOnly {
wiki.html.edit("", None, _)
})
post("/:owner/:repository/wiki/_new", newForm)(collaboratorsOnly { (form, repository) =>
defining(context.loginAccount.get){ loginAccount =>
saveWikiPage(repository.owner, repository.name, form.currentPageName, form.pageName,
form.content, loginAccount, form.message.getOrElse(""), None)
updateLastActivityDate(repository.owner, repository.name)
recordCreateWikiPageActivity(repository.owner, repository.name, loginAccount.userName, form.pageName)
redirect(s"/${repository.owner}/${repository.name}/wiki/${StringUtil.urlEncode(form.pageName)}")
}
})
get("/:owner/:repository/wiki/:page/_delete")(collaboratorsOnly { repository =>
val pageName = StringUtil.urlDecode(params("page"))
defining(context.loginAccount.get){ loginAccount =>
deleteWikiPage(repository.owner, repository.name, pageName, loginAccount.fullName, loginAccount.mailAddress, s"Destroyed ${pageName}")
updateLastActivityDate(repository.owner, repository.name)
redirect(s"/${repository.owner}/${repository.name}/wiki")
}
})
get("/:owner/:repository/wiki/_pages")(referrersOnly { repository =>
wiki.html.pages(getWikiPageList(repository.owner, repository.name), repository,
hasWritePermission(repository.owner, repository.name, context.loginAccount))
})
get("/:owner/:repository/wiki/_history")(referrersOnly { repository =>
using(Git.open(getWikiRepositoryDir(repository.owner, repository.name))){ git =>
JGitUtil.getCommitLog(git, "master") match {
case Right((logs, hasNext)) => wiki.html.history(None, logs, repository)
case Left(_) => NotFound
}
}
})
get("/:owner/:repository/wiki/_blob/*")(referrersOnly { repository =>
val path = multiParams("splat").head
getFileContent(repository.owner, repository.name, path).map { bytes =>
contentType = FileUtil.getContentType(path, bytes)
bytes
} getOrElse NotFound
})
private def unique: Constraint = new Constraint(){
override def validate(name: String, value: String, params: Map[String, String], messages: Messages): Option[String] =
getWikiPageList(params("owner"), params("repository")).find(_ == value).map(_ => "Page already exists.")
}
private def pagename: Constraint = new Constraint(){
override def validate(name: String, value: String, messages: Messages): Option[String] =
      if(value.exists("\\/:*?\"<>|".contains(_))){
Some(s"${name} contains invalid character.")
} else if(value.startsWith("_") || value.startsWith("-")){
Some(s"${name} starts with invalid character.")
} else {
None
}
}
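  // Optimistic-locking checks: creating a page fails if it already exists, and editing fails when
  // the page's latest commit id no longer matches the id the edit form was loaded with.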
private def conflictForNew: Constraint = new Constraint(){
override def validate(name: String, value: String, messages: Messages): Option[String] = {
targetWikiPage.map { _ =>
"Someone has created the wiki since you started. Please reload this page and re-apply your changes."
}
}
}
private def conflictForEdit: Constraint = new Constraint(){
override def validate(name: String, value: String, messages: Messages): Option[String] = {
targetWikiPage.filter(_.id != params("id")).map{ _ =>
"Someone has edited the wiki since you started. Please reload this page and re-apply your changes."
}
}
}
private def targetWikiPage = getWikiPage(params("owner"), params("repository"), params("pageName"))
}
| toshi-saito/gitbucket | src/main/scala/app/WikiController.scala | Scala | apache-2.0 | 11,070 |
package com.mesosphere.cosmos
import org.scalatest.FreeSpec
import com.twitter.util.Return
import com.twitter.util.Throw
final class TrysSpec extends FreeSpec {
"join[A,B]" in {
assertResult(Return((1,2)))(Trys.join(Return(1), Return(2)))
val e = new IllegalArgumentException
val n = new NoSuchElementException
assertResult(Throw(e))(Trys.join(Throw(e), Return(2)))
assertResult(Throw(e))(Trys.join(Return(1), Throw(e)))
assertResult(Throw(n))(Trys.join(Throw(n), Throw(e)))
}
"join[A,B,C]" in {
assertResult(Return((1,2,3)))(Trys.join(Return(1), Return(2), Return(3)))
val e = new IllegalArgumentException
val n = new NoSuchElementException
assertResult(Throw(e))(Trys.join(Throw(e), Return(2), Return(3)))
assertResult(Throw(e))(Trys.join(Return(1), Throw(e), Return(3)))
assertResult(Throw(n))(Trys.join(Throw(n), Throw(e), Return(3)))
}
}
| dcos/cosmos | cosmos-test-common/src/test/scala/com/mesosphere/cosmos/TrysSpec.scala | Scala | apache-2.0 | 903 |
import sbt._
import Keys._
import bintray.BintrayKeys._
object IngredientsBuild extends Build {
lazy val commonSettings = seq(
organization := "io.buildo",
scalaVersion := "2.11.8",
crossScalaVersions := Seq("2.11.8", "2.12.1"),
resolvers ++= Seq(
"buildo mvn" at "https://raw.github.com/buildo/mvn/master/releases",
Resolver.jcenterRepo
),
scalacOptions := Seq(
"-unchecked",
"-deprecation",
"-encoding",
"utf8"),
licenses += ("MIT", url("http://opensource.org/licenses/MIT")),
bintrayOrganization := Some("buildo"),
bintrayReleaseOnPublish in ThisBuild := true
)
lazy val jsend = project.in(file("jsend"))
.settings(commonSettings: _*)
lazy val logging = project.in(file("logging"))
.settings(commonSettings: _*)
.dependsOn(loggingMacro % "compile-internal, test-internal")
lazy val loggingMacro = project.in(file("logging/macro"))
.settings(commonSettings: _*)
.settings(
publish := (),
publishLocal := ()
)
lazy val tuplecutter = project.in(file("tuplecutter"))
.settings(commonSettings: _*)
lazy val caseenum = project.in(file("caseenum"))
.settings(commonSettings: _*)
lazy val caseenumCirceSupport = project.in(file("caseenum/circeSupport"))
.settings(commonSettings: _*)
.dependsOn(caseenum)
lazy val root = project.in(file("."))
.aggregate(jsend, logging, tuplecutter, caseenum)
.settings(
publish := {}
)
}
| buildo/ingredients | project/Build.scala | Scala | mit | 1,488 |
package djinni
import djinni.ast._
import djinni.generatorTools._
import djinni.meta._
class ObjcMarshal(spec: Spec) extends Marshal(spec) {
override def typename(tm: MExpr): String = {
val (name, _) = toObjcType(tm)
name
}
def typename(name: String, ty: TypeDef): String = idObjc.ty(name)
override def fqTypename(tm: MExpr): String = typename(tm)
def fqTypename(name: String, ty: TypeDef): String = typename(name, ty)
def nullability(tm: MExpr): Option[String] = {
val nonnull = Some("nonnull")
val nullable = Some("nullable")
tm.base match {
case MOptional => nullable
case MPrimitive(_,_,_,_,_,_,_,_) => None
case d: MDef => d.defType match {
case DEnum => None
case DInterface => nullable
case DRecord => nonnull
}
case e: MExtern => e.defType match {
case DEnum => None
case DInterface => nullable
case DRecord => if(e.objc.pointer) nonnull else None
}
case _ => nonnull
}
}
override def paramType(tm: MExpr): String = {
nullability(tm).fold("")(_ + " ") + toObjcParamType(tm)
}
override def fqParamType(tm: MExpr): String = paramType(tm)
override def returnType(ret: Option[TypeRef]): String = ret.fold("void")((t: TypeRef) => nullability(t.resolved).fold("")(_ + " ") + toObjcParamType(t.resolved))
override def fqReturnType(ret: Option[TypeRef]): String = returnType(ret)
override def fieldType(tm: MExpr): String = toObjcParamType(tm)
override def fqFieldType(tm: MExpr): String = toObjcParamType(tm)
override def toCpp(tm: MExpr, expr: String): String = throw new AssertionError("direct objc to cpp conversion not possible")
override def fromCpp(tm: MExpr, expr: String): String = throw new AssertionError("direct cpp to objc conversion not possible")
def references(m: Meta, exclude: String = ""): Seq[SymbolReference] = m match {
case o: MOpaque =>
List(ImportRef("<Foundation/Foundation.h>"))
case d: MDef => d.defType match {
case DEnum =>
List(ImportRef(include(d.name)))
case DInterface =>
val ext = d.body.asInstanceOf[Interface].ext
if (ext.cpp && !ext.objc) {
List(ImportRef("<Foundation/Foundation.h>"), DeclRef(s"@class ${typename(d.name, d.body)};", None))
}
else {
List(ImportRef("<Foundation/Foundation.h>"), DeclRef(s"@protocol ${typename(d.name, d.body)};", None))
}
case DRecord =>
val r = d.body.asInstanceOf[Record]
val prefix = if (r.ext.objc) "../" else ""
List(ImportRef(q(spec.objcIncludePrefix + prefix + headerName(d.name))))
}
case e: MExtern => List(ImportRef(e.objc.header))
case p: MParam => List()
}
def headerName(ident: String) = idObjc.ty(ident) + "." + spec.objcHeaderExt
def include(ident: String) = q(spec.objcIncludePrefix + headerName(ident))
def isPointer(td: TypeDecl) = td.body match {
case i: Interface => true
case r: Record => true
case e: Enum => false
}
def boxedTypename(td: TypeDecl) = td.body match {
case i: Interface => typename(td.ident, i)
case r: Record => typename(td.ident, r)
case e: Enum => "NSNumber"
}
// Return value: (Type_Name, Is_Class_Or_Not)
def toObjcType(ty: TypeRef): (String, Boolean) = toObjcType(ty.resolved, false)
def toObjcType(ty: TypeRef, needRef: Boolean): (String, Boolean) = toObjcType(ty.resolved, needRef)
def toObjcType(tm: MExpr): (String, Boolean) = toObjcType(tm, false)
def toObjcType(tm: MExpr, needRef: Boolean): (String, Boolean) = {
def f(tm: MExpr, needRef: Boolean): (String, Boolean) = {
tm.base match {
case MOptional =>
// We use "nil" for the empty optional.
assert(tm.args.size == 1)
val arg = tm.args.head
arg.base match {
case MOptional => throw new AssertionError("nested optional?")
case m => f(arg, true)
}
case o =>
val base = o match {
case p: MPrimitive => if (needRef) (p.objcBoxed, true) else (p.objcName, false)
case MString => ("NSString", true)
case MDate => ("NSDate", true)
case MBinary => ("NSData", true)
case MOptional => throw new AssertionError("optional should have been special cased")
case MList => ("NSArray", true)
case MSet => ("NSSet", true)
case MMap => ("NSDictionary", true)
case d: MDef => d.defType match {
case DEnum => if (needRef) ("NSNumber", true) else (idObjc.ty(d.name), false)
case DRecord => (idObjc.ty(d.name), true)
case DInterface =>
val ext = d.body.asInstanceOf[Interface].ext
if (ext.cpp && !ext.objc)
(idObjc.ty(d.name), true)
else
(s"id<${idObjc.ty(d.name)}>", false)
}
case e: MExtern => e.body match {
case i: Interface => if(i.ext.objc) (s"id<${e.objc.typename}>", false) else (e.objc.typename, true)
case _ => if(needRef) (e.objc.boxed, true) else (e.objc.typename, e.objc.pointer)
}
case p: MParam => throw new AssertionError("Parameter should not happen at Obj-C top level")
}
base
}
}
f(tm, needRef)
}
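  // Illustrative mapping (not exhaustive): a bare primitive keeps its unboxed C name with
  // needRef = false, while optional<primitive> yields the boxed form ("NSNumber", true),
  // because the empty optional is represented as nil and therefore needs an object type.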
def toObjcParamType(tm: MExpr): String = {
val (name, needRef) = toObjcType(tm)
name + (if(needRef) " *" else "")
}
}
| aijiekj/djinni | src/source/ObjcMarshal.scala | Scala | apache-2.0 | 5,504 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package top
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.base.Modifier
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.expressions.Annotation
/**
* @author Alexander Podkhalyuzin
* Date: 05.02.2008
*/
/*
* TmplDef ::= {Annotation} {Modifier}
[case] class ClassDef
* | [case] object ObjectDef
* | trait TraitDef
*
*/
object TmplDef extends TmplDef {
override protected val classDef = ClassDef
override protected val objectDef = ObjectDef
override protected val traitDef = TraitDef
override protected val annotation = Annotation
}
trait TmplDef {
protected val classDef: ClassDef
protected val objectDef: ObjectDef
protected val traitDef: TraitDef
protected val annotation: Annotation
def parse(builder: ScalaPsiBuilder): Boolean = {
val templateMarker = builder.mark
templateMarker.setCustomEdgeTokenBinders(ScalaTokenBinders.PRECEEDING_COMMENTS_TOKEN, null)
val annotationsMarker = builder.mark
while (annotation.parse(builder)) {}
annotationsMarker.done(ScalaElementTypes.ANNOTATIONS)
annotationsMarker.setCustomEdgeTokenBinders(ScalaTokenBinders.DEFAULT_LEFT_EDGE_BINDER, null)
//parsing modifiers
val modifierMarker = builder.mark
while (Modifier.parse(builder)) {}
//could be case modifier
val caseMarker = builder.mark
if (builder.getTokenType == ScalaTokenTypes.kCASE)
builder.advanceLexer() //Ate case
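    // caseMarker lets the parser undo consuming `case`: it is simply dropped for case classes and
    // case objects below, but rolled back for traits, where `case` is illegal and reported as
    // a "wrong.case.modifier" error.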
//parsing template body
builder.getTokenType match {
case ScalaTokenTypes.kCLASS =>
caseMarker.drop()
modifierMarker.done(ScalaElementTypes.MODIFIERS)
builder.advanceLexer() //Ate class
if (classDef parse builder) {
templateMarker.done(ScalaElementTypes.CLASS_DEF)
} else {
templateMarker.drop()
}
true
case ScalaTokenTypes.kOBJECT =>
caseMarker.drop()
modifierMarker.done(ScalaElementTypes.MODIFIERS)
builder.advanceLexer() //Ate object
if (objectDef parse builder) {
templateMarker.done(ScalaElementTypes.OBJECT_DEF)
} else {
templateMarker.drop()
}
true
case ScalaTokenTypes.kTRAIT =>
caseMarker.rollbackTo()
modifierMarker.done(ScalaElementTypes.MODIFIERS)
builder.getTokenType match {
case ScalaTokenTypes.kTRAIT => {
builder.advanceLexer() //Ate trait
if (traitDef.parse(builder)) {
templateMarker.done(ScalaElementTypes.TRAIT_DEF)
} else {
templateMarker.drop()
}
true
}
          // otherwise the case modifier was misplaced (e.g. "case trait")
case _ => {
builder error ErrMsg("wrong.case.modifier")
builder.advanceLexer() //Ate case
builder.getTokenText
builder.advanceLexer() //Ate trait
traitDef.parse(builder)
templateMarker.done(ScalaElementTypes.TRAIT_DEF)
true
}
}
      // anything else is not a template definition: roll back and fail
case _ =>
templateMarker.rollbackTo()
//builder.advanceLexer //Ate something
false
}
}
}
| whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/parser/parsing/top/TmplDef.scala | Scala | apache-2.0 | 3,377 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.r
import org.apache.hadoop.fs.Path
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.attribute.AttributeGroup
import org.apache.spark.ml.feature.RFormula
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.ml.regression.{GBTRegressionModel, GBTRegressor}
import org.apache.spark.ml.util._
import org.apache.spark.sql.{DataFrame, Dataset}
private[r] class GBTRegressorWrapper private (
val pipeline: PipelineModel,
val formula: String,
val features: Array[String]) extends MLWritable {
private val gbtrModel: GBTRegressionModel =
pipeline.stages(1).asInstanceOf[GBTRegressionModel]
lazy val numFeatures: Int = gbtrModel.numFeatures
lazy val featureImportances: Vector = gbtrModel.featureImportances
lazy val numTrees: Int = gbtrModel.getNumTrees
lazy val treeWeights: Array[Double] = gbtrModel.treeWeights
lazy val maxDepth: Int = gbtrModel.getMaxDepth
def summary: String = gbtrModel.toDebugString
def transform(dataset: Dataset[_]): DataFrame = {
pipeline.transform(dataset).drop(gbtrModel.getFeaturesCol)
}
override def write: MLWriter = new
GBTRegressorWrapper.GBTRegressorWrapperWriter(this)
}
private[r] object GBTRegressorWrapper extends MLReadable[GBTRegressorWrapper] {
def fit( // scalastyle:ignore
data: DataFrame,
formula: String,
maxDepth: Int,
maxBins: Int,
maxIter: Int,
stepSize: Double,
minInstancesPerNode: Int,
minInfoGain: Double,
checkpointInterval: Int,
lossType: String,
seed: String,
subsamplingRate: Double,
maxMemoryInMB: Int,
cacheNodeIds: Boolean): GBTRegressorWrapper = {
val rFormula = new RFormula()
.setFormula(formula)
RWrapperUtils.checkDataColumns(rFormula, data)
val rFormulaModel = rFormula.fit(data)
// get feature names from output schema
val schema = rFormulaModel.transform(data).schema
val featureAttrs = AttributeGroup.fromStructField(schema(rFormulaModel.getFeaturesCol))
.attributes.get
val features = featureAttrs.map(_.name.get)
// assemble and fit the pipeline
val rfr = new GBTRegressor()
.setMaxDepth(maxDepth)
.setMaxBins(maxBins)
.setMaxIter(maxIter)
.setStepSize(stepSize)
.setMinInstancesPerNode(minInstancesPerNode)
.setMinInfoGain(minInfoGain)
.setCheckpointInterval(checkpointInterval)
.setLossType(lossType)
.setSubsamplingRate(subsamplingRate)
.setMaxMemoryInMB(maxMemoryInMB)
.setCacheNodeIds(cacheNodeIds)
.setFeaturesCol(rFormula.getFeaturesCol)
if (seed != null && seed.length > 0) rfr.setSeed(seed.toLong)
val pipeline = new Pipeline()
.setStages(Array(rFormulaModel, rfr))
.fit(data)
new GBTRegressorWrapper(pipeline, formula, features)
}
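  // Illustrative JVM-side call, mirroring the arguments SparkR passes through (the values shown
  // here are assumptions for the example, not defaults defined in this file):
  //   GBTRegressorWrapper.fit(df, "label ~ .", 5, 32, 20, 0.1, 1, 0.0, 10,
  //     "squared", null, 1.0, 256, cacheNodeIds = false)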
override def read: MLReader[GBTRegressorWrapper] = new GBTRegressorWrapperReader
override def load(path: String): GBTRegressorWrapper = super.load(path)
class GBTRegressorWrapperWriter(instance: GBTRegressorWrapper)
extends MLWriter {
override protected def saveImpl(path: String): Unit = {
val rMetadataPath = new Path(path, "rMetadata").toString
val pipelinePath = new Path(path, "pipeline").toString
val rMetadata = ("class" -> instance.getClass.getName) ~
("formula" -> instance.formula) ~
("features" -> instance.features.toSeq)
val rMetadataJson: String = compact(render(rMetadata))
sc.parallelize(Seq(rMetadataJson), 1).saveAsTextFile(rMetadataPath)
instance.pipeline.save(pipelinePath)
}
}
class GBTRegressorWrapperReader extends MLReader[GBTRegressorWrapper] {
override def load(path: String): GBTRegressorWrapper = {
implicit val format = DefaultFormats
val rMetadataPath = new Path(path, "rMetadata").toString
val pipelinePath = new Path(path, "pipeline").toString
val pipeline = PipelineModel.load(pipelinePath)
val rMetadataStr = sc.textFile(rMetadataPath, 1).first()
val rMetadata = parse(rMetadataStr)
      val formula = (rMetadata \ "formula").extract[String]
      val features = (rMetadata \ "features").extract[Array[String]]
new GBTRegressorWrapper(pipeline, formula, features)
}
}
}
| maropu/spark | mllib/src/main/scala/org/apache/spark/ml/r/GBTRegressorWrapper.scala | Scala | apache-2.0 | 5,192 |
package au.gov.dva.sopapi.tests.parsertests
import au.gov.dva.sopapi.sopref.parsing.implementations.parsers.LegalReferenceSequences
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class LegalReferenceSequenceTests extends FunSuite {
test("Post Aug 2015 ordinary sequence correctly returns false")
{
val testData = List("(1)","(2)","(3)","(4)","(6)") // sec five repealed
val result = LegalReferenceSequences.isNextMainFactorLine(Some("(3)"),"(2)")
assert(result == false)
}
test("Post Aug 2015 ordinary sequence correctly returns true")
{
val testData = List("(1)","(2)","(3)","(4)","(6)") // sec five repealed
val result = LegalReferenceSequences.isNextMainFactorLine(Some("(4)"),"(6)")
assert(result == true)
}
test("Int to small romans")
{
val testData = 1 to 40
val result = testData.map(LegalReferenceSequences.intToSmallRoman)
println(result)
}
test("Get small letters")
{
val result = LegalReferenceSequences.sequenceOfSmallLetters
println(result)
}
}
| govlawtech/dva-sop-api | app/src/test/scala/au/gov/dva/sopapi/tests/parsertests/LegalReferenceSequenceTests.scala | Scala | apache-2.0 | 1,117 |
package controllers
import com.pellucid.caseconfig._
import com.typesafe.config.Config
import models.configuration.AppProxy
import play.api.mvc.{ActionBuilder, Controller, Request, Result}
import scala.concurrent.Future
trait ApiController extends Controller {
val appConfig: Config
private val appProxy = appConfig.get[AppProxy]("proxy")
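  // Assumed HOCON shape behind AppProxy (illustrative; field names taken from the usage below):
  //   proxy { authentication = true, user = "admin", password = "secret" }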
object AuthenticationAction extends ActionBuilder[Request] {
def invokeBlock[A](request: Request[A], block: (Request[A]) => Future[Result]) = {
appProxy.authentication match {
case true => {
val userOpt = request.getQueryString("user")
val passwordOpt = request.getQueryString("password")
(userOpt, passwordOpt) match {
case (Some(user), Some(password))
if (user == appProxy.user && password == appProxy.password) => {
block(request)
}
case _ => Future.successful(Unauthorized)
}
}
case false => block(request)
}
}
}
}
| joakim-ribier/proxy-foscam | app/controllers/ApiController.scala | Scala | mit | 1,021 |
package s {
object Boop extends j.Bar_1 {
def foo() {}
def bar() {}
}
class Baz(x: j.Bar_1) {
x.foo
override def toString = "Baz"
}
}
object Test {
def main(args: Array[String]): Unit = {
println(new s.Baz(s.Boop))
}
}
| yusuke2255/dotty | tests/pending/run/t1430/Test_2.scala | Scala | bsd-3-clause | 252 |
package breeze.linalg
package operators
import breeze.util.ArrayUtil
import spire.syntax.cfor._
import support._
import scala.reflect.ClassTag
import java.util
import breeze.macros.expand
import breeze.math._
import breeze.generic.UFunc
import breeze.storage.Zero
import breeze.generic.UFunc.{UImpl2, UImpl}
import scala.{specialized=>spec}
import scalaxy.debug._
trait SparseVector_DenseVector_Ops { this: SparseVector.type =>
import breeze.math.PowImplicits._
@expand
@expand.valify
implicit def implOps_SVT_DVT_InPlace[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpAdd, OpSub, OpMulScalar, OpDiv, OpSet, OpMod, OpPow) Op <: OpType]
(implicit @expand.sequence[Op]({_ + _}, {_ - _}, {_ * _}, {_ / _}, {(a,b) => b}, {_ % _}, {_ pow _}) op: Op.Impl2[T, T, T]):
Op.InPlaceImpl2[SparseVector[T], DenseVector[T]] =
new Op.InPlaceImpl2[SparseVector[T], DenseVector[T]] {
def apply(a: SparseVector[T], b: DenseVector[T]): Unit = {
require(a.length == b.length, "Vectors must have the same length")
val result: VectorBuilder[T] = new VectorBuilder[T](a.length, a.length)
val bd: Array[T] = b.data
val adefault: T = a.array.default
var boff: Int = b.offset
val asize: Int = a.activeSize
val bstride: Int = b.stride
val ad: Array[T] = a.data
val ai: Array[Int] = a.index
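        // Walk a's active entries in index order; for the gaps between them, apply `op` to a's
        // default element (zero) and the matching dense value, so that non-zero-preserving ops
        // such as OpSet and OpPow still produce correct results.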
var i = 0
var j = 0
while(i < asize) {
// do defaults until we get to the next aoffset
val nextBoff: Int = b.offset + ai(i) * bstride
while(boff < nextBoff) {
result.add(j, op(adefault, bd(boff)))
boff += bstride
j += 1
}
result.add(j, op(ad(i), bd(boff)))
boff += b.stride
i += 1
j += 1
}
while(boff < bd.length) {
result.add(j, op(adefault, bd(boff)))
boff += bstride
j += 1
}
val rs: SparseVector[T] = result.toSparseVector(true, true)
a.use(rs.index, rs.data, rs.activeSize)
}
implicitly[BinaryUpdateRegistry[Vector[T], Vector[T], Op.type]].register(this)
}
@expand
@expand.valify
implicit def implOps_SVT_DVT_eq_SVT[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpMulScalar, OpDiv) Op <: OpType]
(implicit @expand.sequence[Op]({_ * _}, {_ / _}) op: Op.Impl2[T, T, T]):
Op.Impl2[SparseVector[T], DenseVector[T], SparseVector[T]] = {
new Op.Impl2[SparseVector[T], DenseVector[T], SparseVector[T]] {
def apply(a: SparseVector[T], b: DenseVector[T]): SparseVector[T] = {
require(a.length == b.length, "Vectors must have the same length")
val result = VectorBuilder.zeros[T](a.length)
val bd: Array[T] = b.data
val adefault: T = a.array.default
var boff: Int = b.offset
val asize: Int = a.activeSize
val bstride: Int = b.stride
val ad: Array[T] = a.data
val ai: Array[Int] = a.index
cforRange(0 until a.activeSize) { i =>
val ind = a.indexAt(i)
val res: T = op(a.valueAt(i), b(ind))
if(res != 0)
result.add(ind, res)
}
result.toSparseVector(true, true)
}
implicitly[BinaryRegistry[Vector[T], Vector[T], Op.type, Vector[T]]].register(this)
}
}
@expand
@expand.valify
implicit def implOps_SVT_DVT_eq_DVT[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpAdd, OpSub, OpSet, OpMod, OpPow) Op <: OpType]
(implicit @expand.sequence[Op]({_ + _}, {_ - _}, {(a,b) => b}, {_ % _}, {_ pow _}) op: Op.Impl2[T, T, T]):
Op.Impl2[SparseVector[T], DenseVector[T], DenseVector[T]] =
new Op.Impl2[SparseVector[T], DenseVector[T], DenseVector[T]] {
def apply(a: SparseVector[T], b: DenseVector[T]): DenseVector[T] = {
require(a.length == b.length, "Vectors must have the same length")
val result: DenseVector[T] = DenseVector.zeros[T](a.length)
val bd: Array[T] = b.data
val adefault: T = a.array.default
var boff: Int = b.offset
val asize: Int = a.activeSize
val bstride: Int = b.stride
val ad: Array[T] = a.data
val ai: Array[Int] = a.index
var i = 0
var j = 0
while(i < asize) {
// do defaults until we get to the next aoffset
val nextBoff: Int = b.offset + ai(i) * bstride
while(boff < nextBoff) {
result(j) = op(adefault, bd(boff))
boff += bstride
j += 1
}
result(j) = op(ad(i), bd(boff))
boff += b.stride
i += 1
j += 1
}
while(boff < bd.length) {
result(j) = op(adefault, bd(boff))
boff += bstride
j += 1
}
result
}
implicitly[BinaryRegistry[Vector[T], Vector[T], Op.type, Vector[T]]].register(this)
}
@expand
@expand.valify
implicit def implOpMulInner_SVT_DVT_eq_T[@expand.args(Int, Double, Float, Long) T]:
breeze.linalg.operators.OpMulInner.Impl2[SparseVector[T], DenseVector[T], T] =
new breeze.linalg.operators.OpMulInner.Impl2[SparseVector[T], DenseVector[T], T] {
def apply(a: SparseVector[T], b: DenseVector[T]): T = {
require(b.length == a.length, "Vectors must be the same length!")
b dot a
}
implicitly[BinaryRegistry[Vector[T], Vector[T], OpMulInner.type, T]].register(this)
}
}
trait DenseVector_SparseVector_Ops { this: SparseVector.type =>
import breeze.math.PowImplicits._
@expand
@expand.valify
implicit def implOps_DVT_SVT_InPlace[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpMulScalar, OpDiv, OpSet, OpMod, OpPow) Op <: OpType]
(implicit @expand.sequence[Op]({_ * _}, {_ / _}, {(a,b) => b}, {_ % _}, {_ pow _}) op: Op.Impl2[T, T, T]):
Op.InPlaceImpl2[DenseVector[T], SparseVector[T]] =
new Op.InPlaceImpl2[DenseVector[T], SparseVector[T]] {
def apply(a: DenseVector[T], b: SparseVector[T]): Unit = {
require(a.length == b.length, "Vectors must have the same length")
val ad: Array[T] = a.data
val bdefault: T = b.array.default
var aoff: Int = a.offset
val bsize: Int = b.activeSize
val astride: Int = a.stride
val bd: Array[T] = b.data
val bi: Array[Int] = b.index
var i: Int = 0
while(i < bsize) {
// do defaults until we get to the next aoffset
val nextAoff: Int = a.offset + bi(i) * astride
while(aoff < nextAoff) {
ad(aoff) = op(ad(aoff), bdefault)
aoff += astride
}
ad(aoff) = op(ad(aoff), bd(i))
aoff += a.stride
i += 1
}
while(aoff < ad.length) {
ad(aoff) = op(ad(aoff), bdefault)
aoff += astride
}
}
implicitly[BinaryUpdateRegistry[DenseVector[T], Vector[T], Op.type]].register(this)
implicitly[BinaryUpdateRegistry[Vector[T], Vector[T], Op.type]].register(this)
}
@expand
@expand.valify
implicit def implOps_DVT_SVT_InPlace[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpAdd, OpSub) Op <: OpType]
(implicit @expand.sequence[Op]({_ + _}, {_ - _}) op: Op.Impl2[T, T, T]):
Op.InPlaceImpl2[DenseVector[T], SparseVector[T]] =
new Op.InPlaceImpl2[DenseVector[T], SparseVector[T]] {
def apply(a: DenseVector[T], b: SparseVector[T]): Unit = {
require(a.length == b.length, "Vectors must have the same length")
val ad: Array[T] = a.data
val bd: Array[T] = b.data
val bi: Array[Int] = b.index
val bsize: Int = b.iterableSize
var i: Int = 0
while(i < bsize) {
val aoff: Int = a.offset + bi(i) * a.stride
ad(aoff) = op(ad(aoff), bd(i))
i += 1
}
}
implicitly[BinaryUpdateRegistry[DenseVector[T], Vector[T], Op.type]].register(this)
implicitly[BinaryUpdateRegistry[Vector[T], Vector[T], Op.type]].register(this)
}
@expand
@expand.valify
implicit def implOps_DVT_SVT_eq_SVT[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpMulScalar, OpDiv) Op <: OpType]
(implicit @expand.sequence[Op]({_ * _}, {_ / _}) op: Op.Impl2[T, T, T]):
Op.Impl2[DenseVector[T], SparseVector[T], SparseVector[T]] = {
new Op.Impl2[DenseVector[T], SparseVector[T], SparseVector[T]] {
def apply(a: DenseVector[T], b: SparseVector[T]): SparseVector[T] = {
require(a.length == b.length, "Vectors must have the same length")
val result = VectorBuilder.zeros[T](a.length)
cforRange(0 until b.activeSize) { i =>
val ind = b.indexAt(i)
val res: T = op(a(ind), b.valueAt(i))
if(res != 0)
result.add(ind, res)
}
result.toSparseVector(true, true)
}
implicitly[BinaryRegistry[Vector[T], Vector[T], Op.type, Vector[T]]].register(this)
}
}
@expand
@expand.valify
implicit def implOpMulInner_DVT_SVT_eq_T[@expand.args(Int, Double, Float, Long) T]
(implicit @expand.sequence[T](0, 0.0, 0.0f, 0l) zero: T):
OpMulInner.Impl2[DenseVector[T], SparseVector[T], T] =
new OpMulInner.Impl2[DenseVector[T], SparseVector[T], T] {
def apply(a: DenseVector[T], b: SparseVector[T]): T = {
var result: T = zero
val bd: Array[T] = b.data
val bi: Array[Int] = b.index
val bsize: Int = b.iterableSize
val adata: Array[T] = a.data
val aoff: Int = a.offset
val stride: Int = a.stride
var i = 0
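        // Fast path: a compact dense vector (offset 0, stride 1) lets us index its backing array
        // directly with the sparse indices; otherwise the offset and stride must be applied.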
if(stride == 1 && aoff == 0) {
while(i < bsize) {
result += adata(bi(i)) * bd(i)
i += 1
}
} else {
while(i < bsize) {
result += adata(aoff + bi(i) * stride) * bd(i)
i += 1
}
}
result
}
implicitly[BinaryRegistry[Vector[T], Vector[T], OpMulInner.type, T]].register(this)
// implicitly[BinaryRegistry[DenseVector[T], Vector[T], OpMulInner.type, T]].register(this)
}
@expand
@expand.valify
implicit def implZipValues_DVT_SVT_eq_ZVTT[@expand.args(Int, Double, Float, Long) T]
(implicit @expand.sequence[T](0, 0.0, 0.0f, 0l) zero: T):
zipValues.Impl2[DenseVector[T], SparseVector[T], ZippedValues[T, T]] =
new zipValues.Impl2[DenseVector[T], SparseVector[T], ZippedValues[T, T]] {
def apply(du: DenseVector[T], sv: SparseVector[T]): ZippedValues[T, T] = {
require(sv.length == du.length, "vector length mismatch")
new ZippedValues[T, T] {
def foreach(fn: (T, T) => Unit): Unit = {
val n: Int = du.length
val duData: Array[T] = du.data
val duStride: Int = du.stride
var duOffset: Int = du.offset
val svIndices: Array[Int] = sv.index
val svValues: Array[T] = sv.data
val svActiveSize: Int = sv.activeSize
var i: Int = 0
var j: Int = 0
while (j < svActiveSize) {
val svIndex = svIndices(j)
while (i < svIndex) {
fn(duData(duOffset), zero)
i += 1
duOffset += duStride
}
fn(duData(duOffset), svValues(j))
i += 1
duOffset += duStride
j += 1
}
while (i < n) {
fn(duData(duOffset), zero)
i += 1
duOffset += duStride
}
}
}
}
      implicitly[BinaryRegistry[Vector[T], Vector[T], zipValues.type, ZippedValues[T, T]]].register(this)
}
@expand
@expand.valify
implicit def implScaleAdd_DVT_T_SVT_InPlace[@expand.args(Int, Double, Float, Long) T]
(implicit @expand.sequence[T](0, 0.0, 0f, 0l) zero: T):
scaleAdd.InPlaceImpl3[DenseVector[T], T, SparseVector[T]] =
new scaleAdd.InPlaceImpl3[DenseVector[T], T, SparseVector[T]] {
def apply(y: DenseVector[T], a: T, x: SparseVector[T]): Unit = {
require(x.length == y.length, "Vectors must be the same length!")
val xsize: Int = x.activeSize
if(a != zero) {
var xoff: Int = 0
while (xoff < xsize) {
y(x.indexAt(xoff)) += a * x.valueAt(xoff)
xoff += 1
}
}
}
implicitly[TernaryUpdateRegistry[Vector[T], T, Vector[T], scaleAdd.type]].register(this)
}
// this shouldn't be necessary but it is:
@expand
@expand.valify
implicit def dv_sv_op[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpAdd, OpSub, OpSet, OpMod, OpPow) Op <: OpType]: Op.Impl2[DenseVector[T], SparseVector[T], DenseVector[T]] = {
val op = DenseVector.pureFromUpdate(implicitly[Op.InPlaceImpl2[DenseVector[T], SparseVector[T]]])
implicitly[BinaryRegistry[DenseVector[T], Vector[T], Op.type, Vector[T]]].register(op)
implicitly[BinaryRegistry[Vector[T], Vector[T], Op.type, Vector[T]]].register(op)
op
}
}
trait SparseVectorOps { this: SparseVector.type =>
import breeze.math.PowImplicits._
implicit def liftCSCOpToSVTransposeOp[Tag,V,LHS,R](implicit op: UFunc.UImpl2[Tag,LHS,CSCMatrix[V],R],
zero: Zero[V], ct: ClassTag[V]):
UFunc.UImpl2[Tag,LHS,Transpose[SparseVector[V]],R] =
new UFunc.UImpl2[Tag,LHS,Transpose[SparseVector[V]],R] {
def apply(v: LHS, v2: Transpose[SparseVector[V]]): R = {
op(v,v2.inner.asCscRow)
}
}
@expand
@expand.valify
implicit def implOps_SVT_SVT_eq_SVT[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpAdd, OpSub) Op <: OpType]
(implicit @expand.sequence[Op]({_ + _}, {_ - _}) op: Op.Impl2[T, T, T],
@expand.sequence[T](0, 0.0, 0f, 0l) zero: T):
Op.Impl2[SparseVector[T], SparseVector[T], SparseVector[T]] =
new Op.Impl2[SparseVector[T], SparseVector[T], SparseVector[T]] {
def apply(a: SparseVector[T], b: SparseVector[T]): SparseVector[T] = {
require(b.length == a.length, "Vectors must be the same length!")
val asize: Int = a.activeSize
val bsize: Int = b.activeSize
val q: T = zero
val resultI: Array[Int] = new Array[Int](asize + bsize)
val resultV: Array[T] = new Array[T](asize + bsize)
var resultOff: Int = 0
var aoff: Int = 0
var boff: Int = 0
// double loop:
// b moves to catch up with a, then a takes a step (possibly bringing b along)
while(aoff < asize) {
while(boff < bsize && b.indexAt(boff) < a.indexAt(aoff)) {
resultI(resultOff) = b.indexAt(boff)
resultV(resultOff) = op(q, b.valueAt(boff))
resultOff += 1
boff += 1
}
val bvalue: T = if(boff < bsize && b.indexAt(boff) == a.indexAt(aoff)) {
val bv: T = b.valueAt(boff)
boff += 1
bv
} else {
q
}
resultI(resultOff) = a.indexAt(aoff)
resultV(resultOff) = op(a.valueAt(aoff), bvalue)
resultOff += 1
aoff += 1
}
while(boff < bsize) {
resultI(resultOff) = b.indexAt(boff)
resultV(resultOff) = op(q, b.valueAt(boff))
resultOff += 1
boff += 1
}
if(resultOff != resultI.length) {
new SparseVector[T](util.Arrays.copyOf(resultI, resultOff), util.Arrays.copyOf(resultV, resultOff), resultOff, a.length)
} else {
new SparseVector[T](resultI, resultV, resultOff, a.length)
}
}
implicitly[BinaryRegistry[Vector[T], Vector[T], Op.type, Vector[T]]].register(this)
}
implicit def implSubOp_SVT_SVT_eq_SVT[T:Ring:ClassTag]: OpSub.Impl2[SparseVector[T], SparseVector[T], SparseVector[T]] = {
new OpSub.Impl2[SparseVector[T], SparseVector[T], SparseVector[T]] {
val r = implicitly[Ring[T]]
def apply(a: SparseVector[T], b: SparseVector[T]): SparseVector[T] = {
require(b.length == a.length, "Vectors must be the same length!")
val asize: Int = a.activeSize
val bsize: Int = b.activeSize
val q: T = r.zero
val resultI: Array[Int] = new Array[Int](asize + bsize)
val resultV: Array[T] = new Array[T](asize + bsize)
var resultOff: Int = 0
var aoff: Int = 0
var boff: Int = 0
// double loop:
// b moves to catch up with a, then a takes a step (possibly bringing b along)
while (aoff < asize) {
while (boff < bsize && b.indexAt(boff) < a.indexAt(aoff)) {
resultI(resultOff) = b.indexAt(boff)
resultV(resultOff) = r.-(q, b.valueAt(boff))
resultOff += 1
boff += 1
}
val bvalue: T = if (boff < bsize && b.indexAt(boff) == a.indexAt(aoff)) {
val bv: T = b.valueAt(boff)
boff += 1
bv
} else {
q
}
resultI(resultOff) = a.indexAt(aoff)
resultV(resultOff) = r.-(a.valueAt(aoff), bvalue)
resultOff += 1
aoff += 1
}
while (boff < bsize) {
resultI(resultOff) = b.indexAt(boff)
resultV(resultOff) = r.-(q, b.valueAt(boff))
resultOff += 1
boff += 1
}
if (resultOff != resultI.length) {
val dat = new Array[T](resultOff)
Array.copy(resultV, 0,dat,0,resultOff)
new SparseVector[T](util.Arrays.copyOf(resultI, resultOff), dat, resultOff, a.length)
} else {
new SparseVector[T](resultI, resultV, resultOff, a.length)
}
}
}
}
implicit def implAddOp_SVT_SVT_eq_SVT[T:Semiring:ClassTag]: OpAdd.Impl2[SparseVector[T], SparseVector[T], SparseVector[T]] = {
new OpAdd.Impl2[SparseVector[T], SparseVector[T], SparseVector[T]] {
val r = implicitly[Semiring[T]]
def apply(a: SparseVector[T], b: SparseVector[T]): SparseVector[T] = {
require(b.length == a.length, "Vectors must be the same length!")
val asize: Int = a.activeSize
val bsize: Int = b.activeSize
val q: T = r.zero
val resultI: Array[Int] = new Array[Int](asize + bsize)
val resultV: Array[T] = new Array[T](asize + bsize)
var resultOff: Int = 0
var aoff: Int = 0
var boff: Int = 0
// double loop:
// b moves to catch up with a, then a takes a step (possibly bringing b along)
while (aoff < asize) {
while (boff < bsize && b.indexAt(boff) < a.indexAt(aoff)) {
resultI(resultOff) = b.indexAt(boff)
resultV(resultOff) = r.+(q, b.valueAt(boff))
resultOff += 1
boff += 1
}
val bvalue: T = if (boff < bsize && b.indexAt(boff) == a.indexAt(aoff)) {
val bv: T = b.valueAt(boff)
boff += 1
bv
} else {
q
}
resultI(resultOff) = a.indexAt(aoff)
resultV(resultOff) = r.+(a.valueAt(aoff), bvalue)
resultOff += 1
aoff += 1
}
while (boff < bsize) {
resultI(resultOff) = b.indexAt(boff)
resultV(resultOff) = r.+(q, b.valueAt(boff))
resultOff += 1
boff += 1
}
if (resultOff != resultI.length) {
val dat = new Array[T](resultOff)
Array.copy(resultV, 0,dat,0,resultOff)
new SparseVector[T](util.Arrays.copyOf(resultI, resultOff), dat, resultOff, a.length)
} else {
new SparseVector[T](resultI, resultV, resultOff, a.length)
}
}
}
}
@expand
@expand.valify
implicit def implOpMulScalar_SVT_SVT_eq_SVT[@expand.args(Int, Double, Float, Long) T]
(implicit @expand.sequence[T](0, 0.0, 0f, 0l) zero: T):
OpMulScalar.Impl2[SparseVector[T], SparseVector[T], SparseVector[T]] =
new OpMulScalar.Impl2[SparseVector[T], SparseVector[T], SparseVector[T]] {
def apply(a: SparseVector[T], b: SparseVector[T]): SparseVector[T] = {
if (b.activeSize < a.activeSize) {
apply(b, a)
} else {
require(b.length == a.length, "Vectors must be the same length!")
val asize: Int = a.activeSize
val bsize: Int = b.activeSize
val resultI: Array[Int] = new Array[Int](math.min(asize, bsize))
val resultV: Array[T] = new Array[T](math.min(asize, bsize))
var resultOff: Int = 0
var aoff: Int = 0
var boff: Int = 0
// in principle we could do divide and conquer here
// by picking the middle of a, figuring out where that is in b, and then recursing,
// using it as a bracketing.
// double loop:
// b moves to catch up with a, then a takes a step (possibly bringing b along)
while (aoff < asize) {
val aind: Int = a.indexAt(aoff)
// the min reflects the invariant that index aind must be in the first aind active indices in b's index.
boff = util.Arrays.binarySearch(b.index, boff, math.min(bsize, aind + 1), aind)
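          // Arrays.binarySearch returns -(insertionPoint) - 1 when the key is absent, so ~boff
          // recovers the insertion point and the loop leapfrogs to it.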
if (boff < 0) {
boff = ~boff
if (boff == bsize) {
// we're through the b array, so we're done.
aoff = asize
} else {
// fast forward a until we get to the b we just got to
val bind = b.indexAt(boff)
var newAoff = util.Arrays.binarySearch(a.index, aoff, math.min(asize, bind + 1), bind)
if (newAoff < 0) {
newAoff = ~newAoff
boff += 1
}
assert(newAoff > aoff, bind + " " + aoff + " " + newAoff + " " + a.index(aoff) + " " + a.index(newAoff) + " " + a + " " + b)
aoff = newAoff
}
} else {
// b is there, a is there, do the multiplication!
resultI(resultOff) = aind
resultV(resultOff) = a.valueAt(aoff) * b.valueAt(boff)
aoff += 1
boff += 1
resultOff += 1
}
}
if (resultOff != resultI.length) {
new SparseVector[T](util.Arrays.copyOf(resultI, resultOff), util.Arrays.copyOf(resultV, resultOff), resultOff, a.length)
} else {
new SparseVector[T](resultI, resultV, resultOff, a.length)
}
}
}
implicitly[BinaryRegistry[Vector[T], Vector[T], OpMulScalar.type, Vector[T]]].register(this)
}
@expand
@expand.valify
implicit def implOps_SVT_SVT_eq_SVT[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpDiv, OpSet, OpMod, OpPow) Op <: OpType]
(implicit @expand.sequence[Op]({_ / _}, {(a,b) => b}, {_ % _}, {_ pow _}) op: Op.Impl2[T, T, T]):
Op.Impl2[SparseVector[T], SparseVector[T], SparseVector[T]] =
new Op.Impl2[SparseVector[T], SparseVector[T], SparseVector[T]] {
def apply(a: SparseVector[T], b: SparseVector[T]): SparseVector[T] = {
require(b.length == a.length, "Vectors must be the same length!")
val result: VectorBuilder[T] = new VectorBuilder[T](a.length)
var i: Int = 0
while(i < a.length) {
result.add(i, op(a(i), b(i)))
i += 1
}
result.toSparseVector(true, true)
}
implicitly[BinaryRegistry[Vector[T], Vector[T], Op.type, Vector[T]]].register(this)
}
@expand
@expand.valify
implicit def implOps_SVT_VT_eq_SVT[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpDiv, OpSet, OpMod, OpPow) Op <: OpType]
(implicit @expand.sequence[Op]({_ / _}, {(a,b) => b}, {_ % _}, {_ pow _}) op: Op.Impl2[T, T, T]):
Op.Impl2[SparseVector[T], Vector[T], SparseVector[T]] =
new Op.Impl2[SparseVector[T], Vector[T], SparseVector[T]] {
def apply(a: SparseVector[T], b: Vector[T]): SparseVector[T] = {
require(b.length == a.length, "Vectors must be the same length!")
val result: VectorBuilder[T] = new VectorBuilder[T](a.length)
var i: Int = 0
while(i < a.length) {
result.add(i, op(a(i), b(i)))
i += 1
}
result.toSparseVector(true, true)
}
implicitly[BinaryRegistry[Vector[T], Vector[T], Op.type, Vector[T]]].register(this)
}
@expand
implicit def implOpSVT_Field_SVT[@expand.args(OpAdd,OpSub,OpDiv,OpMod,OpPow) Op <: OpType, T:Field:ClassTag]
(implicit @expand.sequence[Op]({f.+(_,_)}, {f.-(_,_)}, {f./(_,_)}, {f.%(_,_)},{f.pow(_,_)}) op: Op.Impl2[T,T,T]): Op.Impl2[SparseVector[T],T,SparseVector[T]] = {
new Op.Impl2[SparseVector[T], T, SparseVector[T]] {
def apply(a: SparseVector[T], b: T): SparseVector[T] = {
val result: VectorBuilder[T] = new VectorBuilder[T](a.length)
val f = implicitly[Field[T]]
var i: Int = 0
while (i < a.length) {
val r = op(a(i), b)
if (r != f.zero)
result.add(i, r)
i += 1
}
result.toSparseVector(true, true)
}
}
}
@expand
implicit def implOps_SVT_Field_eq_SVT[@expand.args(OpMulScalar, OpMulMatrix) Op<:OpType, T:Field:ClassTag]:
Op.Impl2[SparseVector[T], T, SparseVector[T]] =
new Op.Impl2[SparseVector[T], T, SparseVector[T]] {
val f = implicitly[Field[T]]
def apply(a: SparseVector[T], b: T): SparseVector[T] = {
val result: VectorBuilder[T] = new VectorBuilder[T](a.length)
var i: Int = 0
while(i < a.activeSize) {
result.add(a.indexAt(i), f.*(a.valueAt(i), b))
i += 1
}
result.toSparseVector(true, true)
}
implicitly[BinaryRegistry[Vector[T], T, Op.type, Vector[T]]].register(this)
}
@expand
@expand.valify
implicit def implOps_SVT_T_eq_SVT[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpAdd, OpSub, OpSet, OpPow) Op <: OpType]
(implicit @expand.sequence[Op]({_ + _}, {_ - _}, {(a,b) => b}, {_ pow _}) op: Op.Impl2[T, T, T],
@expand.sequence[T](0, 0.0, 0.0f, 0l) zero: T):
Op.Impl2[SparseVector[T], T, SparseVector[T]] =
new Op.Impl2[SparseVector[T], T, SparseVector[T]] {
def apply(a: SparseVector[T], b: T): SparseVector[T] = {
val result: VectorBuilder[T] = new VectorBuilder[T](a.length)
var i: Int = 0
while(i < a.length) {
val r = op(a(i), b)
if(r != zero)
result.add(i,r)
i += 1
}
result.toSparseVector(true, true)
}
implicitly[BinaryRegistry[Vector[T], T, Op.type, Vector[T]]].register(this)
}
@expand
@expand.valify
implicit def implOps_SVT_T_eq_SVT[@expand.args(Int, Double, Float, Long) T, @expand.args(OpDiv, OpMod) Op <: OpType]
(implicit @expand.sequence[Op]({_ / _}, {_ % _}) op: Op.Impl2[T, T, T],
@expand.sequence[T](0, 0.0, 0.0f, 0l) zero: T): Op.Impl2[SparseVector[T], T, SparseVector[T]] =
new Op.Impl2[SparseVector[T], T, SparseVector[T]] {
def apply(a: SparseVector[T], b: T): SparseVector[T] = {
val result: VectorBuilder[T] = new VectorBuilder[T](a.length)
if(b == zero) {
var i: Int = 0
while(i < a.length) {
val r = op(a(i), b)
if(r != zero)
result.add(i,r)
i += 1
}
} else {
var i: Int = 0
cforRange(0 until a.activeSize) { i =>
val r = op(a.valueAt(i),b)
if(r != zero)
result.add(a.indexAt(i),r)
}
}
result.toSparseVector(true, true)
}
implicitly[BinaryRegistry[Vector[T], T, Op.type, Vector[T]]].register(this)
}
@expand
@expand.valify
implicit def implOps_SVT_T_eq_SVT[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpMulScalar, OpMulMatrix) Op<:OpType]
(implicit @expand.sequence[T](0, 0.0, 0.0f, 0l)
zero: T):
Op.Impl2[SparseVector[T], T, SparseVector[T]] =
new Op.Impl2[SparseVector[T], T, SparseVector[T]] {
def apply(a: SparseVector[T], b: T): SparseVector[T] = {
val result: VectorBuilder[T] = new VectorBuilder[T](a.length)
var i: Int = 0
while(i < a.activeSize) {
result.add(a.indexAt(i), a.valueAt(i) * b)
i += 1
}
result.toSparseVector(true, true)
}
implicitly[BinaryRegistry[Vector[T], T, Op.type, Vector[T]]].register(this)
}
protected def updateFromPure[T, Op<:OpType, Other](implicit op: UFunc.UImpl2[Op, SparseVector[T], Other, SparseVector[T]]): UFunc.InPlaceImpl2[Op, SparseVector[T], Other] = {
new UFunc.InPlaceImpl2[Op, SparseVector[T], Other] {
def apply(a: SparseVector[T], b: Other) {
val result = op(a, b)
a.use(result.index, result.data, result.activeSize)
}
}
}
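  // Most of the in-place operators below are derived from the pure operators above via
  // updateFromPure: the pure result is computed first and its index/data arrays are then
  // handed to the left-hand operand through `use`.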
implicit def implOpSet_SVT_SVT_InPlace[T]: OpSet.InPlaceImpl2[SparseVector[T], SparseVector[T]] = {
new OpSet.InPlaceImpl2[SparseVector[T], SparseVector[T]] {
def apply(a: SparseVector[T], b: SparseVector[T]) {
val result = b.copy
a.use(result.index, result.data, result.activeSize)
}
}
}
implicit def implOpSet_SVT_T_InPlace[T:Semiring:ClassTag]: OpSet.InPlaceImpl2[SparseVector[T], T] = {
val zero = implicitly[Semiring[T]].zero
new OpSet.InPlaceImpl2[SparseVector[T], T] {
def apply(a: SparseVector[T], b: T) {
if(b == zero) {
a.use(new Array[Int](2), new Array[T](2), 0)
return
}
val data = Array.fill(a.length)(b)
val index = Array.range(0, a.length)
a.use(index, data, a.length)
}
}
}
// this shouldn't be necessary but it is:
@expand
@expand.valify
implicit def implOps_SVT_SVT_InPlace[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpAdd, OpSub, OpDiv, OpPow, OpMod, OpMulScalar) Op <: OpType]:
Op.InPlaceImpl2[SparseVector[T], SparseVector[T]] = {
val uop: Op.InPlaceImpl2[SparseVector[T], SparseVector[T]] = updateFromPure(implicitly[Op.Impl2[SparseVector[T], SparseVector[T], SparseVector[T]]])
implicitly[BinaryUpdateRegistry[Vector[T], Vector[T], Op.type]].register(uop)
uop
}
@expand
implicit def implOps_SVT_Field_InPlace[@expand.args(OpAdd, OpSub, OpDiv, OpPow, OpMod, OpMulScalar, OpMulMatrix) Op <: OpType, T:Field:ClassTag]:
Op.InPlaceImpl2[SparseVector[T], T] = {
val uop: Op.InPlaceImpl2[SparseVector[T], T] = updateFromPure(implicitly[Op.Impl2[SparseVector[T], T, SparseVector[T]]])
uop
}
@expand
@expand.valify
implicit def implOps_SVT_T_InPlace[@expand.args(Int, Double, Float, Long) T,
@expand.args(OpAdd, OpSub, OpDiv, OpPow, OpMod, OpMulScalar, OpMulMatrix) Op <: OpType]:
Op.InPlaceImpl2[SparseVector[T], T] = {
val uop: Op.InPlaceImpl2[SparseVector[T], T] = updateFromPure(implicitly[Op.Impl2[SparseVector[T], T, SparseVector[T]]])
uop
}
@expand
@expand.valify
implicit def implOpMulInner_SVT_SVT_eq_T [@expand.args(Int, Double, Float, Long) T]
(implicit @expand.sequence[T](0, 0.0, 0f, 0l) zero: T):
OpMulInner.Impl2[SparseVector[T], SparseVector[T], T] =
new OpMulInner.Impl2[SparseVector[T], SparseVector[T], T] {
def apply(a: SparseVector[T], b: SparseVector[T]): T = {
require(b.length == a.length, "Vectors must be the same length!")
if (b.activeSize < a.activeSize) {
apply(b, a)
} else if (a.activeSize == 0) {
zero
} else if (b.activeSize <= 32) { // b is bigger than a
smallVectors(a, b)
} else {
bigVectors(a, b)
}
}
def smallVectors(a: SparseVector[T], b: SparseVector[T]): T = {
val asize: Int = a.activeSize
val bsize: Int = b.activeSize
var result: T = zero
var aoff: Int = 0
var boff: Int = 0
while (aoff < asize && boff < bsize) {
if (a.indexAt(aoff) < b.indexAt(boff))
aoff += 1
else if (b.indexAt(boff) < a.indexAt(aoff))
boff += 1
else {
result += a.valueAt(aoff) * b.valueAt(boff)
aoff += 1
boff += 1
}
}
result
}
def bigVectors(a: SparseVector[T], b: SparseVector[T]): T = {
val asize: Int = a.activeSize
val bsize: Int = b.activeSize
var result: T = zero
var aoff: Int = 0
var boff: Int = 0
// used for finding upper bounds for location of b
// var bLastOff = 0
// var bLastInd = 0
// in principle we could do divide and conquer here
// by picking the middle of a, figuring out where that is in b, and then recursing,
// using it as a bracketing.
// double loop:
// b moves to catch up with a, then a takes a step (possibly bringing b along)
while (aoff < asize) {
val aind: Int = a.indexAt(aoff)
val bMax = math.min(bsize, aind + 1)
//math.min(bsize, bLastOff + aind - bLastInd + 1)
// we use gallop search because we expect aind to be closish to b.index(boff)
boff = ArrayUtil.gallopSearch(b.index, boff, bMax, aind)
if (boff < 0) {
boff = ~boff
if (boff == bsize) {
// we're through the b array, so we're done.
aoff = asize
} else {
// fast forward a until we get to the b we just got to
val bind: Int = b.indexAt(boff)
// bLastOff = boff
// bLastInd = bind
// val aMax = math.min(asize, aoff + bind - aind + 1)
val aMax = math.min(asize, bind + 1)
var newAoff: Int = ArrayUtil.gallopSearch(a.index, aoff, aMax, bind)
if (newAoff < 0) {
newAoff = ~newAoff
boff += 1
}
aoff = newAoff
}
} else {
// bLastOff = boff
// bLastInd = aind
// b is there, a is there, do the multiplication!
result += a.valueAt(aoff) * b.valueAt(boff)
aoff += 1
boff += 1
}
}
result
}
implicitly[BinaryRegistry[Vector[T], Vector[T], OpMulInner.type, T]].register(this)
}
implicit def implOpMulInner_SVT_SVT_eq_T [T:ClassTag:Zero:Semiring]:
OpMulInner.Impl2[SparseVector[T], SparseVector[T], T] =
new OpMulInner.Impl2[SparseVector[T], SparseVector[T], T] {
val s = implicitly[Semiring[T]]
def apply(a: SparseVector[T], b: SparseVector[T]): T = {
if(b.activeSize < a.activeSize) {
apply(b, a)
} else {
require(b.length == a.length, "Vectors must be the same length!")
val asize: Int = a.activeSize
val bsize: Int = b.activeSize
var result: T = s.zero
var aoff: Int = 0
var boff: Int = 0
// in principle we could do divide and conquer here
// by picking the middle of a, figuring out where that is in b, and then recursing,
// using it as a bracketing.
// double loop:
// b moves to catch up with a, then a takes a step (possibly bringing b along)
while (aoff < asize) {
val aind: Int = a.indexAt(aoff)
boff = util.Arrays.binarySearch(b.index, boff, math.min(bsize, aind + 1), aind)
if (boff < 0) {
boff = ~boff
if (boff == bsize) {
// we're through the b array, so we're done.
aoff = asize
} else {
// fast forward a until we get to the b we just got to
val bind: Int = b.indexAt(boff)
var newAoff: Int = util.Arrays.binarySearch(a.index, aoff, math.min(asize, bind + 1), bind)
if (newAoff < 0) {
newAoff = ~newAoff
boff += 1
}
assert(newAoff > aoff, aoff + " " + newAoff)
aoff = newAoff
}
} else {
// b is there, a is there, do the multiplication!
result = s.+(result,s.*(a.valueAt(aoff), b.valueAt(boff)))
aoff += 1
boff += 1
}
}
result
}
}
}
@expand
@expand.valify
implicit def implScaleAdd_SVT_T_SVT_InPlace[@expand.args(Int, Double, Float, Long) T]
(implicit @expand.sequence[T](0, 0.0, 0f, 0l) zero: T):
scaleAdd.InPlaceImpl3[SparseVector[T], T, SparseVector[T]] =
new scaleAdd.InPlaceImpl3[SparseVector[T], T, SparseVector[T]] {
def apply(y: SparseVector[T], a: T, x: SparseVector[T]): Unit = {
require(x.length == y.length, "Vectors must be the same length!")
val asize: Int = y.activeSize
val bsize: Int = x.activeSize
if(a != zero) {
val resultI: Array[Int] = new Array[Int](asize + bsize)
val resultV: Array[T] = new Array[T](asize + bsize)
var resultOff: Int = 0
var aoff: Int = 0
var boff: Int = 0
// double loop:
// b moves to catch up with a, then a takes a step (possibly bringing b along)
while (aoff < asize) {
while (boff < bsize && x.indexAt(boff) < y.indexAt(aoff)) {
resultI(resultOff) = x.indexAt(boff)
resultV(resultOff) = a * x.valueAt(boff)
resultOff += 1
boff += 1
}
val bvalue: T = if (boff < bsize && x.indexAt(boff) == y.indexAt(aoff)) {
val bv: T = a * x.valueAt(boff)
boff += 1
bv
} else {
zero
}
resultI(resultOff) = y.indexAt(aoff)
resultV(resultOff) = y.valueAt(aoff) + bvalue
resultOff += 1
aoff += 1
}
while (boff < bsize) {
resultI(resultOff) = x.indexAt(boff)
resultV(resultOff) = a * x.valueAt(boff)
resultOff += 1
boff += 1
}
if (resultOff != resultI.length) {
y.use(util.Arrays.copyOf(resultI, resultOff), util.Arrays.copyOf(resultV, resultOff), resultOff)
} else {
y.use(resultI, resultV, resultOff)
}
}
}
implicitly[TernaryUpdateRegistry[Vector[T], T, Vector[T], scaleAdd.type]].register(this)
}
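  // Sketch of typical use through breeze's axpy alias (assuming equal-length sparse vectors):
  //   axpy(2.0, x, y)  // y := y + 2.0 * x, rebuilding y's index/data arrays in one merge pass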
@expand
implicit def implScaleAdd_SVT_Field_SVT_InPlace[T:Field:ClassTag]:
scaleAdd.InPlaceImpl3[SparseVector[T], T, SparseVector[T]] =
new scaleAdd.InPlaceImpl3[SparseVector[T], T, SparseVector[T]] {
val f = implicitly[Field[T]]
def apply(y: SparseVector[T], a: T, x: SparseVector[T]): Unit = {
require(x.length == y.length, "Vectors must be the same length!")
val asize: Int = y.activeSize
val bsize: Int = x.activeSize
if(a != f.zero) {
val resultI: Array[Int] = new Array[Int](asize + bsize)
val resultV: Array[T] = new Array[T](asize + bsize)
var resultOff: Int = 0
var aoff: Int = 0
var boff: Int = 0
// double loop:
// b moves to catch up with a, then a takes a step (possibly bringing b along)
while (aoff < asize) {
while (boff < bsize && x.indexAt(boff) < y.indexAt(aoff)) {
resultI(resultOff) = x.indexAt(boff)
resultV(resultOff) = f.*(a, x.valueAt(boff))
resultOff += 1
boff += 1
}
val bvalue: T = if (boff < bsize && x.indexAt(boff) == y.indexAt(aoff)) {
val bv: T = f.*(a, x.valueAt(boff))
boff += 1
bv
} else {
f.zero
}
resultI(resultOff) = y.indexAt(aoff)
resultV(resultOff) = f.+(y.valueAt(aoff), bvalue)
resultOff += 1
aoff += 1
}
while (boff < bsize) {
resultI(resultOff) = x.indexAt(boff)
resultV(resultOff) = f.*(a,x.valueAt(boff))
resultOff += 1
boff += 1
}
if (resultOff != resultI.length) {
y.use(util.Arrays.copyOf(resultI, resultOff), ArrayUtil.copyOf[T](resultV, resultOff), resultOff)
} else {
y.use(resultI, resultV, resultOff)
}
}
}
}
implicit def implNorm_SVT_Field_eq_D[T](implicit f: Field[T]):
norm.Impl2[SparseVector[T], Double, Double] =
new norm.Impl2[SparseVector[T], Double, Double] {
def apply(v: SparseVector[T], n: Double): Double = {
import v._
if (n == 1) {
var sum: Double = 0.0
activeValuesIterator foreach (v => sum += f.sNorm(v) )
sum
} else if (n == 2) {
var sum: Double = 0.0
activeValuesIterator foreach (v => { val nn = f.sNorm(v); sum += nn * nn })
math.sqrt(sum)
} else if (n == Double.PositiveInfinity) {
var max: Double = 0.0
activeValuesIterator foreach (v => { val nn = f.sNorm(v); if (nn > max) max = nn })
max
} else {
var sum: Double = 0.0
activeValuesIterator foreach (v => { val nn = f.sNorm(v); sum += math.pow(nn,n) })
math.pow(sum, 1.0 / n)
}
}
}
@expand
@expand.valify
implicit def implNorm_SVT_D_eq_D[@expand.args(Int, Double, Float, Long) T]:
norm.Impl2[SparseVector[T], Double, Double] =
new norm.Impl2[SparseVector[T], Double, Double] {
def apply(v: SparseVector[T], n: Double): Double = {
import v._
if (n == 1) {
var sum: Double = 0.0
activeValuesIterator foreach (v => sum += v.abs.toDouble )
sum
} else if (n == 2) {
var sum: Double = 0.0
activeValuesIterator foreach (v => { val nn = v.abs.toDouble; sum += nn * nn })
math.sqrt(sum)
} else if (n == Double.PositiveInfinity) {
var max: Double = 0.0
activeValuesIterator foreach (v => { val nn = v.abs.toDouble; if (nn > max) max = nn })
max
} else {
var sum: Double = 0.0
activeValuesIterator foreach (v => { val nn = v.abs.toDouble; sum += math.pow(nn,n) })
math.pow(sum, 1.0 / n)
}
}
}
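  // Usage sketch for the norm implementations above (only active values are visited):
  //   val v = SparseVector.zeros[Double](3); v(0) = 3.0; v(2) = -4.0
  //   norm(v, 1.0)                        // 7.0
  //   norm(v, 2.0)                        // 5.0
  //   norm(v, Double.PositiveInfinity)    // 4.0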
class CanZipMapValuesSparseVector[@spec(Double, Int, Float, Long) V, @spec(Int, Double) RV:ClassTag:Zero:Semiring]
extends CanZipMapValues[SparseVector[V],V,RV,SparseVector[RV]] {
def create(length : Int): SparseVector[RV] = zeros(length)
/**Maps all corresponding values from the two collection. */
def map(from: SparseVector[V], from2: SparseVector[V], fn: (V, V) => RV): SparseVector[RV] = {
require(from.length == from2.length, "Vector lengths must match!")
val zz = fn(from.default, from2.default)
if(zz != implicitly[Zero[RV]].zero) {
val result: SparseVector[RV] = create(from.length)
var i = 0
while (i < from.length) {
result(i) = fn(from(i), from2(i))
i += 1
}
result
} else {
val vb = new VectorBuilder[RV](from.length)
var off1, off2 = 0
while(off1 < from.activeSize) {
while(off2 < from2.activeSize && from2.indexAt(off2) < from.indexAt(off1)) {
val index = from2.indexAt(off2)
vb.add(index, fn(from.default, from2.valueAt(off2)))
off2 += 1
}
if (off2 < from2.activeSize && from.indexAt(off1) == from2.indexAt(off2)) {
val index = from2.indexAt(off2)
vb.add(index, fn(from.valueAt(off1), from2.valueAt(off2)))
off2 += 1
} else {
val index = from.indexAt(off1)
vb.add(index, fn(from.valueAt(off1), from2.default))
}
off1 += 1
}
while(off2 < from2.activeSize) {
val index = from2.indexAt(off2)
vb.add(index, fn(from.default, from2.valueAt(off2)))
off2 += 1
}
vb.toSparseVector(true, true)
}
}
}
implicit def zipMap[V, R:ClassTag:Zero:Semiring]: CanZipMapValuesSparseVector[V, R] = new CanZipMapValuesSparseVector[V, R]
implicit val zipMap_d: CanZipMapValuesSparseVector[Double, Double] = new CanZipMapValuesSparseVector[Double, Double]
implicit val zipMap_f: CanZipMapValuesSparseVector[Float, Float] = new CanZipMapValuesSparseVector[Float, Float]
implicit val zipMap_i: CanZipMapValuesSparseVector[Int, Int] = new CanZipMapValuesSparseVector[Int, Int]
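  // Usage sketch: the zip-map instances above combine two sparse vectors element-wise,
  // for example through the Double instance defined just above:
  //   val a = SparseVector.zeros[Double](3); a(0) = 1.0
  //   val b = SparseVector.zeros[Double](3); b(2) = 2.0
  //   val sums = zipMap_d.map(a, b, _ + _)   // active entries: 1.0 at index 0, 2.0 at index 2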
class CanZipMapKeyValuesSparseVector[@spec(Double, Int, Float, Long) V, @spec(Int, Double) RV:ClassTag:Zero:Semiring]
extends CanZipMapKeyValues[SparseVector[V],Int, V,RV,SparseVector[RV]] {
def create(length : Int): SparseVector[RV] = zeros(length)
/**Maps all corresponding values from the two collection. */
def map(from: SparseVector[V], from2: SparseVector[V], fn: (Int, V, V) => RV): SparseVector[RV] = {
require(from.length == from2.length, "Vector lengths must match!")
val result: SparseVector[RV] = create(from.length)
var i = 0
while (i < from.length) {
result(i) = fn(i, from(i), from2(i))
i += 1
}
result
}
override def mapActive(from: SparseVector[V], from2: SparseVector[V], fn: (Int, V, V) => RV): SparseVector[RV] = {
require(from.length == from2.length, "Vector lengths must match!")
val vb = new VectorBuilder[RV](from.length)
var off1, off2 = 0
while(off1 < from.activeSize) {
while(off2 < from2.activeSize && from2.indexAt(off2) < from.indexAt(off1)) {
val index = from2.indexAt(off2)
vb.add(index, fn(index, from.default, from2.valueAt(off2)))
off2 += 1
}
if(off2 < from2.activeSize && from.indexAt(off1) == from2.indexAt(off2)) {
val index = from2.indexAt(off2)
vb.add(index, fn(index, from.valueAt(off1), from2.valueAt(off2)))
off2 += 1
} else {
val index = from.indexAt(off1)
vb.add(index, fn(index, from.valueAt(off1), from2.default))
}
off1 += 1
}
while(off2 < from2.activeSize) {
val index = from2.indexAt(off2)
vb.add(index, fn(index, from.default, from2.valueAt(off2)))
off2 += 1
}
vb.toSparseVector(true, true)
}
}
implicit def zipMapKV[V, R:ClassTag:Zero:Semiring]: CanZipMapKeyValuesSparseVector[V, R] = new CanZipMapKeyValuesSparseVector[V, R]
implicit def implOpNeg_SVT_eq_SVT[@spec(Double, Int, Float, Long) V]
(implicit scale: OpMulScalar.Impl2[SparseVector[V], V, SparseVector[V]], field: Ring[V]):
OpNeg.Impl[SparseVector[V], SparseVector[V]] = {
new OpNeg.Impl[SparseVector[V], SparseVector[V]] {
override def apply(a : SparseVector[V]): SparseVector[V] = {
scale(a, field.negate(field.one))
}
}
}
}
trait SparseVector_DenseMatrixOps { this: SparseVector.type =>
@expand
@expand.valify
implicit def implOpMulMatrix_DM_SV_eq_DV[@expand.args(Int, Float, Long, Double) T]:OpMulMatrix.Impl2[DenseMatrix[T], SparseVector[T], DenseVector[T]] = {
new OpMulMatrix.Impl2[DenseMatrix[T], SparseVector[T], DenseVector[T]] {
override def apply(v: DenseMatrix[T], v2: SparseVector[T]): DenseVector[T] = {
require(v.cols == v2.length)
val result = DenseVector.zeros[T](v.rows)
cforRange(0 until v2.activeSize) { i =>
axpy(v2.valueAt(i), v(::, v2.indexAt(i)), result)
}
result
}
implicitly[BinaryRegistry[DenseMatrix[T], Vector[T], OpMulMatrix.type, DenseVector[T]]].register(this)
}
}
}
| claydonkey/breeze | math/src/main/scala/breeze/linalg/operators/SparseVectorOps.scala | Scala | apache-2.0 | 47,719 |
// Copyright (C) 2011 Dmitri Nikulin
//
// This file is part of Vijil.
//
// Vijil is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Vijil is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Vijil. If not, see <http://www.gnu.org/licenses/>.
//
// Repository: https://github.com/dnikulin/vijil
// Email: [email protected]
package com.dnikulin.vijil.render
import scala.xml._
import com.dnikulin.vijil.text._
import com.dnikulin.vijil.tools.ArrSeq
object RenderNotes {
def apply(root: TextSpan, notes: Seq[TextNote]): IndexedSeq[NodeSpan] = {
for (note <- ArrSeq.convert(notes); if root.includes(note.at)) yield {
// Empty spans are not rendered, so try to make a 1-letter span.
val cmin = note.at
val cmax = (cmin + 1) min (root.max - 1)
val span = root.cut(cmin, cmax)
def wrap(nodes: NodeSeq): NodeSeq = {
// Insert note before the 1-letter span.
<a class="vijil-note" href="#" title={note.body}> ({note.label}) </a> ++ nodes
}
// Give a very high depth so that this is not repeated.
new NodeSpan(span, wrap, 1000)
}
}
}
| dnikulin/vijil | src/main/scala/com/dnikulin/vijil/render/RenderNotes.scala | Scala | agpl-3.0 | 1,604 |
package com.twitter.finagle.stats
import com.twitter.common.metrics._
import com.twitter.common.stats.{Statistics, ApproximateHistogram}
object ImmediateMetricsStatsReceiver {
def newHistogramInterface(name: String): HistogramInterface = {
new HistogramInterface {
private[this] val hist = new ApproximateHistogram()
private[this] val stats = new Statistics()
override def snapshot(): Snapshot = synchronized {
new Snapshot {
override def avg(): Double = stats.mean()
override def count(): Long = stats.populationSize()
override def min(): Long = stats.min()
override def max(): Long = stats.max()
override def stddev(): Double = stats.standardDeviation()
override def sum(): Long = stats.sum()
override def percentiles(): Array[Percentile] = {
val quantiles = Histogram.DEFAULT_QUANTILES
(hist.getQuantiles(quantiles) zip quantiles.toSeq) map {
case (q, p) => new Percentile(p, q)
}
}
}
}
override def getName: String = name
override def clear(): Unit = synchronized {
stats.clear()
hist.clear()
}
override def add(n: Long): Unit = synchronized {
stats.accumulate(n)
hist.add(n)
}
}
}
}
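// Usage sketch for the histogram factory above: each added value is folded into the
// statistics immediately, so a snapshot reflects everything seen so far.
//   val h = ImmediateMetricsStatsReceiver.newHistogramInterface("request_latency_ms")
//   (1L to 100L).foreach(h.add)
//   val snap = h.snapshot()
//   snap.count()   // 100
//   snap.max()     // 100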
/**
 * An implementation of MetricsStatsReceiver that uses ApproximateHistogram
 * instead of WindowedApproximateHistogram: any added value is immediately
 * aggregated into the result.
 */
class ImmediateMetricsStatsReceiver(registry: Metrics)
extends MetricsStatsReceiver(registry, ImmediateMetricsStatsReceiver.newHistogramInterface) {
def this() = this(MetricsStatsReceiver.defaultRegistry)
}
| koshelev/finagle | finagle-stats/src/main/scala/com/twitter/finagle/stats/ImmediateMetricsStatsReceiver.scala | Scala | apache-2.0 | 1,743 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail.internal
import cats.effect.Sync
import cats.syntax.all._
import monix.execution.internal.collection.ChunkedArrayStack
import monix.tail.Iterant
import monix.tail.Iterant.{Halt, Last, Next, NextBatch, NextCursor, Scope, Suspend}
private[tail] object IterantFoldRightL {
/** Implementation for `Iterant.foldRightL`. */
def apply[F[_], A, B](self: Iterant[F, A], b: F[B], f: (A, F[B]) => F[B])(implicit F: Sync[F]): F[B] =
F.suspend(new Loop(b, f).apply(self))
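  // Usage sketch (the public entry point is assumed to be Iterant.foldRightL, which
  // delegates here): a lazily short-circuiting "exists" over a Task-based stream.
  //   import monix.eval.Task
  //   Iterant[Task].of(1, 2, 3, 4).foldRightL(Task.now(false)) { (a, rest) =>
  //     if (a == 3) Task.now(true) else rest   // `rest` is only evaluated when needed
  //   }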
private final class Loop[F[_], A, B](b: F[B], f: (A, F[B]) => F[B])(implicit F: Sync[F])
extends Iterant.Visitor[F, A, F[B]] { self =>
private[this] var remainder: Iterant[F, A] = _
private[this] var suspendRef: F[B] = _
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Used in visit(Concat)
private[this] var stackRef: ChunkedArrayStack[F[Iterant[F, A]]] = _
private def stackPush(item: F[Iterant[F, A]]): Unit = {
if (stackRef == null) stackRef = ChunkedArrayStack()
stackRef.push(item)
}
private def finish(): F[B] = {
val rest =
if (stackRef != null) stackRef.pop()
else null.asInstanceOf[F[Iterant[F, A]]]
rest match {
case null => b
case xs => xs.flatMap(this)
}
}
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
def visit(ref: Next[F, A]): F[B] =
f(ref.item, ref.rest.flatMap(this))
def visit(ref: NextBatch[F, A]): F[B] =
visit(ref.toNextCursor())
def visit(ref: NextCursor[F, A]): F[B] = {
val cursor = ref.cursor
if (!cursor.hasNext())
ref.rest.flatMap(this)
else
f(cursor.next(), suspend(ref))
}
def visit(ref: Suspend[F, A]): F[B] =
ref.rest.flatMap(this)
def visit(ref: Iterant.Concat[F, A]): F[B] = {
stackPush(ref.rh)
ref.lh.flatMap(this)
}
def visit[S](ref: Scope[F, S, A]): F[B] =
ref.runFold(this)
def visit(ref: Last[F, A]): F[B] =
f(ref.item, finish())
def visit(ref: Halt[F, A]): F[B] =
ref.e match {
case None => finish()
case Some(e) =>
F.raiseError(e)
}
def fail(e: Throwable): F[B] =
F.raiseError(e)
private def suspend(node: Iterant[F, A]): F[B] = {
if (suspendRef == null) suspendRef = F.suspend {
self.remainder match {
case null => fail(new NullPointerException("foldRight/remainder"))
case rest => this.apply(rest)
}
}
remainder = node
suspendRef
}
}
}
| monifu/monix | monix-tail/shared/src/main/scala/monix/tail/internal/IterantFoldRightL.scala | Scala | apache-2.0 | 3,244 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.releaser.domain
import java.nio.file.Paths
import org.scalatest.{Matchers, TryValues, WordSpec}
import uk.gov.hmrc.releaser.GithubApi
import scala.util.Failure
class ArtefactMetaDataSpecs extends WordSpec with Matchers with TryValues{
"ArtefactMetaData" should {
"build instance from file" in {
val md = ArtefactMetaData.fromFile(Paths.get(this.getClass.getResource("/sbt-bobby/sbt-bobby.jar").toURI)) match {
case Failure(e) => fail(e)
case s => s
}
md.success.value.commitAuthor shouldBe "Charles Kubicek"
md.success.value.sha shouldBe "e733d26fa504c040f2c95ecd25a3a55399a00883"
md.success.value.commitDate shouldBe GithubApi.githubDateTimeFormatter.parseDateTime("2015-04-09T10:18:12.000Z")
}
}
}
| xnejp03/releaser | src/test/scala/uk/gov/hmrc/releaser/domain/ArtefactMetaDataSpecs.scala | Scala | apache-2.0 | 1,391 |
package com.github.diegopacheco.sandbox.ml.scala.smile.naivebayes
import smile.classification.NaiveBayes
import smile.feature.Bag
import java.io.File
object NaiveBayesExample extends App {
val basePath = new File(".").getCanonicalPath + "/src/main/resources"
val spamPath = basePath + "/spam"
val spam2Path = basePath + "/spam_2"
val easyHamPath = basePath + "/easy_ham"
val easyHam2Path = basePath + "/easy_ham_2"
val hardHamPath = basePath + "/hard_ham"
val hardHam2Path = basePath + "/hard_ham_2"
val amountOfSamplesPerSet = 500
val amountOfFeaturesToTake = 400
//First get a subset of the file names for the spam sample set (500 is the complete set in this case)
val listOfSpamFiles = getFilesFromDir(spamPath).take(amountOfSamplesPerSet)
//Then get the messages that are contained in these files
val spamMails = listOfSpamFiles.map(x => (x, getMessage(x)))
val stopWords = getStopWords
val spamTDM = spamMails
.flatMap(email => email
._2.split(" ")
.filter(word => word.nonEmpty && !stopWords.contains(word))
.map(word => (email._1.getName,word)))
.groupBy(x => x._2)
.map(x => (x._1, x._2.groupBy(x => x._1)))
.map(x => (x._1, x._2.map( y => (y._1, y._2.length)))).toList
//Sort the words by occurrence rate descending (amount of times the word occurs among all documents)
val sortedSpamTDM = spamTDM.sortBy(x => - (x._2.size.toDouble / spamMails.length))
val spamFeatures = sortedSpamTDM.take(amountOfFeaturesToTake).map(x => x._1)
//Get a subset of the file names from the ham sample set (note that in this case it is not necessary to randomly sample as the emails are already randomly ordered)
val listOfHamFiles = getFilesFromDir(easyHamPath).take(amountOfSamplesPerSet)
//Get the messages that are contained in the ham files
val hamMails = listOfHamFiles.map(x => (x, getMessage(x)))
//Then its time for feature selection specifically for the Ham messages
val hamTDM = hamMails
.flatMap(email => email
._2.split(" ")
.filter(word => word.nonEmpty && !stopWords.contains(word))
.map(word => (email._1.getName,word)))
.groupBy(x => x._2)
.map(x => (x._1, x._2.groupBy(x => x._1)))
.map(x => (x._1, x._2.map( y => (y._1, y._2.length)))).toList
//Sort the words by occurrence rate descending (amount of times the word occurs among all documents)
  val sortedHamTDM = hamTDM.sortBy(x => - (x._2.size.toDouble / hamMails.length))
val hamFeatures = sortedHamTDM.take(amountOfFeaturesToTake).map(x => x._1)
  //Now that we have a set of ham and spam features, we combine them and then remove the intersecting features, as these are noise.
var data = (hamFeatures ++ spamFeatures).toSet
hamFeatures.intersect(spamFeatures).foreach(x => data = data - x)
//Initialize a bag of words that takes the top x features from both spam and ham and combines them
val bag = new Bag[String](data.toArray)
//Initialize the classifier array with first a set of 0(spam) and then a set of 1(ham) values that represent the emails
val classifiers = Array.fill[Int](amountOfSamplesPerSet)(0) ++ Array.fill[Int](amountOfSamplesPerSet)(1)
//Get the trainingData in the right format for the spam mails
val spamData = spamMails.map(x => bag.feature(x._2.split(" "))).toArray
//Get the trainingData in the right format for the ham mails
val hamData = hamMails.map(x => bag.feature(x._2.split(" "))).toArray
//Combine the training data from both categories
val trainingData = spamData ++ hamData
//Create the bayes model as a multinomial with 2 classification groups and the amount of features passed in the constructor.
val bayes = new NaiveBayes(NaiveBayes.Model.MULTINOMIAL, 2, data.size)
//Now train the bayes instance with the training data, which is represented in a specific format due to the bag.feature method, and the known classifiers.
bayes.learn(trainingData, classifiers)
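  //Illustrative only: a new message (the text below is made up) can be classified by mapping it
  //into the same bag-of-words representation and asking the trained model for a label,
  //where 0 means spam, 1 means ham and -1 means unknown:
  //  val label = bayes.predict(bag.feature("cheap pills buy now".split(" ")))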
  //Now we are ready for evaluation, for which we will use the testing sets.
  //Note that although the values below are named "spam2", this particular run reads the easy_ham_2 set.
  val listOfSpam2Files = getFilesFromDir(easyHam2Path)
//Then get the messages that are contained in these files
val spam2Mails = listOfSpam2Files.map { x => (x, getMessage(x))}
val spam2FeatureVectors = spam2Mails.map(x => bag.feature(x._2.split(" ")))
val spam2ClassificationResults = spam2FeatureVectors.map(x => bayes.predict(x))
val spamClassifications = spam2ClassificationResults.count(x => x == 0)
println(spamClassifications + " of " + listOfSpam2Files.length + " were classified as spam")
println(((spamClassifications.toDouble / listOfSpam2Files.length) * 100) + "% was classified as spam")
val hamClassifications = spam2ClassificationResults.count(x => x == 1)
println(hamClassifications + " of " + listOfSpam2Files.length + " were classified as ham")
println(((hamClassifications.toDouble / listOfSpam2Files.length) * 100) + "% was classified as ham")
val unknownClassifications = spam2ClassificationResults.count(x => x == -1)
println(unknownClassifications + " of " + listOfSpam2Files.length + " were unknowingly classified")
println(((unknownClassifications.toDouble / listOfSpam2Files.length) * 100) + "% was unknowingly classified")
def getFilesFromDir(path: String): List[File] = {
val d = new File(path)
if (d.exists && d.isDirectory) {
      //Filter out the macOS .DS_Store metadata file and, on Unix systems, the "cmds" file
d.listFiles.filter(x => x.isFile && !x.toString.contains(".DS_Store") && !x.toString.contains("cmds")).toList
} else {
List[File]()
}
}
def getStopWords: List[String] = {
val source = scala.io.Source.fromFile(new File( basePath + "/stopwords.txt"))("latin1")
val lines = source.mkString.split("\\n")
source.close()
lines.toList
}
def getMessage(file: File): String = {
    //Note that the encoding of the example files is latin1, so this charset should be passed to the fromFile method.
val source = scala.io.Source.fromFile(file)("latin1")
val lines = source.getLines mkString "\\n"
source.close()
//Find the first line break in the email, as this indicates the message body
val firstLineBreak = lines.indexOf("\\n\\n")
//Return the message body filtered by only text from a-z and to lower case
lines.substring(firstLineBreak).replace("\\n", " ").replaceAll("[^a-zA-Z ]", "").toLowerCase
}
} | diegopacheco/ML_sandbox | Scala/scala_smile_ML_playground/src/main/scala/com/github/diegopacheco/sandbox/ml/scala/smile/naivebayes/NaiveBayesExample.scala | Scala | unlicense | 6,597 |
/*
* Copyright 2013-2015 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0.
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied.
*
* See the License for the specific language governing permissions
* and limitations under the License.
*/
import org.scalatest._
class ASuite extends FunSuite {
test("A should have ASCII value 41 hex") {
assert('A' === 0x41)
}
test("a should have ASCII value 61 hex") {
assert('a' === 0x61)
}
}
class BSuite extends FunSuite {
test("B should have ASCII value 42 hex") {
assert('B' === 0x42)
}
test("b should have ASCII value 62 hex") {
assert('b' === 0x62)
}
}
class CSuite extends FunSuite {
test("C should have ASCII value 43 hex") {
assert('C' === 0x43)
}
test("c should have ASCII value 63 hex") {
assert('c' === 0x63)
}
}
class ASCIISuite extends Suites(
new ASuite,
new BSuite,
new CSuite
) | JetBrains/sbt-tc-logger | test/testdata/testsupport/nested/src/test/scala/MainSuite.scala | Scala | apache-2.0 | 1,253 |
package org.talkingpuffin.ui
import java.awt.Rectangle
import javax.swing.ImageIcon
import java.text.NumberFormat
import scala.swing.event.WindowClosing
import org.talkingpuffin.{Main, Globals, Session, Constants}
import org.talkingpuffin.filter.TagUsers
import org.talkingpuffin.util.{FetchRequest, Loggable}
import org.talkingpuffin.state.{GlobalPrefs, StateSaver}
import util.{AppEvent, eventDistributor}
import twitter4j.{RateLimitStatusListener, Twitter, User, RateLimitStatusEvent}
import swing._
import swing.TabbedPane.Page
/**
* The top-level application Swing frame window. There is one per user session.
*/
class TopFrame(tw: Twitter) extends Frame with Loggable
with PeoplePaneCreator with Reactor {
val service = org.talkingpuffin.apix.Constants.ServiceName
val prefs = GlobalPrefs.prefsForUser(service, tw.getScreenName)
val tagUsers = new TagUsers(service, tw.getScreenName)
TopFrames.addFrame(this)
val session = new Session(service, tw)
Globals.sessions ::= session
iconImage = new ImageIcon(getClass.getResource("/TalkingPuffin.png")).getImage
session.peoplePaneCreator = this
private var peoplePane: PeoplePane = _
val mainToolBar = new MainToolBar
session.progress = mainToolBar
setUpUserStatusReactor()
val rels = new Relationships()
val providers = new DataProviders(session, prefs, session.progress)
session.dataProviders = providers
val streams = new Streams(prefs, session, tagUsers, rels)
session.streams = streams
menuBar = new MainMenuBar(session, tagUsers)
mainToolBar.init(streams)
title = Main.title + " - " + service + " " + tw.getScreenName
reactions += {
case e: AppEvent if e.session != session => // Ignore all from other sessions
case e: NewFollowingViewEvent => createView(providers.following, e.include, None)
case e: NewViewEvent => createView(e.provider, e.include, None)
case e: NewPeoplePaneEvent => createPeoplePane()
case e: SendStatusEvent => (new SendMsgDialog(session, null, None, None, None, false)).visible = true
case e: SendDirectMessageEvent => (new SendMsgDialog(session, null, None, None, None, true)).visible = true
}
listenTo(eventDistributor)
contents = new GridBagPanel {
val userPic = new Label
val picFetcher = new PictureFetcher("Frame picture " + hashCode, None, 1, None)
picFetcher.requestItem(FetchRequest(tw.showUser(tw.getScreenName).getProfileImageURL.toString, null,
(imageReady: PictureFetcher.ImageReady) => SwingInvoke.later {
val icon = imageReady.resource.image
if (icon.getIconHeight <= Thumbnail.THUMBNAIL_SIZE) {
userPic.icon = icon
}
}))
add(userPic, new Constraints { grid = (0,0); gridheight=2})
add(session.statusMsgLabel, new Constraints {
grid = (1,0); anchor=GridBagPanel.Anchor.West; fill = GridBagPanel.Fill.Horizontal; weightx = 1;
})
peer.add(mainToolBar, new Constraints {grid = (1,1); anchor=GridBagPanel.Anchor.West}.peer)
add(session.tabbedPane, new Constraints {
grid = (0,2); fill = GridBagPanel.Fill.Both; weightx = 1; weighty = 1; gridwidth=2})
}
reactions += {
case WindowClosing(_) => close()
}
peer.setLocationRelativeTo(null)
listenTo(rels)
reactions += {
case ic: IdsChanged =>
if ((rels.followers.isEmpty && rels.friends.isEmpty) &&
(rels.followerIds.length + rels.friendIds.length < Constants.MaxPeopleForAutoPaneCreation)) {
updatePeople()
}
}
rels.fetchAndPublishUserIds(session, mainToolBar)
pack()
visible = true
setFocus()
def setFocus() {
streams.views.foreach(_.pane.requestFocusForTable)
}
override def close() {
streams.stop
tw.setRateLimitStatusListener(null)
Globals.sessions -= session
dispose()
StateSaver.save(streams, session.userPrefs, tagUsers)
TopFrames.removeFrame(this)
}
type Users = List[User]
def createPeoplePane(longTitle: String, shortTitle: String, opOtherRels: Option[Relationships], opUsers: Option[Users],
updatePeople: Option[() => Unit], location: Option[Rectangle]): PeoplePane = {
def getRels = if (opOtherRels.isDefined) opOtherRels.get else rels
val usersTableModel =
if (opUsers.isDefined || opOtherRels.isDefined)
new UsersTableModel(opUsers, tagUsers, getRels)
else
streams.usersTableModel
val customRels = opUsers.map(users =>
new Relationships {
friends = rels.friends intersect users
friendIds = friends map(_.getId.toLong)
followers = rels.followers intersect users
followerIds = followers map(_.getId.toLong)
}
).getOrElse(getRels)
val peoplePane = new PeoplePane(longTitle, shortTitle, session, usersTableModel, customRels, updatePeople)
session.tabbedPane.pages += new Page(shortTitle, peoplePane) {tip = longTitle}
peoplePane
}
private def updatePeople() {
rels.fetchAndPublishUsers(session, tw.getScreenName, mainToolBar)
}
private def createPeoplePane() {
peoplePane = createPeoplePane("People You Follow and People Who Follow You", "People", None, None,
Some(updatePeople _), None)
}
private def setUpUserStatusReactor() {
tw.setRateLimitStatusListener(new RateLimitStatusListener() {
def onRateLimitReached(e: RateLimitStatusEvent) {}
def onRateLimitStatus(e: RateLimitStatusEvent) {
SwingInvoke.later {
mainToolBar.remaining.text = NumberFormat.getIntegerInstance.format(e.getRateLimitStatus.getRemainingHits)
}
}
})
}
private def createView(provider: DataProvider, include: Option[String], location: Option[Rectangle]) {
streams.createView(session.tabbedPane, provider, include, location)
provider.loadContinually()
}
}
| dcbriccetti/talking-puffin | desktop/src/main/scala/org/talkingpuffin/ui/TopFrame.scala | Scala | mit | 5,814 |
/*******************************************************************************
* (C) Copyright 2015 Haifeng Li
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/** Shell initialization script.
*
* @author Haifeng Li
*/
import smile._
import smile.util._
import smile.math._, Math._
import smile.math.distance._
import smile.math.kernel._
import smile.stat.distribution._
import smile.data._
import java.awt.Color, smile.plot._
import smile.interpolation._
import smile.validation._
import smile.association._
import smile.regression._
import smile.classification._
import smile.feature._
import smile.clustering._
import smile.vq._
import smile.manifold._
import smile.mds._
import smile.sequence._
import smile.projection._
import smile.nlp._
import smile.wavelet._
import smile.shell._
| arehart13/smile | shell/src/universal/bin/init.scala | Scala | apache-2.0 | 1,390 |
package bad.robot.radiate
import bad.robot.radiate.OptionSyntax._
import scalaz.syntax.std.option._
object Environment {
def getEnvironmentVariable(variable: String): Option[String] = {
NonEmptyOption(sys.env.get(variable))
}
def getEnvironmentVariable(variable: String, defaultValue: String): String = {
NonEmptyOption(sys.env.get(variable)).some(identity).none(defaultValue)
}
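  // Usage sketch ("RADIATE_URL" is a made-up variable name; NonEmptyOption presumably
  // collapses empty values to None, as its name suggests):
  //   Environment.getEnvironmentVariable("RADIATE_URL")              // Option[String], None when unset
  //   Environment.getEnvironmentVariable("RADIATE_URL", "localhost") // falls back to the default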
}
| tobyweston/radiate | src/main/scala/bad/robot/radiate/Environment.scala | Scala | apache-2.0 | 402 |
package scalacookbook.chapter05
/**
* Created by liguodong on 2016/7/4.
*/
object Main extends App{
// java
// public String doSomething(int x) {
// // code here
// }
// scala
// def doSomething(x: Int): String = {
// // code here
// }
def plusOne(i: Int) = i + 1
println(plusOne(109))
}
| liguodongIOT/java-scala-mix-sbt | src/main/scala/scalacookbook/chapter05/Main.scala | Scala | apache-2.0 | 310 |
package scala.meta.internal
package semanticdb
import java.nio.charset.Charset
import scala.collection.mutable
import scala.{meta => m}
import scala.meta.internal.io._
import scala.reflect.internal.util.{Position => GPosition, SourceFile => GSourceFile}
import scala.reflect.io.VirtualFile
import scala.reflect.io.{PlainFile => GPlainFile}
trait InputOps { self: DatabaseOps =>
private lazy val gSourceFileInputCache = mutable.Map[GSourceFile, m.Input]()
implicit class XtensionGSourceFileInput(gsource: GSourceFile) {
def toInput: m.Input =
gSourceFileInputCache.getOrElseUpdate(gsource, {
gsource.file match {
case gfile: GPlainFile =>
val path = m.AbsolutePath(gfile.file)
import SemanticdbMode._
config.mode match {
case Slim =>
m.Input.File(path)
case Fat =>
val label = path.toRelative(config.sourceroot).toString
// NOTE: Can't use gsource.content because it's preprocessed by scalac.
// TODO: Obtain charset from Global.reader.
val charset = Charset.forName("UTF-8")
val contents = FileIO.slurp(path, charset)
m.Input.VirtualFile(label, contents)
case Disabled =>
m.Input.None
}
case gfile: VirtualFile =>
m.Input.VirtualFile(gfile.path, gsource.content.mkString)
case other =>
m.Input.None
}
})
}
implicit class XtensionGPositionMPosition(pos: GPosition) {
def toMeta: m.Position = {
// NOTE: Even with -Yrangepos enabled we cannot be guaranteed that all positions are
// range positions. In the case we encounter a non-range position we assume start == end.
val input = pos.source.toInput
if (input == m.Input.None) m.Position.None
else if (!pos.isDefined) m.Position.None
else if (pos.isRange) m.Position.Range(input, pos.start, pos.end)
else m.Position.Range(input, pos.point, pos.point)
}
}
}
| DavidDudson/scalameta | scalameta/semanticdb-scalac-core/src/main/scala/scala/meta/internal/semanticdb/InputOps.scala | Scala | bsd-3-clause | 2,074 |
/* Copyright 2009-2021 EPFL, Lausanne */
import stainless.lang._
object MySet {
def set1(): Boolean = {
val s = Set(1, 2, 3, 4)
s.contains(3)
}.holds
def set2(): Boolean = {
val s1 = Set[Int]()
val s2 = Set.empty[Int]
s1 == s2
}.holds
}
| epfl-lara/stainless | frontends/benchmarks/verification/valid/MySet.scala | Scala | apache-2.0 | 270 |
package geotrellis.raster.op.local
import geotrellis._
import geotrellis.process._
/**
* Set all values of output raster to one value or another based on whether a
* condition is true or false.
*/
case class IfElseCell(r:Op[Raster], cond:Int => Boolean, trueValue:Int,
falseValue:Int) extends Op1(r)({
(r) => Result(r.map(z => if (cond(z)) trueValue else falseValue))
})
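// Usage sketch: build a binary mask from a raster op (the threshold of 100 is arbitrary):
//   val mask = IfElseCell(raster, _ > 100, trueValue = 1, falseValue = 0)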
| Tjoene/thesis | Case_Programs/geotrellis-0.7.0/src/main/scala/geotrellis/raster/op/local/IfElseCell.scala | Scala | gpl-2.0 | 420 |
// Traverses `fa` only once, running both effectful functions together by traversing with the product applicative (M[x], N[x]).
def fuse[M[_],N[_],A,B](fa: F[A])(f: A => M[B], g: A => N[B])
(implicit M: Applicative[M], N: Applicative[N]): (M[F[B]], N[F[B]]) =
traverse[({type f[x] = (M[x], N[x])})#f, A, B](fa)(a => (f(a), g(a)))(M product N) | galarragas/FpInScala | answerkey/applicative/14.answer.scala | Scala | mit | 239 |
/*
* konpare
* Copyright (C) 2015 Alexander Fefelov <https://github.com/alexanderfefelov>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.github.alexanderfefelov.konpare.syntax.predicate
import com.github.alexanderfefelov.konpare.syntax.{Syntax, Predicate}
import com.github.alexanderfefelov.konpare.syntax.subject.create._
object Create extends Predicate {
override val subjects = Map(
Syntax.SUBJECT_SNMP -> Snmp,
Syntax.SUBJECT_SYSLOG -> Syslog,
Syntax.SUBJECT_VLAN -> Vlan
)
} | alexanderfefelov/konpare | src/main/scala/com/github/alexanderfefelov/konpare/syntax/predicate/Create.scala | Scala | gpl-3.0 | 1,123 |
package controllers.forms
import play.api.data.Form
import play.api.data.Forms._
/**
* Created by info on 11.07.2016.
*/
case class SetupForm(email: String, password: String, firstName: String, lastName: String, hadoopUser: String, qrygraphFolder: String, fsDefaultName: String, mapredJobTracker: String)
object SetupForm {
def form = Form(
mapping(
"email" -> email,
"password" -> nonEmptyText,
"firstName" -> nonEmptyText,
"lastName" -> nonEmptyText,
"hadoopUser" -> nonEmptyText,
"qrygraphFolder" -> nonEmptyText,
"fsDefaultName" -> nonEmptyText,
"mapredJobTracker" -> nonEmptyText
)(SetupForm.apply)(SetupForm.unapply)
)
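  // Usage sketch (field values are placeholders; bind and fold come from the Play forms API):
  //   SetupForm.form
  //     .bind(Map("email" -> "admin@example.com", "password" -> "secret", ...))  // plus the remaining fields
  //     .fold(formWithErrors => ..., setup => ...)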
} | Starofall/QryGraph | qrygraph/jvm/app/controllers/forms/SetupForm.scala | Scala | mit | 693 |
/* Copyright 2009-2015 - Big Data Technologies S.R.L. All Rights Reserved. */
package org.supler.field
import org.supler._
import org.supler.validation._
import play.api.libs.json._
case class SelectManyListField[T, U](
name: String,
read: T => List[U],
write: (T, List[U]) => T,
validators: List[Validator[T, List[U]]],
valuesProvider: ValuesProvider[T, U],
label: Option[String],
labelForValue: U => String,
description: Option[String],
idForValue: Option[U => String],
renderHint: Option[RenderHint with SelectManyFieldCompatible],
enabledIf: T => Boolean,
includeIf: T => Boolean) extends Field[T] with SelectField[T, U] with ValidateWithValidators[T, List[U]] {
def label(newLabel: String): SelectManyListField[T, U] = this.copy(label = Some(newLabel))
def description(newDescription: String): SelectManyListField[T, U] = this.copy(description = Some(newDescription))
def validate(validators: Validator[T, List[U]]*): SelectManyListField[T, U] = this.copy(validators = this.validators ++ validators)
def renderHint(newRenderHint: RenderHint with SelectManyFieldCompatible): SelectManyListField[T, U] = this.copy(renderHint = Some(newRenderHint))
def enabledIf(condition: T => Boolean): SelectManyListField[T, U] = this.copy(enabledIf = condition)
def includeIf(condition: T => Boolean): SelectManyListField[T, U] = this.copy(includeIf = condition)
def idForValue[I](idFn: U => I)(implicit idTransformer: SelectValueIdSerializer[I]): SelectManyListField[T, U] =
this.copy(idForValue = Some(idFn andThen idTransformer.toString))
override def emptyValue = None
override def required = false
override protected def multiple = true
protected def generateValueJSONData(obj: T) = {
val possibleValues = valuesProvider(obj)
val currentValues = read(obj)
ValueJSONData(Some(JsArray(currentValues.toList.flatMap(idFromValue(possibleValues, _)).map(JsString))),
None)
}
private[supler] override def applyFieldJSONValues(parentPath: FieldPath, obj: T, jsonFields: Map[String, JsValue]): PartiallyAppliedObj[T] = {
import org.supler.validation.PartiallyAppliedObj._
val possibleValues = valuesProvider(obj)
val values = for {
jsonValue <- jsonFields.get(name).toList
ids <- jsonValue match { case JsArray(ids) => List(ids.collect { case JsString(id) => id }); case _ => Nil }
id <- ids
value <- valueFromId(possibleValues, id)
} yield value
full(write(obj, values.toList))
}
}
class AlmostSelectManyListField[T, U](
name: String,
read: T => List[U],
write: (T, List[U]) => T,
labelForValue: U => String,
renderHint: Option[RenderHint with SelectManyFieldCompatible]) {
def possibleValues(valuesProvider: ValuesProvider[T, U]): SelectManyListField[T, U] =
SelectManyListField(name, read, write, Nil, valuesProvider, None, labelForValue, None, None, renderHint,
AlwaysCondition, AlwaysCondition)
}
| aparo/scalajs-supler | supler/shared/src/main/scala/org/supler/field/SelectManyListField.scala | Scala | apache-2.0 | 2,944 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import java.util.{Locale, TimeZone}
import scala.reflect.ClassTag
import org.apache.log4j.Level
import org.scalatest.Matchers
import org.apache.spark.api.python.PythonEvalType
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTableType, InMemoryCatalog, SessionCatalog}
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.errors.TreeNodeException
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, Count, Sum}
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser.parsePlan
import org.apache.spark.sql.catalyst.plans.{Cross, Inner}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning, RangePartitioning, RoundRobinPartitioning}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
class AnalysisSuite extends AnalysisTest with Matchers {
import org.apache.spark.sql.catalyst.analysis.TestRelations._
test("union project *") {
val plan = (1 to 120)
.map(_ => testRelation)
.fold[LogicalPlan](testRelation) { (a, b) =>
a.select(UnresolvedStar(None)).select($"a").union(b.select(UnresolvedStar(None)))
}
assertAnalysisSuccess(plan)
}
test("check project's resolved") {
assert(Project(testRelation.output, testRelation).resolved)
assert(!Project(Seq(UnresolvedAttribute("a")), testRelation).resolved)
val explode = Explode(AttributeReference("a", IntegerType, nullable = true)())
assert(!Project(Seq(Alias(explode, "explode")()), testRelation).resolved)
assert(!Project(Seq(Alias(count(Literal(1)), "count")()), testRelation).resolved)
}
test("analyze project") {
checkAnalysis(
Project(Seq(UnresolvedAttribute("a")), testRelation),
Project(testRelation.output, testRelation))
checkAnalysis(
Project(Seq(UnresolvedAttribute("TbL.a")),
SubqueryAlias("TbL", UnresolvedRelation(TableIdentifier("TaBlE")))),
Project(testRelation.output, testRelation))
assertAnalysisError(
Project(Seq(UnresolvedAttribute("tBl.a")),
SubqueryAlias("TbL", UnresolvedRelation(TableIdentifier("TaBlE")))),
Seq("cannot resolve"))
checkAnalysis(
Project(Seq(UnresolvedAttribute("TbL.a")),
SubqueryAlias("TbL", UnresolvedRelation(TableIdentifier("TaBlE")))),
Project(testRelation.output, testRelation),
caseSensitive = false)
checkAnalysis(
Project(Seq(UnresolvedAttribute("tBl.a")),
SubqueryAlias("TbL", UnresolvedRelation(TableIdentifier("TaBlE")))),
Project(testRelation.output, testRelation),
caseSensitive = false)
}
test("resolve sort references - filter/limit") {
val a = testRelation2.output(0)
val b = testRelation2.output(1)
val c = testRelation2.output(2)
// Case 1: one missing attribute is in the leaf node and another is in the unary node
val plan1 = testRelation2
.where($"a" > "str").select($"a", $"b")
.where($"b" > "str").select($"a")
.sortBy($"b".asc, $"c".desc)
val expected1 = testRelation2
.where(a > "str").select(a, b, c)
.where(b > "str").select(a, b, c)
.sortBy(b.asc, c.desc)
.select(a)
checkAnalysis(plan1, expected1)
// Case 2: all the missing attributes are in the leaf node
val plan2 = testRelation2
.where($"a" > "str").select($"a")
.where($"a" > "str").select($"a")
.sortBy($"b".asc, $"c".desc)
val expected2 = testRelation2
.where(a > "str").select(a, b, c)
.where(a > "str").select(a, b, c)
.sortBy(b.asc, c.desc)
.select(a)
checkAnalysis(plan2, expected2)
}
test("resolve sort references - join") {
val a = testRelation2.output(0)
val b = testRelation2.output(1)
val c = testRelation2.output(2)
val h = testRelation3.output(3)
// Case: join itself can resolve all the missing attributes
val plan = testRelation2.join(testRelation3)
.where($"a" > "str").select($"a", $"b")
.sortBy($"c".desc, $"h".asc)
val expected = testRelation2.join(testRelation3)
.where(a > "str").select(a, b, c, h)
.sortBy(c.desc, h.asc)
.select(a, b)
checkAnalysis(plan, expected)
}
test("resolve sort references - aggregate") {
val a = testRelation2.output(0)
val b = testRelation2.output(1)
val c = testRelation2.output(2)
val alias_a3 = count(a).as("a3")
val alias_b = b.as("aggOrder")
// Case 1: when the child of Sort is not Aggregate,
// the sort reference is handled by the rule ResolveSortReferences
val plan1 = testRelation2
.groupBy($"a", $"c", $"b")($"a", $"c", count($"a").as("a3"))
.select($"a", $"c", $"a3")
.orderBy($"b".asc)
val expected1 = testRelation2
.groupBy(a, c, b)(a, c, alias_a3, b)
.select(a, c, alias_a3.toAttribute, b)
.orderBy(b.asc)
.select(a, c, alias_a3.toAttribute)
checkAnalysis(plan1, expected1)
// Case 2: when the child of Sort is Aggregate,
// the sort reference is handled by the rule ResolveAggregateFunctions
val plan2 = testRelation2
.groupBy($"a", $"c", $"b")($"a", $"c", count($"a").as("a3"))
.orderBy($"b".asc)
val expected2 = testRelation2
.groupBy(a, c, b)(a, c, alias_a3, alias_b)
.orderBy(alias_b.toAttribute.asc)
.select(a, c, alias_a3.toAttribute)
checkAnalysis(plan2, expected2)
}
test("resolve relations") {
assertAnalysisError(UnresolvedRelation(TableIdentifier("tAbLe")), Seq())
checkAnalysis(UnresolvedRelation(TableIdentifier("TaBlE")), testRelation)
checkAnalysis(
UnresolvedRelation(TableIdentifier("tAbLe")), testRelation, caseSensitive = false)
checkAnalysis(
UnresolvedRelation(TableIdentifier("TaBlE")), testRelation, caseSensitive = false)
}
test("divide should be casted into fractional types") {
val plan = caseInsensitiveAnalyzer.execute(
testRelation2.select(
$"a" / Literal(2) as "div1",
$"a" / $"b" as "div2",
$"a" / $"c" as "div3",
$"a" / $"d" as "div4",
$"e" / $"e" as "div5"))
val pl = plan.asInstanceOf[Project].projectList
assert(pl(0).dataType == DoubleType)
assert(pl(1).dataType == DoubleType)
assert(pl(2).dataType == DoubleType)
assert(pl(3).dataType == DoubleType)
assert(pl(4).dataType == DoubleType)
}
test("pull out nondeterministic expressions from RepartitionByExpression") {
val plan = RepartitionByExpression(Seq(Rand(33)), testRelation, numPartitions = 10)
val projected = Alias(Rand(33), "_nondeterministic")()
val expected =
Project(testRelation.output,
RepartitionByExpression(Seq(projected.toAttribute),
Project(testRelation.output :+ projected, testRelation),
numPartitions = 10))
checkAnalysis(plan, expected)
}
test("pull out nondeterministic expressions from Sort") {
val plan = Sort(Seq(SortOrder(Rand(33), Ascending)), false, testRelation)
val projected = Alias(Rand(33), "_nondeterministic")()
val expected =
Project(testRelation.output,
Sort(Seq(SortOrder(projected.toAttribute, Ascending)), false,
Project(testRelation.output :+ projected, testRelation)))
checkAnalysis(plan, expected)
}
test("SPARK-9634: cleanup unnecessary Aliases in LogicalPlan") {
val a = testRelation.output.head
var plan = testRelation.select(((a + 1).as("a+1") + 2).as("col"))
var expected = testRelation.select((a + 1 + 2).as("col"))
checkAnalysis(plan, expected)
plan = testRelation.groupBy(a.as("a1").as("a2"))((min(a).as("min_a") + 1).as("col"))
expected = testRelation.groupBy(a)((min(a) + 1).as("col"))
checkAnalysis(plan, expected)
// CreateStruct is a special case that we should not trim Alias for it.
plan = testRelation.select(CreateStruct(Seq(a, (a + 1).as("a+1"))).as("col"))
expected = testRelation.select(CreateNamedStruct(Seq(
Literal(a.name), a,
Literal("a+1"), (a + 1))).as("col"))
checkAnalysis(plan, expected)
}
test("Analysis may leave unnecessary aliases") {
val att1 = testRelation.output.head
var plan = testRelation.select(
CreateStruct(Seq(att1, ((att1.as("aa")) + 1).as("a_plus_1"))).as("col"),
att1
)
val prevPlan = getAnalyzer(true).execute(plan)
plan = prevPlan.select(CreateArray(Seq(
CreateStruct(Seq(att1, (att1 + 1).as("a_plus_1"))).as("col1"),
/** alias should be eliminated by [[CleanupAliases]] */
"col".attr.as("col2")
)).as("arr"))
plan = getAnalyzer(true).execute(plan)
val expectedPlan = prevPlan.select(
CreateArray(Seq(
CreateNamedStruct(Seq(
Literal(att1.name), att1,
Literal("a_plus_1"), (att1 + 1))),
Symbol("col").struct(prevPlan.output(0).dataType.asInstanceOf[StructType]).notNull
)).as("arr")
)
checkAnalysis(plan, expectedPlan)
}
test("SPARK-10534: resolve attribute references in order by clause") {
val a = testRelation2.output(0)
val c = testRelation2.output(2)
val plan = testRelation2.select($"c").orderBy(Floor($"a").asc)
val expected = testRelation2.select(c, a)
.orderBy(Floor(Cast(a, DoubleType, Option(TimeZone.getDefault().getID))).asc).select(c)
checkAnalysis(plan, expected)
}
test("self intersect should resolve duplicate expression IDs") {
val plan = testRelation.intersect(testRelation, isAll = false)
assertAnalysisSuccess(plan)
}
test("SPARK-8654: invalid CAST in NULL IN(...) expression") {
val plan = Project(Alias(In(Literal(null), Seq(Literal(1), Literal(2))), "a")() :: Nil,
LocalRelation()
)
assertAnalysisSuccess(plan)
}
test("SPARK-8654: different types in inlist but can be converted to a common type") {
val plan = Project(Alias(In(Literal(null), Seq(Literal(1), Literal(1.2345))), "a")() :: Nil,
LocalRelation()
)
assertAnalysisSuccess(plan)
}
test("SPARK-8654: check type compatibility error") {
val plan = Project(Alias(In(Literal(null), Seq(Literal(true), Literal(1))), "a")() :: Nil,
LocalRelation()
)
assertAnalysisError(plan, Seq("data type mismatch: Arguments must be same type"))
}
test("SPARK-11725: correctly handle null inputs for ScalaUDF") {
val testRelation = LocalRelation(
AttributeReference("a", StringType)(),
AttributeReference("b", DoubleType)(),
AttributeReference("c", ShortType)(),
AttributeReference("d", DoubleType, nullable = false)())
val string = testRelation.output(0)
val double = testRelation.output(1)
val short = testRelation.output(2)
val nonNullableDouble = testRelation.output(3)
val nullResult = Literal.create(null, StringType)
def checkUDF(udf: Expression, transformed: Expression): Unit = {
checkAnalysis(
Project(Alias(udf, "")() :: Nil, testRelation),
Project(Alias(transformed, "")() :: Nil, testRelation)
)
}
// non-primitive parameters do not need special null handling
val udf1 = ScalaUDF((s: String) => "x", StringType, string :: Nil,
Option(ExpressionEncoder[String]()) :: Nil)
val expected1 = udf1
checkUDF(udf1, expected1)
// only primitive parameter needs special null handling
val udf2 = ScalaUDF((s: String, d: Double) => "x", StringType, string :: double :: Nil,
Option(ExpressionEncoder[String]()) :: Option(ExpressionEncoder[Double]()) :: Nil)
val expected2 =
If(IsNull(double), nullResult, udf2.copy(children = string :: KnownNotNull(double) :: Nil))
checkUDF(udf2, expected2)
// special null handling should apply to all primitive parameters
val udf3 = ScalaUDF((s: Short, d: Double) => "x", StringType, short :: double :: Nil,
Option(ExpressionEncoder[Short]()) :: Option(ExpressionEncoder[Double]()) :: Nil)
val expected3 = If(
IsNull(short) || IsNull(double),
nullResult,
udf3.copy(children = KnownNotNull(short) :: KnownNotNull(double) :: Nil))
checkUDF(udf3, expected3)
// we can skip special null handling for primitive parameters that are not nullable
val udf4 = ScalaUDF(
(s: Short, d: Double) => "x",
StringType,
short :: nonNullableDouble :: Nil,
Option(ExpressionEncoder[Short]()) :: Option(ExpressionEncoder[Double]()) :: Nil)
val expected4 = If(
IsNull(short),
nullResult,
udf4.copy(children = KnownNotNull(short) :: nonNullableDouble :: Nil))
checkUDF(udf4, expected4)
}
test("SPARK-24891 Fix HandleNullInputsForUDF rule") {
val a = testRelation.output(0)
val func = (x: Int, y: Int) => x + y
val udf1 = ScalaUDF(func, IntegerType, a :: a :: Nil,
Option(ExpressionEncoder[java.lang.Integer]()) ::
Option(ExpressionEncoder[java.lang.Integer]()) :: Nil)
val udf2 = ScalaUDF(func, IntegerType, a :: udf1 :: Nil,
Option(ExpressionEncoder[java.lang.Integer]()) ::
Option(ExpressionEncoder[java.lang.Integer]()) :: Nil)
val plan = Project(Alias(udf2, "")() :: Nil, testRelation)
comparePlans(plan.analyze, plan.analyze.analyze)
}
test("SPARK-11863 mixture of aliases and real columns in order by clause - tpcds 19,55,71") {
val a = testRelation2.output(0)
val c = testRelation2.output(2)
val alias1 = a.as("a1")
val alias2 = c.as("a2")
val alias3 = count(a).as("a3")
val plan = testRelation2
.groupBy($"a", $"c")($"a".as("a1"), $"c".as("a2"), count($"a").as("a3"))
.orderBy($"a1".asc, $"c".asc)
val expected = testRelation2
.groupBy(a, c)(alias1, alias2, alias3)
.orderBy(alias1.toAttribute.asc, alias2.toAttribute.asc)
.select(alias1.toAttribute, alias2.toAttribute, alias3.toAttribute)
checkAnalysis(plan, expected)
}
test("Eliminate the unnecessary union") {
val plan = Union(testRelation :: Nil)
val expected = testRelation
checkAnalysis(plan, expected)
}
test("SPARK-12102: Ignore nullablity when comparing two sides of case") {
val relation = LocalRelation(Symbol("a").struct(Symbol("x").int),
Symbol("b").struct(Symbol("x").int.withNullability(false)))
val plan = relation.select(
CaseWhen(Seq((Literal(true), Symbol("a").attr)), Symbol("b")).as("val"))
assertAnalysisSuccess(plan)
}
test("Keep attribute qualifiers after dedup") {
val input = LocalRelation(Symbol("key").int, Symbol("value").string)
val query =
Project(Seq($"x.key", $"y.key"),
Join(
Project(Seq($"x.key"), SubqueryAlias("x", input)),
Project(Seq($"y.key"), SubqueryAlias("y", input)),
Cross, None, JoinHint.NONE))
assertAnalysisSuccess(query)
}
private def assertExpressionType(
expression: Expression,
expectedDataType: DataType): Unit = {
val afterAnalyze =
Project(Seq(Alias(expression, "a")()), OneRowRelation()).analyze.expressions.head
if (!afterAnalyze.dataType.equals(expectedDataType)) {
fail(
s"""
|data type of expression $expression doesn't match expected:
|Actual data type:
|${afterAnalyze.dataType}
|
|Expected data type:
|${expectedDataType}
""".stripMargin)
}
}
test("SPARK-15776: test whether Divide expression's data type can be deduced correctly by " +
"analyzer") {
assertExpressionType(sum(Divide(1, 2)), DoubleType)
assertExpressionType(sum(Divide(1.0, 2)), DoubleType)
assertExpressionType(sum(Divide(1, 2.0)), DoubleType)
assertExpressionType(sum(Divide(1.0, 2.0)), DoubleType)
assertExpressionType(sum(Divide(1, 2.0f)), DoubleType)
assertExpressionType(sum(Divide(1.0f, 2)), DoubleType)
assertExpressionType(sum(Divide(1, Decimal(2))), DecimalType(22, 11))
assertExpressionType(sum(Divide(Decimal(1), 2)), DecimalType(26, 6))
assertExpressionType(sum(Divide(Decimal(1), 2.0)), DoubleType)
assertExpressionType(sum(Divide(1.0, Decimal(2.0))), DoubleType)
}
test("SPARK-18058: union and set operations shall not care about the nullability" +
" when comparing column types") {
val firstTable = LocalRelation(
AttributeReference("a",
StructType(Seq(StructField("a", IntegerType, nullable = true))), nullable = false)())
val secondTable = LocalRelation(
AttributeReference("a",
StructType(Seq(StructField("a", IntegerType, nullable = false))), nullable = false)())
val unionPlan = Union(firstTable, secondTable)
assertAnalysisSuccess(unionPlan)
val r1 = Except(firstTable, secondTable, isAll = false)
val r2 = Intersect(firstTable, secondTable, isAll = false)
assertAnalysisSuccess(r1)
assertAnalysisSuccess(r2)
}
test("resolve as with an already existed alias") {
checkAnalysis(
Project(Seq(UnresolvedAttribute("tbl2.a")),
SubqueryAlias("tbl", testRelation).as("tbl2")),
Project(testRelation.output, testRelation),
caseSensitive = false)
checkAnalysis(SubqueryAlias("tbl", testRelation).as("tbl2"), testRelation)
}
test("SPARK-20311 range(N) as alias") {
def rangeWithAliases(args: Seq[Int], outputNames: Seq[String]): LogicalPlan = {
SubqueryAlias("t", UnresolvedTableValuedFunction("range", args.map(Literal(_)), outputNames))
.select(star())
}
assertAnalysisSuccess(rangeWithAliases(3 :: Nil, "a" :: Nil))
assertAnalysisSuccess(rangeWithAliases(1 :: 4 :: Nil, "b" :: Nil))
assertAnalysisSuccess(rangeWithAliases(2 :: 6 :: 2 :: Nil, "c" :: Nil))
assertAnalysisError(
rangeWithAliases(3 :: Nil, "a" :: "b" :: Nil),
Seq("Number of given aliases does not match number of output columns. "
+ "Function name: range; number of aliases: 2; number of output columns: 1."))
}
test("SPARK-20841 Support table column aliases in FROM clause") {
def tableColumnsWithAliases(outputNames: Seq[String]): LogicalPlan = {
UnresolvedSubqueryColumnAliases(
outputNames,
SubqueryAlias("t", UnresolvedRelation(TableIdentifier("TaBlE3")))
).select(star())
}
assertAnalysisSuccess(tableColumnsWithAliases("col1" :: "col2" :: "col3" :: "col4" :: Nil))
assertAnalysisError(
tableColumnsWithAliases("col1" :: Nil),
Seq("Number of column aliases does not match number of columns. " +
"Number of column aliases: 1; number of columns: 4."))
assertAnalysisError(
tableColumnsWithAliases("col1" :: "col2" :: "col3" :: "col4" :: "col5" :: Nil),
Seq("Number of column aliases does not match number of columns. " +
"Number of column aliases: 5; number of columns: 4."))
}
test("SPARK-20962 Support subquery column aliases in FROM clause") {
def tableColumnsWithAliases(outputNames: Seq[String]): LogicalPlan = {
UnresolvedSubqueryColumnAliases(
outputNames,
SubqueryAlias(
"t",
UnresolvedRelation(TableIdentifier("TaBlE3")))
).select(star())
}
assertAnalysisSuccess(tableColumnsWithAliases("col1" :: "col2" :: "col3" :: "col4" :: Nil))
assertAnalysisError(
tableColumnsWithAliases("col1" :: Nil),
Seq("Number of column aliases does not match number of columns. " +
"Number of column aliases: 1; number of columns: 4."))
assertAnalysisError(
tableColumnsWithAliases("col1" :: "col2" :: "col3" :: "col4" :: "col5" :: Nil),
Seq("Number of column aliases does not match number of columns. " +
"Number of column aliases: 5; number of columns: 4."))
}
test("SPARK-20963 Support aliases for join relations in FROM clause") {
def joinRelationWithAliases(outputNames: Seq[String]): LogicalPlan = {
val src1 = LocalRelation(Symbol("id").int, Symbol("v1").string).as("s1")
val src2 = LocalRelation(Symbol("id").int, Symbol("v2").string).as("s2")
UnresolvedSubqueryColumnAliases(
outputNames,
SubqueryAlias(
"dst",
src1.join(src2, Inner, Option(Symbol("s1.id") === Symbol("s2.id"))))
).select(star())
}
assertAnalysisSuccess(joinRelationWithAliases("col1" :: "col2" :: "col3" :: "col4" :: Nil))
assertAnalysisError(
joinRelationWithAliases("col1" :: Nil),
Seq("Number of column aliases does not match number of columns. " +
"Number of column aliases: 1; number of columns: 4."))
assertAnalysisError(
joinRelationWithAliases("col1" :: "col2" :: "col3" :: "col4" :: "col5" :: Nil),
Seq("Number of column aliases does not match number of columns. " +
"Number of column aliases: 5; number of columns: 4."))
}
test("SPARK-22614 RepartitionByExpression partitioning") {
def checkPartitioning[T <: Partitioning: ClassTag](
numPartitions: Int, exprs: Expression*): Unit = {
val partitioning = RepartitionByExpression(exprs, testRelation2, numPartitions).partitioning
val clazz = implicitly[ClassTag[T]].runtimeClass
assert(clazz.isInstance(partitioning))
}
checkPartitioning[HashPartitioning](numPartitions = 10, exprs = Literal(20))
checkPartitioning[HashPartitioning](numPartitions = 10,
exprs = Symbol("a").attr, Symbol("b").attr)
checkPartitioning[RangePartitioning](numPartitions = 10,
exprs = SortOrder(Literal(10), Ascending))
checkPartitioning[RangePartitioning](numPartitions = 10,
exprs = SortOrder(Symbol("a").attr, Ascending), SortOrder(Symbol("b").attr, Descending))
checkPartitioning[RoundRobinPartitioning](numPartitions = 10, exprs = Seq.empty: _*)
intercept[IllegalArgumentException] {
checkPartitioning(numPartitions = 0, exprs = Literal(20))
}
intercept[IllegalArgumentException] {
checkPartitioning(numPartitions = -1, exprs = Literal(20))
}
intercept[IllegalArgumentException] {
checkPartitioning(numPartitions = 10, exprs =
SortOrder(Symbol("a").attr, Ascending), Symbol("b").attr)
}
}
test("SPARK-24208: analysis fails on self-join with FlatMapGroupsInPandas") {
val pythonUdf = PythonUDF("pyUDF", null,
StructType(Seq(StructField("a", LongType))),
Seq.empty,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
true)
val output = pythonUdf.dataType.asInstanceOf[StructType].toAttributes
val project = Project(Seq(UnresolvedAttribute("a")), testRelation)
val flatMapGroupsInPandas = FlatMapGroupsInPandas(
Seq(UnresolvedAttribute("a")), pythonUdf, output, project)
val left = SubqueryAlias("temp0", flatMapGroupsInPandas)
val right = SubqueryAlias("temp1", flatMapGroupsInPandas)
val join = Join(left, right, Inner, None, JoinHint.NONE)
assertAnalysisSuccess(
Project(Seq(UnresolvedAttribute("temp0.a"), UnresolvedAttribute("temp1.a")), join))
}
test("SPARK-24488 Generator with multiple aliases") {
assertAnalysisSuccess(
listRelation.select(Explode($"list").as("first_alias").as("second_alias")))
assertAnalysisSuccess(
listRelation.select(MultiAlias(MultiAlias(
PosExplode($"list"), Seq("first_pos", "first_val")), Seq("second_pos", "second_val"))))
}
test("SPARK-24151: CURRENT_DATE, CURRENT_TIMESTAMP should be case insensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
val input = Project(Seq(
UnresolvedAttribute("current_date"),
UnresolvedAttribute("CURRENT_DATE"),
UnresolvedAttribute("CURRENT_TIMESTAMP"),
UnresolvedAttribute("current_timestamp")), testRelation)
val expected = Project(Seq(
Alias(CurrentDate(), toPrettySQL(CurrentDate()))(),
Alias(CurrentDate(), toPrettySQL(CurrentDate()))(),
Alias(CurrentTimestamp(), toPrettySQL(CurrentTimestamp()))(),
Alias(CurrentTimestamp(), toPrettySQL(CurrentTimestamp()))()), testRelation).analyze
checkAnalysis(input, expected)
}
}
test("SPARK-25691: AliasViewChild with different nullabilities") {
object ViewAnalyzer extends RuleExecutor[LogicalPlan] {
val batches = Batch("View", Once, EliminateView) :: Nil
}
val relation = LocalRelation(Symbol("a").int.notNull, Symbol("b").string)
val view = View(CatalogTable(
identifier = TableIdentifier("v1"),
tableType = CatalogTableType.VIEW,
storage = CatalogStorageFormat.empty,
schema = StructType(Seq(StructField("a", IntegerType), StructField("b", StringType)))),
output = Seq(Symbol("a").int, Symbol("b").string),
child = relation)
val tz = Option(conf.sessionLocalTimeZone)
val expected = Project(Seq(
Alias(Cast(Symbol("a").int.notNull, IntegerType, tz), "a")(),
Alias(Cast(Symbol("b").string, StringType, tz), "b")()),
relation)
val res = ViewAnalyzer.execute(view)
comparePlans(res, expected)
}
test("CTE with non-existing column alias") {
assertAnalysisError(parsePlan("WITH t(x) AS (SELECT 1) SELECT * FROM t WHERE y = 1"),
Seq("cannot resolve '`y`' given input columns: [x]"))
}
test("CTE with non-matching column alias") {
assertAnalysisError(parsePlan("WITH t(x, y) AS (SELECT 1) SELECT * FROM t WHERE x = 1"),
Seq("Number of column aliases does not match number of columns. Number of column aliases: " +
"2; number of columns: 1."))
}
test("SPARK-28251: Insert into non-existing table error message is user friendly") {
assertAnalysisError(parsePlan("INSERT INTO test VALUES (1)"),
Seq("Table not found: test"))
}
test("check CollectMetrics resolved") {
val a = testRelation.output.head
val sum = Sum(a).toAggregateExpression().as("sum")
val random_sum = Sum(Rand(1L)).toAggregateExpression().as("rand_sum")
val literal = Literal(1).as("lit")
// Ok
assert(CollectMetrics("event", literal :: sum :: random_sum :: Nil, testRelation).resolved)
// Bad name
assert(!CollectMetrics("", sum :: Nil, testRelation).resolved)
assertAnalysisError(CollectMetrics("", sum :: Nil, testRelation),
"observed metrics should be named" :: Nil)
// No columns
assert(!CollectMetrics("evt", Nil, testRelation).resolved)
def checkAnalysisError(exprs: Seq[NamedExpression], errors: String*): Unit = {
assertAnalysisError(CollectMetrics("event", exprs, testRelation), errors)
}
// Unwrapped attribute
checkAnalysisError(
a :: Nil,
"Attribute", "can only be used as an argument to an aggregate function")
// Unwrapped non-deterministic expression
checkAnalysisError(
Rand(10).as("rnd") :: Nil,
"non-deterministic expression", "can only be used as an argument to an aggregate function")
// Distinct aggregate
checkAnalysisError(
Sum(a).toAggregateExpression(isDistinct = true).as("sum") :: Nil,
"distinct aggregates are not allowed in observed metrics, but found")
// Nested aggregate
checkAnalysisError(
Sum(Sum(a).toAggregateExpression()).toAggregateExpression().as("sum") :: Nil,
"nested aggregates are not allowed in observed metrics, but found")
// Windowed aggregate
val windowExpr = WindowExpression(
RowNumber(),
WindowSpecDefinition(Nil, a.asc :: Nil,
SpecifiedWindowFrame(RowFrame, UnboundedPreceding, CurrentRow)))
checkAnalysisError(
windowExpr.as("rn") :: Nil,
"window expressions are not allowed in observed metrics, but found")
}
test("check CollectMetrics duplicates") {
val a = testRelation.output.head
val sum = Sum(a).toAggregateExpression().as("sum")
val count = Count(Literal(1)).toAggregateExpression().as("cnt")
// Same result - duplicate names are allowed
assertAnalysisSuccess(Union(
CollectMetrics("evt1", count :: Nil, testRelation) ::
CollectMetrics("evt1", count :: Nil, testRelation) :: Nil))
// Same children, structurally different metrics - fail
assertAnalysisError(Union(
CollectMetrics("evt1", count :: Nil, testRelation) ::
CollectMetrics("evt1", sum :: Nil, testRelation) :: Nil),
"Multiple definitions of observed metrics" :: "evt1" :: Nil)
// Different children, same metrics - fail
val b = Symbol("b").string
val tblB = LocalRelation(b)
assertAnalysisError(Union(
CollectMetrics("evt1", count :: Nil, testRelation) ::
CollectMetrics("evt1", count :: Nil, tblB) :: Nil),
"Multiple definitions of observed metrics" :: "evt1" :: Nil)
// Subquery different tree - fail
val subquery = Aggregate(Nil, sum :: Nil, CollectMetrics("evt1", count :: Nil, testRelation))
val query = Project(
b :: ScalarSubquery(subquery, Nil).as("sum") :: Nil,
CollectMetrics("evt1", count :: Nil, tblB))
assertAnalysisError(query, "Multiple definitions of observed metrics" :: "evt1" :: Nil)
// Aggregate with filter predicate - fail
val sumWithFilter = sum.transform {
case a: AggregateExpression => a.copy(filter = Some(true))
}.asInstanceOf[NamedExpression]
assertAnalysisError(
CollectMetrics("evt1", sumWithFilter :: Nil, testRelation),
"aggregates with filter predicate are not allowed" :: Nil)
}
test("Analysis exceed max iterations") {
    // RuleExecutor only throws an exception or logs a warning when the rule is supposed to run
    // more than once.
val maxIterations = 2
val conf = new SQLConf().copy(SQLConf.ANALYZER_MAX_ITERATIONS -> maxIterations)
val testAnalyzer = new Analyzer(
new SessionCatalog(new InMemoryCatalog, FunctionRegistry.builtin, conf), conf)
val plan = testRelation2.select(
$"a" / Literal(2) as "div1",
$"a" / $"b" as "div2",
$"a" / $"c" as "div3",
$"a" / $"d" as "div4",
$"e" / $"e" as "div5")
val message = intercept[TreeNodeException[LogicalPlan]] {
testAnalyzer.execute(plan)
}.getMessage
assert(message.startsWith(s"Max iterations ($maxIterations) reached for batch Resolution, " +
s"please set '${SQLConf.ANALYZER_MAX_ITERATIONS.key}' to a larger value."))
}
test("SPARK-30886 Deprecate two-parameter TRIM/LTRIM/RTRIM") {
Seq("trim", "ltrim", "rtrim").foreach { f =>
val logAppender = new LogAppender("deprecated two-parameter TRIM/LTRIM/RTRIM functions")
def check(count: Int): Unit = {
val message = "Two-parameter TRIM/LTRIM/RTRIM function signatures are deprecated."
assert(logAppender.loggingEvents.size == count)
assert(logAppender.loggingEvents.exists(
e => e.getLevel == Level.WARN &&
e.getRenderedMessage.contains(message)))
}
withLogAppender(logAppender) {
val testAnalyzer1 = new Analyzer(
new SessionCatalog(new InMemoryCatalog, FunctionRegistry.builtin, conf), conf)
val plan1 = testRelation2.select(
UnresolvedFunction(f, $"a" :: Nil, isDistinct = false))
testAnalyzer1.execute(plan1)
// One-parameter is not deprecated.
assert(logAppender.loggingEvents.isEmpty)
val plan2 = testRelation2.select(
UnresolvedFunction(f, $"a" :: $"b" :: Nil, isDistinct = false))
testAnalyzer1.execute(plan2)
// Deprecation warning is printed out once.
check(1)
val plan3 = testRelation2.select(
UnresolvedFunction(f, $"b" :: $"a" :: Nil, isDistinct = false))
testAnalyzer1.execute(plan3)
// There is no change in the log.
check(1)
// New analyzer from new SessionState
val testAnalyzer2 = new Analyzer(
new SessionCatalog(new InMemoryCatalog, FunctionRegistry.builtin, conf), conf)
val plan4 = testRelation2.select(
UnresolvedFunction(f, $"c" :: $"d" :: Nil, isDistinct = false))
testAnalyzer2.execute(plan4)
// Additional deprecation warning from new analyzer
check(2)
val plan5 = testRelation2.select(
UnresolvedFunction(f, $"c" :: $"d" :: Nil, isDistinct = false))
testAnalyzer2.execute(plan5)
// There is no change in the log.
check(2)
}
}
}
}
| kevinyu98/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala | Scala | apache-2.0 | 33,274 |
package com.github.ellchow.scaramouch.collection
import org.scalatest._
import org.scalatest.matchers._
import org.scalatest.prop._
import org.scalacheck._
import scalaz._, Scalaz._
class SearchTest extends FlatSpec
with Matchers
with GeneratorDrivenPropertyChecks {
behavior of "binary search"
it should "find existing items in sorted seq" in {
forAll {
(xs: Set[Int]) => whenever(xs.size > 0) {
val x = scala.util.Random.shuffle(xs.toVector).head
val sorted = xs.toVector.sorted
binarySearch(sorted, x) should be(sorted.indexOf(x).right)
}
}
}
it should "return 0 for empty seq" in {
forAll{
(x: Int) => binarySearch(Vector[Int](), x) should be(0.left)
}
}
it should "identify indices at which to insert missing items" in {
forAll{
(xs: Set[Int]) => whenever(xs.size > 0){
val x = scala.util.Random.shuffle(xs.toVector).head
val sorted = xs.toVector.sorted
val i = sorted.indexOf(x)
val dropped = sorted.take(i) ++ sorted.drop(i + 1)
binarySearch(dropped, x) should be(i.left)
}
}
}
}
| ellchow/scaramouch | scaramouch-collection/src/test/scala/com/github/ellchow/scaramouch/collection/SearchTest.scala | Scala | apache-2.0 | 1,137 |
package doodle.core
import scala.annotation.tailrec
/**
* A value in the range [0, 1]
*/
final case class Normalized(get: Double) extends AnyVal {
def +(that: Normalized): Double =
this.get + that.get
def -(that: Normalized): Double =
this.get - that.get
def max(that: Normalized): Normalized =
if(this.get > that.get)
this
else
that
def min(that: Normalized): Normalized =
if(this.get < that.get)
this
else
that
def toTurns: Angle =
Angle.turns(get)
def toPercentage: String =
s"${get * 100}%"
def toUnsignedByte: UnsignedByte =
UnsignedByte.clip(Math.round(get * UnsignedByte.MaxValue.get).toInt)
def toCanvas: String =
get.toString
}
object Normalized {
val MinValue = Normalized(0.0)
val MaxValue = Normalized(1.0)
def clip(value: Double): Normalized =
value match {
case v if value < 0.0 => MinValue
case v if value > 1.0 => MaxValue
case v => Normalized(v)
}
def wrap(value: Double): Normalized = {
@tailrec def loop(value: Double): Normalized = value match {
case v if v > 1.0 => loop(v - 1.0)
case v if v < 0.0 => loop(v + 1.0)
case v => Normalized(v)
}
loop(value)
}
}
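
/** Illustrative usage sketch (added for clarity; not part of the original library):
  * shows how `clip` and `wrap` map out-of-range doubles into [0, 1], following the
  * definitions above. The object name is a placeholder introduced here.
  */
private[core] object NormalizedExamples {
  val clippedLow: Normalized  = Normalized.clip(-0.2) // clamped to MinValue, i.e. Normalized(0.0)
  val clippedHigh: Normalized = Normalized.clip(1.7)  // clamped to MaxValue, i.e. Normalized(1.0)
  val wrapped: Normalized     = Normalized.wrap(1.25) // wrapped back into the unit interval: Normalized(0.25)
  val threeQuarterTurn: Angle = Normalized(0.75).toTurns // Angle.turns(0.75)
}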
| Angeldude/doodle | shared/src/main/scala/doodle/core/Normalized.scala | Scala | apache-2.0 | 1,241 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.lang.scala.subscriptions
import rx.lang.scala._
object CompositeSubscription {
/**
* Creates a [[rx.lang.scala.subscriptions.CompositeSubscription]] from a group of [[rx.lang.scala.Subscription]].
*/
def apply(subscriptions: Subscription*): CompositeSubscription = {
new CompositeSubscription(new rx.subscriptions.CompositeSubscription(subscriptions.map(_.asJavaSubscription).toArray : _*))
}
/**
* Creates a [[rx.lang.scala.subscriptions.CompositeSubscription]].
*/
def apply(): CompositeSubscription = {
new CompositeSubscription(new rx.subscriptions.CompositeSubscription())
}
/**
* Creates a [[rx.lang.scala.subscriptions.CompositeSubscription]].
*/
private [scala] def apply(subscription: rx.subscriptions.CompositeSubscription): CompositeSubscription = {
new CompositeSubscription(subscription)
}
}
/**
* Represents a group of [[rx.lang.scala.Subscription]] that are disposed together.
*/
class CompositeSubscription private[scala] (override val asJavaSubscription: rx.subscriptions.CompositeSubscription) extends Subscription
{
//override def asJavaSubscription = subscription
/**
   * Adds a subscription to the group,
   * or unsubscribes it immediately if the [[rx.lang.scala.subscriptions.CompositeSubscription]] is already unsubscribed.
* @param subscription the subscription to be added.
* @return the [[rx.lang.scala.subscriptions.CompositeSubscription]] itself.
*/
def +=(subscription: Subscription): this.type = {
asJavaSubscription.add(subscription.asJavaSubscription)
this
}
/**
   * Removes and unsubscribes a subscription from the group.
   * @param subscription the subscription to be removed.
* @return the [[rx.lang.scala.subscriptions.CompositeSubscription]] itself.
*/
def -=(subscription: Subscription): this.type = {
asJavaSubscription.remove(subscription.asJavaSubscription)
this
}
override def unsubscribe(): Unit = asJavaSubscription.unsubscribe()
override def isUnsubscribed: Boolean = asJavaSubscription.isUnsubscribed
}
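
/** Illustrative usage sketch (added for clarity; not part of the original file):
  * subscriptions added to a [[CompositeSubscription]] are disposed together when
  * the composite itself is unsubscribed. The object and method names are
  * placeholders introduced here.
  */
private[scala] object CompositeSubscriptionUsage {
  def unsubscribeTogether(s1: Subscription, s2: Subscription): Unit = {
    val group = CompositeSubscription(s1) // start the group with s1
    group += s2                           // s2 will now be disposed with the group
    group.unsubscribe()                   // unsubscribes both s1 and s2
    assert(group.isUnsubscribed)
  }
}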
| samuelgruetter/RxScala | src/main/scala/rx/lang/scala/subscriptions/CompositeSubscription.scala | Scala | apache-2.0 | 2,656 |
package im.actor.server.persist.contact
import im.actor.server.db.ActorPostgresDriver.api._
import im.actor.server.models
class UserPhoneContactTable(tag: Tag) extends UserContactBase[models.contact.UserPhoneContact](tag, "user_phone_contacts") with InheritingTable {
def phoneNumber = column[Long]("phone_number")
val inherited = UserContact.contacts.baseTableRow
def * = (phoneNumber, ownerUserId, contactUserId, name, accessSalt, isDeleted) <> (models.contact.UserPhoneContact.tupled, models.contact.UserPhoneContact.unapply)
}
object UserPhoneContact {
val pcontacts = TableQuery[UserPhoneContactTable]
def createOrRestore(ownerUserId: Int, contactUserId: Int, phoneNumber: Long, name: Option[String], accessSalt: String) = {
val contact = models.contact.UserPhoneContact(phoneNumber, ownerUserId, contactUserId, name, accessSalt, isDeleted = false)
pcontacts.insertOrUpdate(contact)
}
def insertOrUpdate(contact: models.contact.UserPhoneContact) =
pcontacts.insertOrUpdate(contact)
} | VikingDen/actor-platform | actor-server/actor-persist/src/main/scala/im/actor/server/persist/contact/UserPhoneContact.scala | Scala | mit | 1,021 |
package com.olegych.scastie
package client
package components
import japgolly.scalajs.react._, vdom.all._, extra._
final case class PromptModal(modalText: String,
modalId: String,
isClosed: Boolean,
close: Reusable[Callback],
actionText: String,
actionLabel: String,
action: Reusable[Callback]) {
@inline def render: VdomElement = PromptModal.component(this)
}
object PromptModal {
implicit val reusability: Reusability[PromptModal] =
Reusability.derive[PromptModal]
private def render(props: PromptModal): VdomElement = {
Modal(
title = props.modalText,
isClosed = props.isClosed,
close = props.close,
modalCss = TagMod(cls := "modal-reset"),
modalId = props.modalId,
content = TagMod(
p(
cls := "modal-intro",
props.actionText
),
ul(
li(onClick ==> (
e => e.stopPropagationCB >> props.action >> props.close
),
cls := "btn")(
props.actionLabel
),
li(onClick ==> (e => e.stopPropagationCB >> props.close), cls := "btn")(
"Cancel"
)
)
)
).render
}
private val component =
ScalaComponent
      .builder[PromptModal]("PromptModal")
.render_P(render)
.configure(Reusability.shouldComponentUpdate)
.build
}
| scalacenter/scastie | client/src/main/scala/com.olegych.scastie.client/components/PromptModal.scala | Scala | apache-2.0 | 1,527 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.render
import cats.data.NonEmptySet
import laika.ast.{InternalTarget, Styles, _}
import laika.render.FOFormatter._
import laika.rst.ast.{Line, LineBlock}
/** Default renderer implementation for the XSL-FO output format.
*
* @author Jens Halm
*/
object FORenderer extends ((FOFormatter, Element) => String) {
private val formats: NonEmptySet[String] = NonEmptySet.of("pdf", "fo", "xslfo", "xsl-fo")
def apply (fmt: FOFormatter, element: Element): String = {
def noneIfDefault [T](actual: T, default: T): Option[String] = if (actual == default) None else Some(actual.toString)
def renderTable (table: Table): String = {
if (table.caption.content.nonEmpty) {
// FOP does not support fo:table-caption
fmt.child(TitledBlock(table.caption.content, List(table.copy(caption = Caption()))))
}
else {
val children = table.columns.content ++ (List(table.head, table.body) filterNot (_.content.isEmpty))
fmt.indentedElement("fo:table", table, children)
}
}
object WithFallback {
def unapply (value: Element): Option[Element] = value match {
case f: Fallback => Some(f.fallback)
case _ => None
}
}
def renderBlockContainer (con: BlockContainer): String = {
def quotedBlockContent (content: Seq[Block], attr: Seq[Span]): Seq[Block] =
if (attr.isEmpty) content
else content :+ SpanSequence(attr, Style.attribution)
def figureContent (img: Span, caption: Seq[Span], legend: Seq[Block]): List[Block] =
List(Paragraph(img), SpanSequence(caption, Style.caption), BlockSequence(legend, Style.legend))
def enumLabel (format: EnumFormat, num: Int): String = {
import EnumType._
val pos = format.enumType match {
case Arabic => num.toString
case LowerAlpha => ('a' + num - 1).toChar.toString
case UpperAlpha => ('A' + num - 1).toChar.toString
case LowerRoman => RomanNumerals.intToRoman(num).toLowerCase
case UpperRoman => RomanNumerals.intToRoman(num).toUpperCase
}
format.prefix + pos + format.suffix
}
def bulletLabel (format: BulletFormat): Span = format match {
case StringBullet(_) => RawContent(NonEmptySet.one("fo"), "•")
case other => Text(other.toString)
}
def replaceSpanSequences (content: Seq[Block]): Seq[Block] = content map {
case sc: SpanSequence => Paragraph(sc.content, sc.options)
case other => other
}
con match {
case RootElement(content, _) => fmt.childPerLine(content)
case EmbeddedRoot(content, indent, _) => fmt.withMinIndentation(indent)(_.childPerLine(content))
case Section(header, content,_) => fmt.childPerLine(header +: content)
case e @ TitledBlock(title, content, _) => fmt.blockContainer(e, SpanSequence(title, Style.title) +: content)
case e @ QuotedBlock(content,attr,_) => fmt.blockContainer(e, quotedBlockContent(content,attr))
case e @ BulletListItem(content,format,_) => fmt.listItem(e, List(bulletLabel(format)), content)
case e @ EnumListItem(content,format,num,_) => fmt.listItem(e, List(Text(enumLabel(format,num))), content)
case e @ DefinitionListItem(term,defn,_) => fmt.listItem(e, term, defn)
case e @ ListItemBody(content,_) => fmt.listItemBody(e, replaceSpanSequences(content))
case e @ Figure(img,caption,legend,_) => fmt.blockContainer(e, figureContent(img,caption,legend))
case e @ FootnoteBody(content,_) => fmt.indentedElement("fo:footnote-body", e, content)
case _: Footnote => "" // rendered in link position
case _: Citation => "" // rendered in link position
case WithFallback(fallback) => fmt.child(fallback)
case BlockSequence(Seq(SpanSequence(Seq(img: Image), optSpan)), optBlock) =>
fmt.child(SpanSequence(Seq(img.mergeOptions(optSpan + optBlock)), Styles("align-center", "default-space")))
case BlockSequence(content, NoOpt) => fmt.childPerLine(content)
case unknown => fmt.blockContainer(unknown, unknown.content)
}
}
def renderLink (link: SpanLink): String = {
fmt.pathTranslator.translate(link.target) match {
case int: InternalTarget => fmt.internalLink(link, int.relativeTo(fmt.path).absolutePath, link.content)
case ext: ExternalTarget => fmt.externalLink(link, ext.url, link.content)
}
}
def renderSpanContainer (con: SpanContainer): String = {
def codeStyles (language: String): Option[String] = if (language.isEmpty) None else Some(language)
con match {
case Paragraph(Seq(img: Image), _) => fmt.child(SpanSequence(Seq(img), Styles("align-center", "default-space")))
case e @ Paragraph(content,_) => fmt.block(e, content)
case e @ ParsedLiteralBlock(content,_)=> fmt.blockWithWS(e, content)
case e @ CodeBlock(lang,content,_, _) => fmt.blockWithWS(e.withStyles(codeStyles(lang).toSeq), content)
case e @ Header(level, content,_) => fmt.block(e.mergeOptions(Style.level(level)), content, "keep-with-next"->"always")
case e @ Title(content,_) => fmt.block(e, content, "keep-with-next"->"always")
case e @ Emphasized(content,_) => fmt.inline(e, content)
case e @ Strong(content,_) => fmt.inline(e, content)
case e @ Deleted(content,_) => fmt.inline(e, content)
case e @ Inserted(content,_) => fmt.inline(e, content)
case e @ InlineCode(lang,content,_) => fmt.inline(e.withStyles(codeStyles(lang).toSeq), content)
case e @ Line(content,_) => fmt.block(e, content)
case link: SpanLink => renderLink(link)
case WithFallback(fallback) => fmt.child(fallback)
case SpanSequence(content, NoOpt) => fmt.children(content)
case CodeSpanSequence(content, NoOpt) => fmt.children(content)
case unknown: Block => fmt.block(unknown, unknown.content) // TODO - needs to be inline if parent is not a block container
case unknown => fmt.inline(unknown, unknown.content)
}
}
def renderTemplateSpanContainer (con: TemplateSpanContainer): String = {
con match {
case TemplateRoot(content, NoOpt) => fmt.children(content)
case TemplateSpanSequence(content, NoOpt) => fmt.children(content)
case unknown => fmt.inline(unknown, unknown.content)
}
}
def renderListContainer (con: ListContainer): String = con match {
case e @ EnumList(content,_,_,_) => fmt.listBlock(e, content)
case e @ BulletList(content,_,_) => fmt.listBlock(e, content)
case e @ DefinitionList(content,_) => fmt.listBlock(e, content)
case e: NavigationList => if (e.hasStyle("bookmark")) fmt.bookmarkTree(e) else fmt.childPerLine(e.content)
case WithFallback(fallback) => fmt.child(fallback)
case unknown => fmt.listBlock(unknown, unknown.content)
}
def renderTextContainer (con: TextContainer): String = con match {
case e @ Text(content,_) => fmt.text(e, content)
case e @ TemplateString(content,_) => fmt.rawText(e, content)
case e @ RawContent(f, content, _) => if (f.intersect(formats).nonEmpty) fmt.rawText(e, content) else ""
case e @ CodeSpan(content, categories, _) => fmt.textWithWS(e.withStyles(categories.map(_.name)), content)
case e @ Literal(content,_) => fmt.textWithWS(e, content)
case e @ LiteralBlock(content,_) => fmt.textBlockWithWS(e, content)
case e: BookmarkTitle => fmt.bookmarkTitle(e)
case Comment(content,_) => fmt.comment(content)
case WithFallback(fallback) => fmt.child(fallback)
case unknown => fmt.text(unknown, unknown.content)
}
def renderChoices (name: String, choices: Seq[Choice], options: Options): String = {
val content = choices.flatMap { choice => Paragraph(Strong(Text(choice.label))) +: choice.content }
fmt.child(BlockSequence(content, options))
}
def renderSimpleBlock (block: Block): String = block match {
case e: Preamble => renderPreamble(e)
case e @ ListItemLabel(content,_) => fmt.listItemLabel(e, content)
case e: Rule => fmt.rawElement("fo:block", e, fmt.textElement("fo:leader", e, "", "leader-pattern"->"rule"))
case Selection(name, choices, opt) => renderChoices(name, choices, opt)
case e: InternalLinkTarget => fmt.internalLinkTarget(e)
case e: PageBreak => fmt.block(e)
case e @ LineBlock(content,_) => fmt.blockContainer(e, content)
case TargetFormat(f,e,_) if f.intersect(formats).nonEmpty => fmt.child(e)
case WithFallback(fallback) => fmt.child(fallback)
case _ => ""
}
def renderTarget (target: Target): String = fmt.pathTranslator.translate(target) match {
case ext: ExternalTarget => ext.url
case int: InternalTarget => fmt.buildId(int.relativeTo(fmt.path).absolutePath)
}
def renderIcon (icon: Icon): String = icon match {
case icon: IconGlyph => fmt.rawElement("fo:inline", icon, icon.codePointAsEntity)
case icon: InlineSVGIcon =>
val styles = fmt.styles.collectStyles(SpanSequence.empty.withStyle("svg-shape"), fmt.parents).get("color")
val svg = styles.fold(icon.content) { color =>
val parts = icon.content.split(">", 2) // inlining styles as FOP itself does not support CSS for SVG
if (parts.length == 2) parts.head + s">\n <style>.svg-shape { fill: $color; }</style>" + parts.last
else icon.content
}
fmt.rawElement("fo:instream-foreign-object", icon, svg)
case _ => ""
}
def renderSimpleSpan (span: Span): String = span match {
case e @ CitationLink(ref,label,_) => fmt.withCitation(ref)(c => fmt.footnote(e,label,c.content,c.options))
case e @ FootnoteLink(ref,label,_) => fmt.withFootnote(ref)(f => fmt.footnote(e,label,f.content,f.options))
case RawLink(target,_) => renderTarget(target)
case SectionNumber(pos, opt) => fmt.child(Text(pos.mkString(".") + " ", opt + Style.sectionNumber))
case e @ Image(target,_,_,_,_,_) =>
val uri = target match {
case it: InternalTarget => it.relativeTo(fmt.path).absolutePath.toString
case et: ExternalTarget => et.url
}
fmt.externalGraphic(e, uri, None, None) // ignore intrinsic size and rely on styles for sizing
case icon: Icon => renderIcon(icon)
case e: Leader => fmt.textElement("fo:leader", e, "", "leader-pattern"->"dots", "padding-left" -> "2mm", "padding-right" -> "2mm")
case PageNumberCitation(target,_) => s"""<fo:page-number-citation ref-id="${fmt.buildId(target.relativeTo(fmt.path).absolutePath)}" />"""
      case LineBreak(_) => "
"
case TemplateElement(elem,indent,_) => fmt.withMinIndentation(indent)(_.child(elem))
case WithFallback(fallback) => fmt.child(fallback)
case _ => ""
}
def addRowStyles (rows: Seq[Row]): Seq[Row] = rows.zipWithIndex.map {
      case (row, index) => row.withStyle(if (index % 2 == 0) "cell-odd" else "cell-even") // switch to 1-based indexing
}
def renderTableElement (elem: TableElement): String = elem match {
case e @ TableHead(rows,_) => fmt.indentedElement("fo:table-header", e, rows)
case e @ TableBody(rows,_) => fmt.indentedElement("fo:table-body", e, addRowStyles(rows))
case Caption(_,_) => "" // replaced by Table renderer
case Columns(_,_) => "" // replaced by Table renderer
case e: Column => fmt.emptyElement("fo:table-column", e)
case e @ Row(cells,_) => fmt.indentedElement("fo:table-row", e, cells)
case e @ Cell(_, content, colspan, rowspan, _) =>
fmt.indentedElement("fo:table-cell", e, content, fmt.optAttributes(
"number-columns-spanned" -> noneIfDefault(colspan,1),
"number-rows-spanned" -> noneIfDefault(rowspan,1)):_*
)
}
def renderNavigationItem (elem: NavigationItem): String = {
val keepWithNext = Styles("keepWithNext")
val keepWithPrev = Styles("keepWithPrevious")
def avoidOrphan (content: Seq[NavigationItem]): Seq[NavigationItem] = content match {
case init :+ last if last.content.isEmpty => init :+ last.mergeOptions(keepWithPrev)
case empty => empty
}
elem match {
case l: NavigationItem if l.hasStyle("bookmark") => fmt.bookmark(l)
case NavigationItem(title, content, Some(NavigationLink(target: InternalTarget,_,_)), _, opt) =>
val link = SpanLink(
content = title.content :+ Leader() :+ PageNumberCitation(target),
target = target
)
val keep = if (content.isEmpty) NoOpt else keepWithNext
fmt.childPerLine(Paragraph(Seq(link), Style.nav + keep + opt) +: avoidOrphan(content))
case NavigationItem(title, content, None, _, opt) =>
fmt.childPerLine(Paragraph(title.content, Style.nav + keepWithNext + opt) +: avoidOrphan(content))
case _ => ""
}
}
def renderUnresolvedReference (ref: Reference): String = {
fmt.child(InvalidSpan(s"unresolved reference: $ref", ref.source))
}
def renderInvalidElement (elem: Invalid): String = elem match {
case InvalidBlock(msg, _, fallback, opt) =>
fmt.forMessage(msg)(fmt.child(Paragraph(List(msg), opt))) + fmt.child(fallback)
case e =>
fmt.forMessage(e.message)(fmt.child(e.message) + " ") + fmt.child(e.fallback)
}
def renderRuntimeMessage (message: RuntimeMessage): String = {
fmt.forMessage(message) {
fmt.text(message.withStyle(message.level.toString.toLowerCase), message.content)
}
}
def renderPreamble (p: Preamble): String = {
s"""
|
|<fo:block id="${fmt.buildId(fmt.path)}" page-break-before="always">
| <fo:marker marker-class-name="chapter"><fo:block>${fmt.text(p.title)}</fo:block></fo:marker>
|</fo:block>""".stripMargin
}
element match {
case e: RuntimeMessage => renderRuntimeMessage(e)
case e: Table => renderTable(e)
case e: TableElement => renderTableElement(e)
case e: NavigationItem => renderNavigationItem(e)
case e: Reference => renderUnresolvedReference(e)
case e: Invalid => renderInvalidElement(e)
case e: BlockContainer => renderBlockContainer(e)
case e: SpanContainer => renderSpanContainer(e)
case e: ListContainer => renderListContainer(e)
case e: TextContainer => renderTextContainer(e)
case e: TemplateSpanContainer => renderTemplateSpanContainer(e)
case e: Block => renderSimpleBlock(e)
case e: Span => renderSimpleSpan(e)
case _ => ""
}
}
}
| planet42/Laika | core/shared/src/main/scala/laika/render/FORenderer.scala | Scala | apache-2.0 | 16,246 |
package com.twitter.finagle
import com.twitter.finagle.server.ServerRegistry
import com.twitter.finagle.util.InetSocketAddressUtil
import com.twitter.util._
import java.net.{InetSocketAddress, SocketAddress}
/**
* Trait ListeningServer represents a bound and listening
* server. Closing a server instance unbinds the port and
* relinquishes resources that are associated with the server.
*/
trait ListeningServer
extends Closable
with Awaitable[Unit]
{
/**
* The address to which this server is bound.
*/
def boundAddress: SocketAddress
protected[finagle] lazy val set = Var.value(Set(boundAddress))
protected def closeServer(deadline: Time): Future[Unit]
private[this] var isClosed = false
private[this] var announcements = List.empty[Future[Announcement]]
/**
* Announce the given address and return a future to the announcement
*/
def announce(addr: String): Future[Announcement] = synchronized {
val public = InetSocketAddressUtil.toPublic(boundAddress).asInstanceOf[InetSocketAddress]
if (isClosed)
Future.exception(new Exception("Cannot announce on a closed server"))
else {
val ann = Announcer.announce(public, addr)
announcements ::= ann
ann
}
}
final def close(deadline: Time): Future[Unit] = synchronized {
isClosed = true
val collected = Future.collect(announcements)
Future.join(Seq(
collected.flatMap { list =>
Closable.all(list:_*).close(deadline)
},
// StackServer assumes that closeServer is called synchronously, so we must be
// careful that it doesn't get scheduled for later.
closeServer(deadline)
))
}
}
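
/* Illustrative sketch (added for clarity; not part of the original file): a bound
 * server can be announced and later closed; as `close` above shows, closing both
 * unbinds the port and revokes announcements made through `announce`. The object
 * name and the announcer string are placeholders; the string's exact format depends
 * on the Announcer registered at runtime.
 */
private object ListeningServerUsage {
  def announceAndShutDown(server: ListeningServer): Future[Unit] =
    server
      .announce("zk!myhost:2181!/my/service!0") // register the bound address under a name
      .flatMap(_ => server.close())             // later: unbind and revoke the announcement
}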
/**
* An empty ListeningServer that can be used as a placeholder. For
* example:
*
* {{{
* @volatile var server = NullServer
* def main() { server = Http.serve(...) }
* def exit() { server.close() }
* }}}
*/
object NullServer extends ListeningServer with CloseAwaitably {
def closeServer(deadline: Time) = closeAwaitably { Future.Done }
val boundAddress = new InetSocketAddress(0)
}
/**
* Servers implement RPC servers with `Req`-typed requests and
* `Rep`-typed responses. Servers dispatch requests to a
* [[com.twitter.finagle.Service]] or
* [[com.twitter.finagle.ServiceFactory]] provided through `serve`.
*
* Servers are implemented by the various protocol packages in finagle,
* for example [[com.twitter.finagle.Http]]:
*
* {{{
* object Http extends Server[HttpRequest, HttpResponse] ...
*
* val server = Http.serve(":*", new Service[HttpRequest, HttpResponse] {
* def apply(req: HttpRequest): Future[HttpResponse] = ...
* })
* }}}
*
* Will bind to an ephemeral port (":*") and dispatch request to
* `server.boundAddress` to the provided
* [[com.twitter.finagle.Service]] instance.
*
* The `serve` method has two variants: one for instances of
* `Service`, and another for `ServiceFactory`. The `ServiceFactory`
* variants are used for protocols in which connection state is
* significant: a new `Service` is requested from the
* `ServiceFactory` for each new connection, and requests on that
* connection are dispatched to the supplied service. The service is
* also closed when the client disconnects or the connection is
* otherwise terminated.
*
* @define addr
*
* Serve `service` at `addr`
*
* @define serveAndAnnounce
*
* Serve `service` at `addr` and announce with `name`. Announcements will be removed
* when the service is closed. Omitting the `addr` will bind to an ephemeral port.
*/
trait Server[Req, Rep] {
/** $addr */
def serve(addr: SocketAddress, service: ServiceFactory[Req, Rep]): ListeningServer
/** $addr */
def serve(addr: SocketAddress, service: Service[Req, Rep]): ListeningServer =
serve(addr, ServiceFactory.const(service))
/** $addr */
def serve(addr: String, service: ServiceFactory[Req, Rep]): ListeningServer =
serve(ServerRegistry.register(addr), service)
/** $addr */
def serve(addr: String, service: Service[Req, Rep]): ListeningServer =
serve(addr, ServiceFactory.const(service))
/** $serveAndAnnounce */
def serveAndAnnounce(
name: String,
addr: SocketAddress,
service: ServiceFactory[Req, Rep]
): ListeningServer = {
val server = serve(addr, service)
server.announce(name)
server
}
/** $serveAndAnnounce */
def serveAndAnnounce(
name: String,
addr: SocketAddress,
service: Service[Req, Rep]
): ListeningServer =
serveAndAnnounce(name, addr, ServiceFactory.const(service))
/** $serveAndAnnounce */
def serveAndAnnounce(
name: String,
addr: String,
service: ServiceFactory[Req, Rep]
): ListeningServer = {
val server = serve(addr, service)
server.announce(name)
server
}
/** $serveAndAnnounce */
def serveAndAnnounce(name: String, addr: String, service: Service[Req, Rep]): ListeningServer =
serveAndAnnounce(name, addr, ServiceFactory.const(service))
/** $serveAndAnnounce */
def serveAndAnnounce(name: String, service: ServiceFactory[Req, Rep]): ListeningServer =
serveAndAnnounce(name, ":*", service)
/** $serveAndAnnounce */
def serveAndAnnounce(name: String, service: Service[Req, Rep]): ListeningServer =
serveAndAnnounce(name, ServiceFactory.const(service))
}
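
/* Illustrative sketch (added for clarity; not part of the original file): the
 * `serveAndAnnounce` overloads bind exactly as `serve` does and then announce the
 * bound address under `name`; the variants without an explicit address bind to an
 * ephemeral port. The object name, announcer string and port are placeholders.
 */
private object ServerUsage {
  def bindAndAnnounce[Req, Rep](
    server: Server[Req, Rep],
    service: Service[Req, Rep]
  ): ListeningServer =
    server.serveAndAnnounce("zk!myhost:2181!/my/service!0", ":8080", service)
}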
| koshelev/finagle | finagle-core/src/main/scala/com/twitter/finagle/Server.scala | Scala | apache-2.0 | 5,279 |
package com.simple.simplespec.matchers
import org.hamcrest.{Description, BaseMatcher}
class ApproximateNumericMatcher[A](expected: A,
delta: A,
num: Numeric[A]) extends BaseMatcher[A] {
def describeTo(description: Description) {
description.appendValue(expected).appendText(" (+/- ").appendValue(delta).appendText(")")
}
def matches(item: AnyRef) = if (item.getClass.isAssignableFrom(expected.asInstanceOf[AnyRef].getClass)) {
val actual = item.asInstanceOf[A]
val lowerBound = num.minus(expected, delta)
val upperBound = num.plus(expected, delta)
!(num.lt(actual, lowerBound) || num.gt(actual, upperBound))
} else false
}
| SimpleFinance/simplespec | src/main/scala/com/simple/simplespec/matchers/ApproximateNumericMatcher.scala | Scala | mit | 724 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.modules.bitmap
import edu.latrobe._
import edu.latrobe.blaze._
import edu.latrobe.sizes._
import edu.latrobe.io.image._
final class CropCenterSquare(override val builder: CropCenterSquareBuilder,
override val inputHints: BuildHints,
override val seed: InstanceSeed,
override val weightBufferBuilder: ValueTensorBufferBuilder)
extends BitmapLayer[CropCenterSquareBuilder] {
override protected def doPredict(input: BitmapTensor)
: BitmapTensor = {
val out = input.mapBitmaps(
_.cropCenterSquare()
)
BitmapTensor(out)
}
}
final class CropCenterSquareBuilder
extends BitmapLayerBuilder[CropCenterSquareBuilder] {
override def repr
: CropCenterSquareBuilder = this
override def canEqual(that: Any)
: Boolean = that.isInstanceOf[CropCenterSquareBuilder]
override protected def doCopy()
: CropCenterSquareBuilder = CropCenterSquareBuilder()
override def outputSizeFor(sizeHint: Size): Size2 = sizeHint match {
case sizeHint: Size2 =>
val tmp = Math.min(sizeHint.dims._1, sizeHint.dims._2)
Size2(tmp, tmp, sizeHint.noChannels)
case _ =>
Size2(sizeHint.noTuples, 1, sizeHint.noChannels)
}
override def build(hints: BuildHints,
seed: InstanceSeed,
weightsBuilder: ValueTensorBufferBuilder)
: CropCenterSquare = new CropCenterSquare(this, hints, seed, weightsBuilder)
}
object CropCenterSquareBuilder {
final def apply()
: CropCenterSquareBuilder = new CropCenterSquareBuilder
} | bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/modules/bitmap/CropCenterSquare.scala | Scala | apache-2.0 | 2,335 |
import sbt._
import Keys._
import play.Project._
import cloudbees.Plugin._
object ApplicationBuild extends Build {
val appName = "jphm1"
val appVersion = "1.0-SNAPSHOT"
val appDependencies = Seq(
// Add your project dependencies here,
jdbc,
anorm,
"mysql" % "mysql-connector-java" % "5.1.18",
"se.radley" %% "play-plugins-enumeration" % "1.1.0"
)
val main = play.Project(appName, appVersion, appDependencies)
// Add your own project settings here
.settings(cloudBeesSettings :_*)
.settings(CloudBees.applicationId := Some("maji1"))
}
| smk-wgen/maji3 | project/Build.scala | Scala | gpl-2.0 | 596 |
/*
 * Test case for SI-4835. These tests confirm that the fix
 * doesn't break laziness. To test memory consumption,
 * I would need to confirm that OutOfMemoryError doesn't occur.
 * I could create such tests; however, such tests consume
 * too much time and memory.
*/
object Test {
private final val INFINITE = -1
def testStreamIterator(num: Int, stream: Stream[Int]): Unit = {
val iter = stream.iterator
print(num)
    // if num == -1 (INFINITE), then stream is an infinite sequence
if (num == INFINITE) {
for(i <- 0 until 10) {
print(" " + iter.next())
}
} else {
while(iter.hasNext) {
print(" " + iter.next())
}
}
println()
}
def main(args: Array[String]): Unit = {
import Stream.{from, cons, empty}
testStreamIterator(INFINITE, from(0))
testStreamIterator(INFINITE, from(0).filter(_ % 2 == 1))
testStreamIterator(1, Stream(1))
testStreamIterator(2, Stream(1, 2))
//Stream with side effect
testStreamIterator(2, cons(1, cons({ print(" A"); 2}, empty)))
testStreamIterator(3, Stream(1, 2, 3))
//Stream with side effect
testStreamIterator(3, cons(1, cons({ print(" A"); 2}, cons({ print(" B"); 3}, Stream.empty))))
}
}
| yusuke2255/dotty | tests/run/t4835.scala | Scala | bsd-3-clause | 1,221 |
package com.atomist.param
case class SimpleParameterValues(parameterValues: Seq[ParameterValue])
extends ParameterValues
object SimpleParameterValues {
def apply(pvs: ParameterValue*): ParameterValues =
SimpleParameterValues(pvs)
def apply(m: Map[String, Object]): ParameterValues = fromMap(m)
def apply(k: String, o: Object) : ParameterValues = fromMap(Map(k -> o))
def fromMap(m: Map[String, Object]): ParameterValues =
SimpleParameterValues((m map {
case (k, v) => SimpleParameterValue(k, v)
}).toSeq)
val Empty = new SimpleParameterValues(Seq())
} | atomist/rug | src/main/scala/com/atomist/param/SimpleParameterValues.scala | Scala | gpl-3.0 | 588 |