code (string) | repo_name (string) | path (string) | language (string, 1 class) | license (string, 15 classes) | size (int64)
---|---|---|---|---|---
package org.antipathy.mvn_scalafmt.format
// $COVERAGE-OFF$
/** Base trait for formatting
* @tparam I Input type
* @tparam O Output type
*/
trait Formatter[I, O] {
/** Format the passed in input
* @param input The input to format
* @return Formatted output
*/
def format(input: I): O
}
// $COVERAGE-ON$
| SimonJPegg/mvn_scalafmt | src/main/scala/org/antipathy/mvn_scalafmt/format/Formatter.scala | Scala | apache-2.0 | 327 |
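A minimal sketch of what an implementation of the Formatter trait above could look like; the name and behaviour are illustrative only and are not part of mvn_scalafmt.

package org.antipathy.mvn_scalafmt.format

// Hypothetical formatter: strips trailing whitespace from every line.
object WhitespaceTrimmingFormatter extends Formatter[String, String] {
  override def format(input: String): String =
    input.split("\n").map(_.replaceAll("\\s+$", "")).mkString("\n")
}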
/*
* Copyright 2017 Sumo Logic
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ws.epigraph.java.service.projections.op
import ws.epigraph.java.NewlineStringInterpolator.{NewlineHelper, i}
import ws.epigraph.java.ObjectGenContext
import ws.epigraph.java.ObjectGenUtils.{genFieldExpr, genLinkedMap, genList, genTypeExpr}
import ws.epigraph.java.service.ServiceObjectGenerators.gen
import ws.epigraph.projections.op.{OpFieldProjectionEntry, OpRecordModelProjection}
import ws.epigraph.types.{RecordTypeApi, TypeApi}
import scala.collection.JavaConversions._
/**
* @author <a href="mailto:[email protected]">Konstantin Sobolev</a>
*/
class OpRecordModelProjectionGen(p: OpRecordModelProjection)
extends OpProjectionGen[OpRecordModelProjection](p) {
override protected def generateNonVisitedObject(o: String, ctx: ObjectGenContext): String = {
// ctx.use(classOf[RecordType].getName)
val fpe = ctx.use(classOf[OpFieldProjectionEntry].getName)
/*@formatter:off*/sn"""\\
new $o(
${genTypeExpr(p.`type`().asInstanceOf[TypeApi], ctx.gctx)},
${p.flag().toString},
${i(gen(p.defaultValue(), ctx))},
${i(gen(p.params(), ctx))},
${i(gen(p.annotations(), ctx))},
${i(gen(p.metaProjection(), ctx))},
${i(genLinkedMap("java.lang.String", fpe.toString, p.fieldProjections().entrySet().toList.map{e =>
("\\"" + e.getKey + "\\"", genFieldProjectionEntry(p.`type`(), e.getValue, ctx))}, ctx))},
${i(if (p.polymorphicTails() == null) "null" else genList(p.polymorphicTails().map(gen(_, ctx)),ctx))},
${gen(p.location(), ctx)}
)"""/*@formatter:on*/
}
private def genFieldProjectionEntry(
t: RecordTypeApi,
fpe: OpFieldProjectionEntry,
ctx: ObjectGenContext): String = {
val fpes = ctx.use(classOf[OpFieldProjectionEntry].getName)
/*@formatter:off*/sn"""\\
new $fpes(
${genFieldExpr(t.asInstanceOf[TypeApi], fpe.field().name(), ctx.gctx)},
${i(gen(fpe.fieldProjection(), ctx))},
${gen(fpe.location(), ctx)}
)"""/*@formatter:on*/
}
}
| SumoLogic/epigraph | java/codegen/src/main/scala/ws/epigraph/java/service/projections/op/OpRecordModelProjectionGen.scala | Scala | apache-2.0 | 2,522 |
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette
/**
* A marker trait for authentication information.
*/
trait AuthInfo
| datalek/silhouette | silhouette/src/main/scala/silhouette/AuthInfo.scala | Scala | apache-2.0 | 858 |
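A hedged illustration (not from the silhouette sources) of how a concrete credential type would use the marker trait; the class and field names below are assumptions.

package silhouette

// Hypothetical credential carrier marked as authentication information.
final case class PasswordInfo(hasher: String, hash: String, salt: Option[String] = None) extends AuthInfo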
package com.karasiq.shadowcloud.server.http
import akka.http.scaladsl.server._
trait SCAkkaHttpRoutes extends SCHttpServerSettings with SCAkkaHttpApiRoutes with SCAkkaHttpFileRoutes with SCWebSocketRoutes { self: Directives ⇒
def scRoutes: Route = {
encodeResponse(scApiRoute) ~ scFileRoute ~ scWebSocketRoutes
}
}
| Karasiq/shadowcloud | server/api-routes/src/main/scala/com/karasiq/shadowcloud/server/http/SCAkkaHttpRoutes.scala | Scala | apache-2.0 | 327 |
package com.garnercorp.swdvm
import com.github.nscala_time.time.Imports._
import org.joda.time.DateTimeConstants._
trait Elapsed {
def time: Double
def completionFrom(start: DateTime) = start + (time*MINUTES_PER_DAY).toInt.minutes
}
object Elapsed {
private case class PlainElapsed(time: Double) extends Elapsed {
override def toString = time.toString
}
def apply(value: Double): Elapsed = PlainElapsed(value)
}
trait CompositeElapsed[E <: Elapsed] extends Elapsed {
def composition: Vector[E]
override def time = composition.map(_.time).sum
}
object CompositeElapsed {
def apply[E <: Elapsed](values: E*): CompositeElapsed[E] = new CompositeElapsed[E] {
override def composition: Vector[E] = Vector(values: _*)
}
} | thinkingbox/velmod | src/main/scala/com/garnercorp/swdvm/Elapsed.scala | Scala | apache-2.0 | 747 |
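A small usage sketch (assumed, not from the repo): `time` is a fraction of a day, so 0.5 maps to 720 minutes.

import com.garnercorp.swdvm.{CompositeElapsed, Elapsed}
import com.github.nscala_time.time.Imports._

object ElapsedExample extends App {
  val start = new DateTime(2016, 1, 1, 9, 0)

  // 0.5 days * 1440 minutes/day = 720 minutes after 09:00 -> 21:00
  println(Elapsed(0.5).completionFrom(start))

  // A composite's elapsed time is the sum of its parts: 0.25 + 0.25 = 0.5
  println(CompositeElapsed(Elapsed(0.25), Elapsed(0.25)).time)
}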
package com.github.xiaodongw.swagger.finatra
import java.util.Date
import com.twitter.finagle.http.{Request, Response}
import com.twitter.finagle.{Service, SimpleFilter}
import com.twitter.finatra.http.Controller
import com.twitter.util.Future
import org.joda.time.{DateTime, LocalDate}
class SampleFilter extends SimpleFilter[Request, Response] {
override def apply(request: Request, service: Service[Request, Response]): Future[Response] = {
service(request)
}
}
class SampleController extends Controller with SwaggerSupport {
override implicit protected val swagger = SampleSwagger
case class HelloResponse(text: String, time: Date)
getWithDoc("/students/:id") { o =>
o.summary("Read student information")
.description("Read the detail information about the student.")
.tag("Student")
.routeParam[String]("id", "the student id")
.produces("application/json")
.responseWith[Student](200, "the student object",
example = Some(Student("Tom", "Wang", Gender.Male, new LocalDate(), 4, Some(Address("California Street", "94111")))))
.responseWith[Unit](404, "the student is not found")
} { request: Request =>
val id = request.getParam("id")
response.ok.json(Student("Alice", "Wang", Gender.Female, new LocalDate(), 4, Some(Address("California Street", "94111")))).toFuture
}
postWithDoc("/students/:id") { o =>
o.summary("Sample request with route")
.description("Read the detail information about the student.")
.tag("Student")
.request[StudentWithRoute]
} { request: StudentWithRoute =>
val id = request.id
response.ok.json(Student("Alice", "Wang", Gender.Female, new LocalDate(), 4, Some(Address("California Street", "94111")))).toFuture
}
postWithDoc("/students/test/:id") { o =>
o.summary("Sample request with route2")
.description("Read the detail information about the student.")
.tag("Student")
.request[StudentWithRoute]
} { request: StudentWithRoute =>
val id = request.id
response.ok.json(Student("Alice", "Wang", Gender.Female, new LocalDate(), 4, Some(Address("California Street", "94111")))).toFuture
}
postWithDoc("/students/firstName") {
_.request[StringWithRequest]
.tag("Student")
} { request: StringWithRequest =>
request.firstName
}
postWithDoc("/students") { o =>
o.summary("Create a new student")
.tag("Student")
.bodyParam[Student]("student", "the student details")
.responseWith[Unit](200, "the student is created")
.responseWith[Unit](500, "internal error")
} { student: Student =>
//val student = request.contentString
response.ok.json(student).toFuture
}
postWithDoc("/students/bulk") { o =>
o.summary("Create a list of students")
.tag("Student")
.bodyParam[Array[Student]]("students", "the list of students")
.responseWith[Unit](200, "the students are created")
.responseWith[Unit](500, "internal error")
} { students: List[Student] =>
response.ok.json(students).toFuture
}
putWithDoc("/students/:id") { o =>
o.summary("Update the student")
.tag("Student")
.formParam[String]("name", "the student name")
.formParam[Int]("grade", "the student grade")
.routeParam[String]("id", "student ID")
.cookieParam[String]("who", "who make the update")
.headerParam[String]("token", "the token")
.responseWith[Unit](200, "the student is updated")
.responseWith[Unit](404, "the student is not found")
} { request: Request =>
val id = request.getParam("id")
val name = request.getParam("name")
val grade = request.getIntParam("grade")
val who = request.cookies.getOrElse("who", "Sam") //todo swagger-ui not set the cookie?
val token = request.headerMap("token")
response.ok.toFuture
}
getWithDoc("/students") { o =>
o.summary("Get a list of students")
.tag("Student")
.responseWith[Array[String]](200, "the student ids")
.responseWith[Unit](500, "internal error")
.addSecurity("sampleBasic", List())
} { request: Request =>
response.ok.json(Array("student1", "student2")).toFuture
}
getWithDoc("/courses") { o =>
o.summary("Get a list of courses")
.tag("Course")
.responseWith[Array[String]](200, "the courses ids")
.responseWith[Unit](500, "internal error")
} { request: Request =>
response.ok.json(Array("course1", "course2")).toFuture
}
getWithDoc("/courses/:id") { o =>
o.summary("Get the detail of a course")
.tag("Course")
.routeParam[String]("id", "the course id")
.responseWith[Course](200, "the courses detail")
.responseWith[Unit](500, "internal error")
} { request: Request =>
response.ok.json(Course(new DateTime(), "calculation", Seq("math"), CourseType.LAB, 20, BigDecimal(300.54))).toFuture
}
filter[SampleFilter].getWithDoc("/courses/:courseId/student/:studentId") { o =>
o.summary("Is the student in this course")
.tags(List("Course", "Student"))
.routeParam[String]("courseId", "the course id")
.routeParam[String]("studentId", "the student id")
.responseWith[Boolean](200, "true / false")
.responseWith[Unit](500, "internal error")
.deprecated(true)
} { request: Request =>
response.ok.json(true).toFuture
}
}
| xiaodongw/swagger-finatra | src/test/scala/com/github/xiaodongw/swagger/finatra/SampleController.scala | Scala | apache-2.0 | 5,347 |
/*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0, (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tle.core.searches
import java.time.Instant
import java.util.{Date, UUID}
import com.fasterxml.jackson.annotation.JsonSubTypes.Type
import com.fasterxml.jackson.annotation.JsonTypeInfo.Id
import com.fasterxml.jackson.annotation.{JsonIgnoreProperties, JsonSubTypes, JsonTypeInfo}
import com.tle.core.validation.OEQEntityEdits
import io.circe.{Decoder, Encoder}
@JsonTypeInfo(use = Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type")
@JsonSubTypes(
Array(
new Type(value = classOf[SortControl], name = "sort"),
new Type(value = classOf[OwnerControl], name = "owner"),
new Type(value = classOf[ModifiedWithinControl], name = "modifiedWithin"),
new Type(value = classOf[FacetControl], name = "facet"),
new Type(value = classOf[CollectionsControl], name = "collections")
)
)
sealed trait SearchControl
case class SortControl(default: String, editable: Boolean) extends SearchControl
case class OwnerControl(default: Option[String], editable: Boolean) extends SearchControl
case class ModifiedWithinControl(default: Double, editable: Boolean) extends SearchControl
case class FacetControl(title: String, node: String) extends SearchControl
case class CollectionsControl(collections: Option[Iterable[UUID]], editable: Boolean)
extends SearchControl
object SearchControl {
import io.circe.generic.extras.Configuration
import io.circe.generic.extras.semiauto._
implicit val customConfig: Configuration = Configuration.default
.withDiscriminator("type")
.copy(transformConstructorNames = {
case "SortControl" => "sort"
case "OwnerControl" => "owner"
case "ModifiedWithinControl" => "modifiedWithin"
case "FacetControl" => "facet"
case "CollectionsControl" => "collections"
})
implicit val sctrlEncoder: Encoder[SearchControl] = deriveEncoder
implicit val sctrlDecoder: Decoder[SearchControl] = deriveDecoder
}
case class SearchConfig(
id: UUID,
index: String,
name: String,
nameStrings: Option[Map[String, String]],
description: Option[String],
descriptionStrings: Option[Map[String, String]],
created: Date,
modified: Date,
sections: Map[String, Iterable[SearchControl]]
)
@JsonIgnoreProperties(ignoreUnknown = true)
case class SearchConfigEdit(
id: Option[UUID],
index: String,
name: String,
nameStrings: Option[Map[String, String]],
description: Option[String],
descriptionStrings: Option[Map[String, String]],
sections: Map[String, Iterable[SearchControl]]
) extends OEQEntityEdits
case class SearchPageConfig(configId: UUID)
object SearchPageConfig {
import io.circe.generic.semiauto._
implicit val spcEncoder: Encoder[SearchPageConfig] = deriveEncoder
implicit val spcDecoder: Decoder[SearchPageConfig] = deriveDecoder
}
| equella/Equella | Source/Plugins/Core/com.equella.core/scalasrc/com/tle/core/searches/SearchConfig.scala | Scala | apache-2.0 | 3,666 |
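A hedged sketch of the JSON produced by the derivation above; it assumes circe-core and circe-parser are on the classpath, and the field order in the output may differ.

import com.tle.core.searches.{SearchControl, SortControl}
import io.circe.syntax._

object SearchControlJsonExample extends App {
  val ctrl: SearchControl = SortControl(default = "modified", editable = true)

  // The custom Configuration adds a "type" discriminator and renames the
  // constructor, e.g. {"default":"modified","editable":true,"type":"sort"}
  val json = ctrl.asJson.noSpaces
  println(json)

  // Round-trip through the derived decoder (requires the circe-parser module).
  println(io.circe.parser.decode[SearchControl](json))
}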
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.classification.impl
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.SparkContext
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.util.Loader
import org.apache.spark.sql.{Row, SQLContext}
/**
* Helper class for import/export of GLM classification models.
*/
private[classification] object GLMClassificationModel {
object SaveLoadV1_0 {
def thisFormatVersion: String = "1.0"
/** Model data for import/export */
case class Data(weights: Vector, intercept: Double, threshold: Option[Double])
/**
* Helper method for saving GLM classification model metadata and data.
* @param modelClass String name for model class, to be saved with metadata
* @param numClasses Number of classes label can take, to be saved with metadata
*/
def save(
sc: SparkContext,
path: String,
modelClass: String,
numFeatures: Int,
numClasses: Int,
weights: Vector,
intercept: Double,
threshold: Option[Double]): Unit = {
val sqlContext = SQLContext.getOrCreate(sc)
import sqlContext.implicits._
// Create JSON metadata.
val metadata = compact(render(
("class" -> modelClass) ~ ("version" -> thisFormatVersion) ~
("numFeatures" -> numFeatures) ~ ("numClasses" -> numClasses)))
sc.parallelize(Seq(metadata), 1).saveAsTextFile(Loader.metadataPath(path))
// Create Parquet data.
val data = Data(weights, intercept, threshold)
sc.parallelize(Seq(data), 1).toDF().write.parquet(Loader.dataPath(path))
}
/**
* Helper method for loading GLM classification model data.
*
* NOTE: Callers of this method should check numClasses, numFeatures on their own.
*
* @param modelClass String name for model class (used for error messages)
*/
def loadData(sc: SparkContext, path: String, modelClass: String): Data = {
val datapath = Loader.dataPath(path)
val sqlContext = SQLContext.getOrCreate(sc)
val dataRDD = sqlContext.read.parquet(datapath)
val dataArray = dataRDD.select("weights", "intercept", "threshold").take(1)
assert(dataArray.length == 1, s"Unable to load $modelClass data from: $datapath")
val data = dataArray(0)
assert(data.size == 3, s"Unable to load $modelClass data from: $datapath")
val (weights, intercept) = data match {
case Row(weights: Vector, intercept: Double, _) =>
(weights, intercept)
}
val threshold = if (data.isNullAt(2)) {
None
} else {
Some(data.getDouble(2))
}
Data(weights, intercept, threshold)
}
}
}
| xieguobin/Spark_2.0.0_cn1 | mllib/classification/impl/GLMClassificationModel.scala | Scala | apache-2.0 | 3,539 |
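The helper is private[classification], so it is only reachable from inside that package; below is a sketch (names and values assumed) of how a concrete model's save/load code might delegate to it.

package org.apache.spark.mllib.classification

import org.apache.spark.SparkContext
import org.apache.spark.mllib.classification.impl.GLMClassificationModel.SaveLoadV1_0
import org.apache.spark.mllib.linalg.Vectors

// Hypothetical glue code for illustration; real models (e.g. LogisticRegressionModel)
// wire these calls into their own save()/load() implementations.
object GLMSaveLoadExample {
  def saveExample(sc: SparkContext, path: String): Unit =
    SaveLoadV1_0.save(
      sc, path,
      modelClass = "org.apache.spark.mllib.classification.LogisticRegressionModel",
      numFeatures = 3, numClasses = 2,
      weights = Vectors.dense(0.1, -0.2, 0.3),
      intercept = 0.0,
      threshold = Some(0.5))

  def loadExample(sc: SparkContext, path: String): Unit = {
    val data = SaveLoadV1_0.loadData(sc, path,
      "org.apache.spark.mllib.classification.LogisticRegressionModel")
    println(s"weights=${data.weights} intercept=${data.intercept} threshold=${data.threshold}")
  }
}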
package functionalProgramming.memoization
import org.scalatest.{FunSuite, WordSpecLike}
import DicPath._
/**
* Created by yujieshui on 2017/2/17.
*/
class DicPathTest extends WordSpecLike {
"solution" must {
"case 1,2" in {
assert(solution(1, 2) == 4)
assert(solution(2, 1) == 6)
}
"case 2,2" in {
assert(solution(2, 2) == 9)
}
"case 3 3" in {
val result = solution(3, 3)
println(111)
assert(result == 19)
}
"case 2 2" in {
val result = solution(2, 2)
assert(result == 9)
}
"case 6 6" in {
val result = solution(6, 6)
println(result)
}
"case 10,10" in {
val result = solution(10, 10)
println(result)
}
"case 18,18" in {
val result = solution(18, 18)
println(result)
}
}
}
| 1178615156/hackerrank | src/test/scala/functionalProgramming/memoization/DicPathTest.scala | Scala | apache-2.0 | 823 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package java.util
import scala.annotation.tailrec
import ScalaOps._
import java.lang.{reflect => jlr}
abstract class AbstractCollection[E] protected () extends Collection[E] {
def iterator(): Iterator[E]
def size(): Int
def isEmpty(): Boolean = size == 0
def contains(o: Any): Boolean =
this.scalaOps.exists(o === _)
def toArray(): Array[AnyRef] =
toArray(new Array[AnyRef](size))
def toArray[T <: AnyRef](a: Array[T]): Array[T] = {
val toFill: Array[T] =
if (a.size >= size) a
else jlr.Array.newInstance(a.getClass.getComponentType, size).asInstanceOf[Array[T]]
val iter = iterator
for (i <- 0 until size)
toFill(i) = iter.next().asInstanceOf[T]
if (toFill.size > size)
toFill(size) = null.asInstanceOf[T]
toFill
}
def add(e: E): Boolean =
throw new UnsupportedOperationException()
def remove(o: Any): Boolean = {
@tailrec
def findAndRemove(iter: Iterator[E]): Boolean = {
if (iter.hasNext) {
if (iter.next() === o) {
iter.remove()
true
} else
findAndRemove(iter)
} else
false
}
findAndRemove(iterator())
}
def containsAll(c: Collection[_]): Boolean =
c.scalaOps.forall(this.contains(_))
def addAll(c: Collection[_ <: E]): Boolean =
c.scalaOps.foldLeft(false)((prev, elem) => add(elem) || prev)
def removeAll(c: Collection[_]): Boolean =
removeWhere(c.contains(_))
def retainAll(c: Collection[_]): Boolean =
removeWhere(!c.contains(_))
def clear(): Unit =
removeWhere(_ => true)
private def removeWhere(p: Any => Boolean): Boolean = {
val iter = iterator()
var changed = false
while (iter.hasNext) {
if (p(iter.next())) {
iter.remove()
changed = true
}
}
changed
}
override def toString(): String =
this.scalaOps.mkString("[", ",", "]")
}
| SebsLittleHelpers/scala-js | javalib/src/main/scala/java/util/AbstractCollection.scala | Scala | apache-2.0 | 2,178 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.common
import org.I0Itec.zkclient.ZkClient
import kafka.utils.{ZKStringSerializer, ZKConfig}
import java.util.concurrent.atomic.AtomicReference
object KafkaZookeeperClient {
private val INSTANCE = new AtomicReference[ZkClient](null)
def getZookeeperClient(config: ZKConfig): ZkClient = {
// TODO: This cannot be a singleton since unit tests break if we do that
// INSTANCE.compareAndSet(null, new ZkClient(config.zkConnect, config.zkSessionTimeoutMs, config.zkConnectionTimeoutMs,
// ZKStringSerializer))
INSTANCE.set(new ZkClient(config.zkConnect, config.zkSessionTimeoutMs, config.zkConnectionTimeoutMs,
ZKStringSerializer))
INSTANCE.get()
}
}
| kavink92/kafka-0.8.0-beta1-src | core/src/main/scala/kafka/common/KafkaZookeperClient.scala | Scala | apache-2.0 | 1,577 |
package org.eichelberger.sfc.utils
object BitManipulations {
// if the first on-bit is at position P, then this routine returns a
// mask in which all of the bits from 0..P are turned on, and all of
// the bits from P+1..63 are off
def usedMask(x: Long): Long = {
var y = x | (x >> 1L)
y = y | (y >> 2L)
y = y | (y >> 4L)
y = y | (y >> 8L)
y = y | (y >> 16L)
y | (y >> 32L)
}
def sharedBitPrefix(a: Long, b: Long): Long =
a & ~usedMask(a ^ b)
def commonBlockMin(a: Long, b: Long): Long =
a & ~usedMask(a ^ b)
def commonBlockMax(a: Long, b: Long): Long = {
val mask = usedMask(a ^ b)
(a & ~mask) | mask
}
}
| cne1x/sfseize | src/main/scala/org/eichelberger/sfc/utils/BitManipulations.scala | Scala | apache-2.0 | 669 |
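A few worked values (computed by hand from the definitions above) make the mask behaviour concrete:

import org.eichelberger.sfc.utils.BitManipulations._

object BitManipulationsExample extends App {
  // usedMask smears the highest set bit downward:
  // 0b101000 (40) -> 0b111111 (63)
  println(usedMask(40L))              // 63

  // 52 = 0b110100 and 44 = 0b101100 first differ at bit 4 (value 16),
  // so the shared prefix keeps only bit 5: 0b100000 = 32
  println(sharedBitPrefix(52L, 44L))  // 32

  // The common block spans everything below the first differing bit.
  println(commonBlockMin(52L, 44L))   // 32  (0b100000)
  println(commonBlockMax(52L, 44L))   // 63  (0b111111)
}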
package lila.round
import akka.actor._
import chess.Color
import lila.game.{ Game, GameRepo, Pov }
private[round] final class CheatDetector(reporter: ActorSelection) {
private val createReport = false
def apply(game: Game): Fu[Option[Color]] = interesting(game) ?? {
GameRepo findMirror game map {
_ ?? { mirror =>
mirror.players map (p => p -> p.userId) collectFirst {
case (player, Some(userId)) => game.players find (_.userId == player.userId) map { cheater =>
lila.log("cheat").info(s"${cheater.color} ($userId) @ ${game.id} uses ${mirror.id}")
if (createReport) reporter ! lila.hub.actorApi.report.Cheater(userId,
s"Cheat detected on ${gameUrl(game.id)}, using lichess AI: ${gameUrl(mirror.id)}")
cheater.color
}
} flatten
}
}
}
private def gameUrl(gameId: String) = s"https://lichess.org/${gameId}"
private val TURNS_MODULUS = 10
private def interesting(game: Game) =
game.rated && game.turns > 0 && (game.turns % TURNS_MODULUS == 0)
}
| clarkerubber/lila | modules/round/src/main/CheatDetector.scala | Scala | agpl-3.0 | 1,072 |
package org.bitcoins.spvnode.serializers.messages.data
import org.bitcoins.core.crypto.DoubleSha256Digest
import org.bitcoins.core.serializers.RawBitcoinSerializer
import org.bitcoins.spvnode.messages.TypeIdentifier
import org.bitcoins.spvnode.messages.data.Inventory
/**
* Created by chris on 6/1/16.
* Serializes/deserializes an inventory
* https://bitcoin.org/en/developer-reference#term-inventory
*/
trait RawInventorySerializer extends RawBitcoinSerializer[Inventory] {
def read(bytes : List[Byte]) : Inventory = {
val typeIdentifier = TypeIdentifier(bytes.take(4))
val hash = DoubleSha256Digest(bytes.slice(4,bytes.size))
Inventory(typeIdentifier,hash)
}
def write(inventory : Inventory) : String = {
inventory.typeIdentifier.hex + inventory.hash.hex
}
}
object RawInventorySerializer extends RawInventorySerializer
| bitcoin-s/bitcoin-s-spv-node | src/main/scala/org/bitcoins/spvnode/serializers/messages/data/RawInventorySerializer.scala | Scala | mit | 858 |
/** Copyright 2016 - 2021 Martin Mauch (@nightscape)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.crealytics.spark.v2.excel
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.TaskAttemptContext
import org.apache.spark.sql.connector.write.LogicalWriteInfo
import org.apache.spark.sql.execution.datasources.OutputWriter
import org.apache.spark.sql.execution.datasources.OutputWriterFactory
import org.apache.spark.sql.execution.datasources.v2.FileWriteBuilder
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.DataType
import org.apache.spark.sql.types.StructType
class ExcelWriteBuilder(
paths: Seq[String],
formatName: String,
supportsDataType: DataType => Boolean,
info: LogicalWriteInfo
) extends FileWriteBuilder(paths, formatName, supportsDataType, info) {
override def prepareWrite(
sqlConf: SQLConf,
job: Job,
options: Map[String, String],
dataSchema: StructType
): OutputWriterFactory = {
val excelOptions = new ExcelOptions(options, sqlConf.sessionLocalTimeZone)
new OutputWriterFactory {
override def newInstance(path: String, dataSchema: StructType, context: TaskAttemptContext): OutputWriter = {
new ExcelOutputWriter(path, dataSchema, context, excelOptions)
}
override def getFileExtension(context: TaskAttemptContext): String =
s".${excelOptions.fileExtension}"
}
}
}
| crealytics/spark-excel | src/main/3.0_3.1/scala/com/crealytics/spark/v2/excel/ExcelWriteBuilder.scala | Scala | apache-2.0 | 1,940 |
package com.gshakhn.privatewiki.client.components
import com.gshakhn.privatewiki.client.UnlockedBinder
import com.gshakhn.privatewiki.client.components.testutil.PageInteractions._
import com.gshakhn.privatewiki.client.components.testutil.PrivateWikiBaseSpec
import com.gshakhn.privatewiki.shared.{BinderLoaded, NoEncryption, Paper}
import org.scalajs.jquery._
import org.scalatest.path
import upickle.default._
class UnlockingBinderSpec extends PrivateWikiBaseSpec {
override def newInstance: path.FunSpecLike = new UnlockingBinderSpec()
describe("A PrivateWiki") {
implicit val client = new TestClient
render
describe("after a binder with NoEncryption is loaded") {
enterBinderName("binder")
enterBinderPassword("secure")
val papers = Set(Paper("paper", ""))
client.response = BinderLoaded("binder", NoEncryption, write(papers))
clickLoadBinder()
describe("clicking the binder") {
clickUnlockBinder("binder")
it("marks binder as unlocked") {
val li = jQuery(".binder-list-item")
val span = li.find("span")
li should haveClass("unlocked-binder")
span shouldNot haveClass("glyphicon")
span shouldNot haveClass("glyphicon-lock")
}
it("adds the binder to the paper picker") {
val paperPickerButtons = jQuery(".paper-picker-btn")
paperPickerButtons.length shouldBe 2 // 'All' and the binder
}
it("creates an unlocked binder with papers") {
val unlockedBinder = rootComponent.state.loadedBinders.collect { case b: UnlockedBinder => b}.head
unlockedBinder.papers shouldBe papers
}
}
}
}
tearDown()
}
| gshakhn/private-wiki | client/src/test/scala/com/gshakhn/privatewiki/client/components/UnlockingBinderSpec.scala | Scala | apache-2.0 | 1,716 |
package com.softwaremill.session
import org.scalacheck.{Gen, Prop, Properties}
object SessionManagerBasicEncoderTest extends Properties("SessionManagerBasicEncoder") {
import Prop._
val secretGen = Gen.choose(64, 256).flatMap(size => Gen.listOfN(size, Gen.alphaNumChar).map(_.mkString))
property("encode+decode") = forAllNoShrink(secretGen) { (secret: String) =>
forAll { (encrypt: Boolean, useMaxAgeSeconds: Boolean, data: Map[String, String]) =>
val config = SessionConfig.default(secret)
.copy(sessionEncryptData = encrypt)
.copy(sessionMaxAgeSeconds = if (useMaxAgeSeconds) Some(3600L) else None)
val manager = new SessionManager[Map[String, String]](config).clientSessionManager
manager.decode(manager.encode(data)) == SessionResult.Decoded(data)
}
}
property("doesn't decode expired session") = forAllNoShrink(secretGen) { (secret: String) =>
forAll { (encrypt: Boolean, data: Map[String, String]) =>
val config = SessionConfig.default(secret)
.copy(sessionEncryptData = encrypt)
.copy(sessionMaxAgeSeconds = Some(20L)) // expires after 20s
val managerPast = new SessionManager[Map[String, String]](config) {
override def nowMillis = 8172L * 1000L
}.clientSessionManager
val managerFuture = new SessionManager[Map[String, String]](config) {
override def nowMillis = (8172L + 600L) * 1000L // 600s later
}.clientSessionManager
managerFuture.decode(managerPast.encode(data)) == SessionResult.Expired
}
}
}
| ilyai/akka-http-session | core/src/test/scala/com/softwaremill/session/SessionManagerBasicEncoderTest.scala | Scala | apache-2.0 | 1,548 |
package com.giyeok.jparser.metalang2
import com.giyeok.jparser.Symbols
sealed trait AstifierExpr {
def replaceThisNode(node: AstifierExpr): AstifierExpr
}
case object ThisNode extends AstifierExpr {
override def replaceThisNode(node: AstifierExpr): AstifierExpr = node
}
case class Unbind(expr: AstifierExpr, symbol: Symbols.Symbol) extends AstifierExpr {
override def replaceThisNode(node: AstifierExpr): Unbind = Unbind(expr.replaceThisNode(node), symbol)
}
case class SeqRef(expr: AstifierExpr, idx: Int) extends AstifierExpr {
override def replaceThisNode(node: AstifierExpr): SeqRef = SeqRef(expr.replaceThisNode(node), idx)
}
case class UnrollRepeat(lower: Int, source: AstifierExpr, eachAstifier: AstifierExpr) extends AstifierExpr {
override def replaceThisNode(node: AstifierExpr): UnrollRepeat =
UnrollRepeat(lower: Int, source.replaceThisNode(node), eachAstifier)
}
case class UnrollOptional(source: AstifierExpr, contentAstifier: AstifierExpr, emptySym: Symbols.Symbol, contentSym: Symbols.Symbol) extends AstifierExpr {
override def replaceThisNode(node: AstifierExpr): UnrollOptional =
UnrollOptional(source.replaceThisNode(node), contentAstifier, emptySym, contentSym)
}
case class EachMap(target: AstifierExpr, mapFn: AstifierExpr) extends AstifierExpr {
override def replaceThisNode(node: AstifierExpr): EachMap =
EachMap(target.replaceThisNode(node), mapFn)
}
case class UnrollChoices(choiceSymbols: Map[Symbols.Symbol, AstifierExpr]) extends AstifierExpr {
override def replaceThisNode(node: AstifierExpr): UnrollChoices =
UnrollChoices(choiceSymbols.view.mapValues(_.replaceThisNode(node)).toMap)
}
case class CreateObj(className: String, args: List[AstifierExpr]) extends AstifierExpr {
override def replaceThisNode(node: AstifierExpr): CreateObj = CreateObj(className, args map (_.replaceThisNode(node)))
}
case class CreateList(elems: List[AstifierExpr]) extends AstifierExpr {
override def replaceThisNode(node: AstifierExpr): CreateList = CreateList(elems map (_.replaceThisNode(node)))
}
case class ConcatList(lhs: AstifierExpr, rhs: AstifierExpr) extends AstifierExpr {
override def replaceThisNode(node: AstifierExpr): ConcatList = ConcatList(lhs.replaceThisNode(node), rhs.replaceThisNode(node))
}
case class AstifiedCtx(refs: List[Astified]) {
def :+(ref: Astified): AstifiedCtx = AstifiedCtx(refs :+ ref)
def replaceThisNode(node: AstifierExpr): AstifiedCtx = AstifiedCtx(refs map { r =>
Astified(r.symbol, r.astifierExpr.replaceThisNode(node), r.insideCtx)
})
}
// Treating `symbol` as ThisNode, `astifierExpr` is the expression that astifies it.
// When a BoundExpr refers to this symbol, `insideCtx` holds the refs usable inside it.
case class Astified(symbol: Symbols.Symbol, astifierExpr: AstifierExpr, insideCtx: Option[AstifiedCtx])
| Joonsoo/moon-parser | metalang/src/main/scala/com/giyeok/jparser/metalang2/AstifierExpr.scala | Scala | mit | 2,898 |
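An illustrative (assumed) astifier value: unbind the current node from a grammar symbol, pick its second child, and wrap the result in a constructor call. The symbol and class name are made up for the example.

import com.giyeok.jparser.Symbols
import com.giyeok.jparser.metalang2._

object AstifierExprExample {
  // Assumed grammar symbol; any Symbols.Symbol would do here.
  val exprSymbol: Symbols.Symbol = Symbols.Nonterminal("Expr")

  // Roughly "new MyAst(unbind(thisNode, Expr)(1))", expressed as data.
  val astifier: AstifierExpr =
    CreateObj("MyAst", List(SeqRef(Unbind(ThisNode, exprSymbol), 1)))

  // replaceThisNode substitutes the ThisNode placeholder throughout the tree.
  val rebased: AstifierExpr = astifier.replaceThisNode(SeqRef(ThisNode, 0))
}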
import sbt._, Keys._
object Generator {
private val generateCode = TaskKey[Unit]("generateCode")
private val generateFiles = SettingKey[Seq[GeneratedCode]]("generateFiles")
private val checkGenerateCode = TaskKey[Boolean]("checkGenerateCode")
private val checkGenerateCodeError = TaskKey[Unit]("checkGenerateCodeError")
private final case class GeneratedCode(file: File, code: String) {
def write(): Unit = IO.write(file, code)
def check: Boolean = {
if (file.isFile) {
IO.read(file) == code
} else {
println(red(file + " not found!"))
false
}
}
}
private def red(str: String) = {
("\\n" * 2) + scala.Console.RED + str + ("\\n " * 2) + scala.Console.RESET
}
val settings: Seq[Def.Setting[_]] = Seq(
generateFiles := {
val pack = "msgpack4z"
val dir = CustomCrossType.shared(baseDirectory.value, "main") / pack
val caseCodec = dir / "CaseCodec.scala"
val caseMapCodec = dir / "CaseMapCodec.scala"
val arrayCodec = dir / "AnyValArrayCodec.scala"
val tupleCodec = dir / "TupleCodec.scala"
val anyValCodec = dir / "AnyValCodec.scala"
List(
GeneratedCode(caseCodec, CaseCodec.generate(pack)),
GeneratedCode(caseMapCodec, CaseMapCodec.generate(pack)),
GeneratedCode(arrayCodec, ArrayCodec.generate(pack)),
GeneratedCode(tupleCodec, TupleCodec.generate(pack)),
GeneratedCode(anyValCodec, AnyValCodec.generate(pack))
)
},
generateCode := generateFiles.value.foreach(_.write),
checkGenerateCode := generateFiles.value.forall(_.check),
checkGenerateCodeError := {
generateCode.value
Thread.sleep(1000)
val diff = scala.sys.process.Process("git diff").!!
if (diff.nonEmpty) {
sys.error("Working directory is dirty!\\n" + diff)
}
},
shellPrompt := { state =>
val extracted = Project.extract(state)
if (extracted.runTask(checkGenerateCode, state)._2) {
shellPrompt.?.value.map(_.apply(state)).getOrElse("")
} else {
red("generator code changed. please execute " + generateCode.key.label)
}
}
)
}
| msgpack4z/msgpack4z-core | project/Generator.scala | Scala | mit | 2,166 |
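A sketch of how these settings might be wired into the build (assumed usage; the actual msgpack4z-core build may attach them differently). From the sbt shell, `generateCode` rewrites the generated sources and `checkGenerateCodeError` fails when `git diff` reports changes afterwards.

import sbt._

object ExampleBuild {
  // Attach the generator tasks and settings to a project definition.
  lazy val core = Project("msgpack4z-core", file("."))
    .settings(Generator.settings: _*)
}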
package com.twitter.finagle.mux
import com.twitter.conversions.time._
import com.twitter.finagle.mux.transport.Message
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.{Failure, Status}
import com.twitter.util.{Duration, Future, Promise, Time}
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import java.util.concurrent.locks.ReentrantReadWriteLock
import java.util.logging.{Level, Logger}
/**
* A ClientSession implements the state machine for a mux client session. It's
* implemented as transport and, thus, can sit below a client dispatcher. It
* transitions between various states based on the messages it processes
* and can be in one of following states:
*
* `Dispatching`: The stable operating state of a Session. The `status` is
* [[com.twitter.finagle.Status.Open]] and calls to `write` are passed on
* to the transport below.
*
* `Draining`: When a session is `Draining` it has processed a `Tdrain`
* message, but still has outstanding requests. In this state, we have
* promised our peer not to send any more requests, thus the session's `status`
* is [[com.twitter.finagle.Status.Busy]] and calls to `write` are nacked.
*
* `Drained`: When a session is fully drained; that is, it has received a
* `Tdrain` and there are no more pending requests, the sessions's
* state is set to `Drained`. In this state, the session is useless.
* It is dead. Its `status` is set to [[com.twitter.finagle.Status.Closed]]
* and calls to `write` are nacked.
*
* `Leasing`: When the session has processed a lease, its state is
* set to `Leasing` which comprises the lease expiry time. This state
* is equivalent to `Dispatching` except if the lease has expired.
* At this time, the session's `status` is set to [[com.twitter.finagle.Status.Busy]].
*
* This can be composed below a `ClientDispatcher` to manage its session.
*
* @param trans The underlying transport.
*
* @param detectorConfig The config used to instantiate a failure detector over the
* session. The detector is given control over the session's ping mechanism and its
* status is reflected in the session's status.
*
* @param name The identifier for the session, used when logging.
*
* @param sr The [[com.twitter.finagle.StatsReceiver]] which the session uses to
* export internal stats.
*/
private[twitter] class ClientSession(
trans: Transport[Message, Message],
detectorConfig: FailureDetector.Config,
name: String,
sr: StatsReceiver)
extends Transport[Message, Message] {
import ClientSession._
// Maintain the sessions's state, whose access is mediated
// by the readLk and writeLk.
@volatile private[this] var state: State = Dispatching
private[this] val (readLk, writeLk) = {
val lk = new ReentrantReadWriteLock
(lk.readLock, lk.writeLock)
}
// keeps track of outstanding Rmessages.
private[this] val outstanding = new AtomicInteger()
private[this] val pingMessage = new Message.PreEncodedTping
private[this] val pingPromise = new AtomicReference[Promise[Unit]](null)
private[this] val log = Logger.getLogger(getClass.getName)
private[this] def safeLog(msg: String, level: Level = Level.INFO): Unit =
try log.log(level, msg) catch {
case _: Throwable =>
}
private[this] val leaseGauge = sr.addGauge("current_lease_ms") {
state match {
case l: Leasing => l.remaining.inMilliseconds
case _ => (Time.Top - Time.now).inMilliseconds
}
}
private[this] val leaseCounter = sr.counter("leased")
private[this] val drainingCounter = sr.counter("draining")
private[this] val drainedCounter = sr.counter("drained")
/**
* Processes mux control messages and transitions the state accordingly.
* The transitions are synchronized with and reflected in `status`.
*/
def processControlMsg(m: Message): Unit = m match {
case Message.Tdrain(tag) =>
if (log.isLoggable(Level.FINE))
safeLog(s"Started draining a connection to $name", Level.FINE)
drainingCounter.incr()
writeLk.lockInterruptibly()
try {
state = if (outstanding.get() > 0) Draining else {
if (log.isLoggable(Level.FINE))
safeLog(s"Finished draining a connection to $name", Level.FINE)
drainedCounter.incr()
Drained
}
trans.write(Message.Rdrain(tag))
} finally writeLk.unlock()
case Message.Tlease(Message.Tlease.MillisDuration, millis) =>
writeLk.lock()
try state match {
case Leasing(_) | Dispatching =>
state = Leasing(Time.now + millis.milliseconds)
if (log.isLoggable(Level.FINE))
safeLog(s"leased for ${millis.milliseconds} to $name", Level.FINE)
leaseCounter.incr()
case Draining | Drained =>
// Ignore the lease if we're closed, since these are anyway
// irrecoverable states.
} finally writeLk.unlock()
case Message.Tping(tag) => trans.write(Message.Rping(tag))
case _ => // do nothing
}
private[this] def processRmsg(msg: Message): Unit = msg match {
case Message.Rping(Message.PingTag) =>
val p = pingPromise.getAndSet(null)
if (p != null) p.setDone()
case Message.Rerr(Message.PingTag, err) =>
val p = pingPromise.getAndSet(null)
if (p != null) p.setException(ServerError(err))
// Move the session to `Drained`, effectively closing the session,
// if we were `Draining` our session.
case Message.Rmessage(_) =>
readLk.lock()
if (outstanding.decrementAndGet() == 0 && state == Draining) {
readLk.unlock()
writeLk.lock()
try {
drainedCounter.incr()
if (log.isLoggable(Level.FINE))
safeLog(s"Finished draining a connection to $name", Level.FINE)
state = Drained
} finally writeLk.unlock()
} else {
readLk.unlock()
}
case _ => // do nothing.
}
private[this] val processTwriteFail: Throwable => Unit = { _ =>
outstanding.decrementAndGet()
}
private[this] def processAndWrite(msg: Message): Future[Unit] = msg match {
case _: Message.Treq | _: Message.Tdispatch =>
outstanding.incrementAndGet()
trans.write(msg).onFailure(processTwriteFail)
case _ => trans.write(msg)
}
private[this] def processRead(msg: Message) = msg match {
case m@Message.Rmessage(_) => processRmsg(m)
case m@Message.ControlMessage(_) => processControlMsg(m)
case _ => // do nothing.
}
/**
* Write to the underlying transport if our state permits,
* otherwise return a nack.
*/
def write(msg: Message): Future[Unit] = {
readLk.lock()
try state match {
case Dispatching | Leasing(_) => processAndWrite(msg)
case Draining | Drained => FutureNackException
} finally readLk.unlock()
}
def read(): Future[Message] = trans.read().onSuccess(processRead)
/**
* Send a mux Tping to our peer. Note: only one outstanding ping is
* permitted; subsequent calls to ping are failed fast.
*/
def ping(): Future[Unit] = {
val done = new Promise[Unit]
if (pingPromise.compareAndSet(null, done)) {
trans.write(pingMessage).before(done)
} else {
FuturePingNack
}
}
private[this] val detector = FailureDetector(
detectorConfig, ping, sr.scope("failuredetector"))
def status: Status = Status.worst(detector.status, {
readLk.lock()
try state match {
case Draining => Status.Busy
case Drained => Status.Closed
case leased@Leasing(_) if leased.expired => Status.Busy
case Leasing(_) | Dispatching => Status.Open
} finally readLk.unlock()
})
val onClose = trans.onClose
def localAddress = trans.localAddress
def remoteAddress = trans.remoteAddress
def peerCertificate = trans.peerCertificate
def close(deadline: Time): Future[Unit] = {
leaseGauge.remove()
trans.close(deadline)
}
}
private object ClientSession {
val FutureNackException = Future.exception(
Failure.rejected("The request was Nacked by the server"))
val FuturePingNack = Future.exception(Failure(
"A ping is already oustanding on this session."))
sealed trait State
case object Dispatching extends State
case object Draining extends State
case object Drained extends State
case class Leasing(end: Time) extends State {
def remaining: Duration = end.sinceNow
def expired: Boolean = end < Time.now
}
} | a-manumohan/finagle | finagle-mux/src/main/scala/com/twitter/finagle/mux/ClientSession.scala | Scala | apache-2.0 | 8,498 |
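A standalone toy model (not finagle code) of the state-to-status mapping that the scaladoc above describes, showing when writes would be nacked; every name here is local to the sketch.

import com.twitter.util.{Duration, Time}

object SessionStateModel extends App {
  sealed trait State
  case object Dispatching extends State
  case object Draining extends State
  case object Drained extends State
  case class Leasing(end: Time) extends State { def expired: Boolean = end < Time.now }

  def canWrite(s: State): Boolean = s match {
    case Dispatching | Leasing(_) => true   // writes pass through to the transport
    case Draining | Drained       => false  // writes are nacked
  }

  println(canWrite(Dispatching))                                   // true
  println(canWrite(Leasing(Time.now + Duration.fromSeconds(30))))  // true (Busy once expired)
  println(canWrite(Draining))                                      // false
}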
package com.twitter.finagle.http
import com.twitter.finagle.client.StackClient
import com.twitter.finagle.util.AsyncLatch
import com.twitter.finagle._
import com.twitter.io.{Buf, Reader, Writer}
import com.twitter.util.{Future, Promise, Return, Throw, Time}
import java.util.concurrent.atomic.AtomicBoolean
private[finagle] object DelayedRelease {
val role = StackClient.Role.prepFactory
val description = "Prevents an HTTP service from being closed until its response completes"
val module: Stackable[ServiceFactory[Request, Response]] =
new Stack.Module1[FactoryToService.Enabled, ServiceFactory[Request, Response]] {
val role = DelayedRelease.role
val description = DelayedRelease.description
def make(_enabled: FactoryToService.Enabled, next: ServiceFactory[Request, Response]) =
if (_enabled.enabled) next.map(new DelayedReleaseService(_))
else next
}
}
/**
* Delay release of the connection until all chunks have been received.
*/
private[finagle] class DelayedReleaseService[-Req <: Request](
service: Service[Req, Response]
) extends ServiceProxy[Req, Response](service) {
protected[this] val latch = new AsyncLatch
private[this] def proxy(in: Response) = {
val released = new AtomicBoolean(false)
def done() {
if (released.compareAndSet(false, true)) {
latch.decr()
}
}
Response(
in.httpResponse,
new Reader {
def read(n: Int) = in.reader.read(n) respond {
case Return(None) => done()
case Throw(_) => done()
case _ =>
}
def discard() = {
// Note: Discarding the underlying reader terminates the session and
// marks the service as unavailable. It's important that we discard
// before releasing the service (by invoking `done`), to ensure that
// the service wrapper in the pool will create a new service instead
// of reusing this one whose transport is closing.
in.reader.discard()
done()
}
}
)
}
override def apply(request: Req): Future[Response] = {
latch.incr()
service(request) transform {
case Return(r) if r.isChunked =>
Future.value(proxy(r))
case t =>
latch.decr()
Future.const(t)
}
}
override final def close(deadline: Time): Future[Unit] = {
val p = new Promise[Unit]
latch.await { p.become(service.close(deadline)) }
p
}
}
| lukiano/finagle | finagle-http/src/main/scala/com/twitter/finagle/http/DelayedReleaseService.scala | Scala | apache-2.0 | 2,472 |
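A hedged usage sketch: wrapping an existing HTTP client service so the connection is not released while a chunked response body is still streaming. Because the class is private[finagle], the snippet must live inside that package; in practice this behaviour is normally obtained through the DelayedRelease stack module rather than by direct instantiation.

package com.twitter.finagle.http

import com.twitter.finagle.Service

object DelayedReleaseExample {
  // Any Service[Request, Response] would do as the underlying service.
  def wrap(underlying: Service[Request, Response]): Service[Request, Response] =
    new DelayedReleaseService(underlying)
}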
package success.independent.descendent
import autoregister.annotations._
object A {
def register(toRegister: A): Unit = {}
}
@RegisterAllDescendentObjects("success.independent.descendent.A.register")
trait A | math85360/autoregister | src/test/resources/success/independent/descendent/A.scala | Scala | mit | 212 |
import scala.reflect.runtime.universe._
import scala.tools.reflect.Eval
object Test extends App {
{
val x = 42
def foo() = reify(reify(reify(x)));
{
val x = 2
val code1 = foo()
val code2 = code1.eval
val code3 = code2.eval
println(code3.eval)
}
}
}
| som-snytt/dotty | tests/disabled/macro/run/reify_newimpl_37.scala | Scala | apache-2.0 | 300 |
package spire
package examples
import spire.algebra._
import spire.math.{Natural, UInt}
import scala.collection.mutable
object SetUtil {
def powerStream[A](members: Stream[A]): Stream[Set[A]] = {
val done = Stream.empty[Set[A]]
def powerLoop(as: Stream[A], i: Int): Stream[Set[A]] = {
def nthLoop(as: Stream[A], i: Int): Stream[Set[A]] = as match {
case a #:: tail =>
if (i == 0) {
Set(a) #:: done
} else {
val next = nthLoop(tail, i - 1)
next #::: next.map(_ + a)
}
case _ =>
done
}
val nth = nthLoop(as, i)
if (nth.isEmpty) nth else nth #::: powerLoop(as, i + 1)
}
Set.empty[A] #:: powerLoop(members, 0)
}
}
object PureSet {
def empty[A]: PureSet[A] = PureSet[A](a => false)
def infinite[A]: PureSet[A] = PureSet[A](a => true)
implicit def monoid[A] = new Monoid[PureSet[A]] {
def id: PureSet[A] = empty
def op(x: PureSet[A], y: PureSet[A]): PureSet[A] = x | y
}
implicit def bool[A] = new Bool[PureSet[A]] {
def one: PureSet[A] = infinite
def zero: PureSet[A] = empty
def complement(a: PureSet[A]): PureSet[A] = ~a
def and(a: PureSet[A], b: PureSet[A]): PureSet[A] = a & b
def or(a: PureSet[A], b: PureSet[A]): PureSet[A] = a | b
override def xor(a: PureSet[A], b: PureSet[A]): PureSet[A] = a ^ b
}
}
case class PureSet[A](f: A => Boolean) extends Function1[A, Boolean] { lhs =>
def apply(a: A): Boolean =
f(a)
def toSet(universe: Set[A]): Set[A] =
universe.filter(f)
def toMathSet(universe: Set[A]): MathSet[A] =
MathSet(universe.filter(f))
def filter(g: A => Boolean): PureSet[A] =
PureSet(a => f(a) && g(a))
def contramap[B](g: B => A): PureSet[B] =
PureSet(b => f(g(b)))
def unary_~(): PureSet[A] =
PureSet(a => !(f(a)))
def |(rhs: PureSet[A]): PureSet[A] =
PureSet(a => lhs.f(a) || rhs.f(a))
def &(rhs: PureSet[A]): PureSet[A] =
PureSet(a => lhs.f(a) && rhs.f(a))
def --(rhs: PureSet[A]): PureSet[A] =
PureSet(a => lhs.f(a) && !rhs.f(a))
def ^(rhs: PureSet[A]): PureSet[A] =
PureSet(a => lhs.f(a) ^ rhs.f(a))
def cross[B](rhs: PureSet[B]): PureSet[(A, B)] =
PureSet[(A, B)](t => lhs.f(t._1) && rhs.f(t._2))
def power(universe: Stream[A]): Stream[Set[A]] =
SetUtil.powerStream(universe.filter(f))
}
object MathSet {
def empty[A]: MathSet[A] = Fin(Set.empty)
def apply[A](as: A*): MathSet[A] = Fin(as.toSet)
def infinite[A]: MathSet[A] = Inf(Set.empty)
def apply[A](as: Set[A]): MathSet[A] = Fin(as)
case class Fin[A](members: Set[A]) extends MathSet[A] { lhs =>
def apply(a: A): Boolean =
members(a)
def toSet(universe: Set[A]): Set[A] =
universe & members
def toPureSet: PureSet[A] =
PureSet(members)
def filter(f: A => Boolean): Fin[A] =
Fin(members.filter(f))
def cross[B](rhs: Fin[B]): Fin[(A, B)] =
Fin(lhs.members.flatMap(a => rhs.members.map(b => (a, b))))
def size(usize: Option[Natural]): Option[Natural] =
Some(Natural(members.size))
def map[B](f: A => B): MathSet[B] = Fin(members.map(f))
def unary_~(): MathSet[A] = Inf(members)
override def toString: String =
members.mkString("{", ", ", "}")
}
case class Inf[A](outsiders: Set[A]) extends MathSet[A] {
def apply(a: A): Boolean =
!outsiders(a)
def toSet(universe: Set[A]): Set[A] =
universe -- outsiders
def toPureSet: PureSet[A] =
PureSet(a => !outsiders(a))
def size(usize: Option[Natural]): Option[Natural] =
usize.map(_ - UInt(outsiders.size))
def map[B](f: A => B): MathSet[B] = Inf(outsiders.map(f))
def unary_~(): MathSet[A] = Fin(outsiders)
override def toString: String =
if (outsiders.isEmpty) "(U)"
else outsiders.mkString("(U -- {", ", ", "})")
}
implicit def monoid[A] = new Monoid[MathSet[A]] {
def id: MathSet[A] = empty
def op(x: MathSet[A], y: MathSet[A]): MathSet[A] = x | y
}
implicit def bool[A] = new Bool[MathSet[A]] {
def one: MathSet[A] = infinite
def zero: MathSet[A] = empty
def complement(a: MathSet[A]): MathSet[A] = ~a
def and(a: MathSet[A], b: MathSet[A]): MathSet[A] = a & b
def or(a: MathSet[A], b: MathSet[A]): MathSet[A] = a | b
override def xor(a: MathSet[A], b: MathSet[A]): MathSet[A] = a ^ b
}
}
sealed trait MathSet[A] extends Function1[A, Boolean] { lhs =>
import MathSet._
def toSet(universe: Set[A]): Set[A]
def toPureSet: PureSet[A]
def size(usize: Option[Natural]): Option[Natural]
def toFinite(universe: Set[A]): Fin[A] = Fin(toSet(universe))
def map[B](f: A => B): MathSet[B]
def unary_~(): MathSet[A]
def |(rhs: MathSet[A]): MathSet[A] = (lhs, rhs) match {
case (Fin(x), Fin(y)) => Fin(x | y)
case (Fin(x), Inf(y)) => Inf(y -- x)
case (Inf(x), Fin(y)) => Inf(x -- y)
case (Inf(x), Inf(y)) => Inf(x & y)
}
def &(rhs: MathSet[A]): MathSet[A] = (lhs, rhs) match {
case (Fin(x), Fin(y)) => Fin(x & y)
case (Fin(x), Inf(y)) => Fin(x -- y)
case (Inf(x), Fin(y)) => Fin(y -- x)
case (Inf(x), Inf(y)) => Inf(x | y)
}
def --(rhs: MathSet[A]): MathSet[A] = (lhs, rhs) match {
case (Fin(x), Fin(y)) => Fin(x -- y)
case (Fin(x), Inf(y)) => Fin(x & y)
case (Inf(x), Fin(y)) => Inf(x | y)
case (Inf(x), Inf(y)) => Fin(y -- x)
}
private def xor(x: Set[A], y: Set[A]): Set[A] = {
val builder = new mutable.SetBuilder[A, Set[A]](Set.empty)
x.foreach(a => if (!y(a)) builder += a)
y.foreach(a => if (!x(a)) builder += a)
builder.result()
}
def ^(rhs: MathSet[A]): MathSet[A] = (lhs, rhs) match {
case (Fin(x), Fin(y)) => Fin(xor(x, y))
case (Fin(x), Inf(y)) => Inf(x -- y)
case (Inf(x), Fin(y)) => Inf(y -- x)
case (Inf(x), Inf(y)) => Fin(xor(x, y))
}
def power(universe: Stream[A]): Stream[Set[A]] =
SetUtil.powerStream(universe.filter(this))
}
| tixxit/spire | examples/src/main/scala/spire/example/infset.scala | Scala | mit | 5,945 |
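A short usage sketch of the finite/cofinite algebra above (example values chosen for illustration):

import spire.examples.{MathSet, PureSet}

object SetExample extends App {
  val small: MathSet[Int] = MathSet(1, 2, 3)   // finite
  val coSmall: MathSet[Int] = ~MathSet(3, 4)   // everything except 3 and 4

  println(small | coSmall)   // infinite set excluding only 4
  println(small & coSmall)   // {1, 2}
  println(small -- coSmall)  // {3}

  // PureSet is the predicate-only view; it can only be enumerated
  // against an explicit universe.
  val evens = PureSet[Int](_ % 2 == 0)
  println(evens.toSet((1 to 10).toSet))  // Set(2, 4, 6, 8, 10)
}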
package mesosphere.marathon
package api
import java.net.URI
import javax.servlet.http.{ HttpServlet, HttpServletRequest, HttpServletResponse }
import mesosphere.marathon.io.IO
import org.slf4j.LoggerFactory
class WebJarServlet extends HttpServlet {
private[this] val log = LoggerFactory.getLogger(getClass)
override def doGet(req: HttpServletRequest, resp: HttpServletResponse): Unit = {
def sendResource(resourceURI: String, mime: String): Unit = {
IO.withResource(resourceURI) { stream =>
resp.setContentType(mime)
resp.setContentLength(stream.available())
resp.setStatus(200)
IO.transfer(stream, resp.getOutputStream)
} getOrElse {
resp.sendError(404)
}
}
def sendResourceNormalized(resourceURI: String, mime: String): Unit = {
val normalized = new URI(resourceURI).normalize().getPath
if (normalized.startsWith("/META-INF/resources/webjars")) sendResource(normalized, mime)
else resp.sendError(404, s"Path: $normalized")
}
//extract request data
val jar = req.getServletPath // e.g. /ui
var resource = req.getPathInfo // e.g. /fonts/icon.gif
if (resource.endsWith("/")) resource = resource + "index.html" // welcome file
val file = resource.split("/").last //e.g. icon.gif
val mediaType = file.split("\\.").lastOption.getOrElse("") //e.g. gif
val mime = Option(getServletContext.getMimeType(file)).getOrElse(mimeType(mediaType)) //e.g plain/text
val resourceURI = s"/META-INF/resources/webjars$jar$resource"
//log request data, since the names are not very intuitive
if (log.isDebugEnabled) {
log.debug(
s"""
|pathinfo: ${req.getPathInfo}
|context: ${req.getContextPath}
|servlet: ${req.getServletPath}
|path: ${req.getPathTranslated}
|uri: ${req.getRequestURI}
|jar: $jar
|resource: $resource
|file: $file
|mime: $mime
|resourceURI: $resourceURI
""".stripMargin)
}
//special rule for accessing root -> redirect to ui main page
if (req.getRequestURI == "/") sendRedirect(resp, "ui/")
//special rule for accessing /help -> redirect to api-console main page
else if (req.getRequestURI == "/help") sendRedirect(resp, "api-console/index.html")
//if a directory is requested, redirect to trailing slash
else if (!file.contains(".")) sendRedirect(resp, req.getRequestURI + "/") //request /ui -> /ui/
//if we come here, it must be a resource
else sendResourceNormalized(resourceURI, mime)
}
private[this] def mimeType(mediaType: String): String = {
mediaType.toLowerCase match {
case "eot" => "application/vnd.ms-fontobject"
case "svg" => "image/svg+xml"
case "ttf" => "application/font-ttf"
case _ => "application/octet-stream"
}
}
private[this] def sendRedirect(response: HttpServletResponse, location: String): Unit = {
response.setStatus(HttpServletResponse.SC_MOVED_TEMPORARILY)
response.setHeader("Location", location)
}
}
| natemurthy/marathon | src/main/scala/mesosphere/marathon/api/WebJarServlet.scala | Scala | apache-2.0 | 3,071 |
/**
* SolidAttachment element template for TABuddy-Desktop.
*
* Copyright (c) 2013 Alexey Aksenov [email protected]
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TABuddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TABuddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TABuddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: [email protected]
*/
package org.digimead.tabuddy.desktop.logic.payload.template.attachment.solid
import java.io.BufferedOutputStream
import java.io.File
import java.io.FileOutputStream
import java.io.IOException
import java.io.OutputStream
import java.util.Date
import scala.ref.WeakReference
import org.digimead.digi.lib.aop.log
import org.digimead.digi.lib.util.FileUtil
import org.digimead.digi.lib.util.Util
import org.digimead.tabuddy.desktop.{ Messages => CoreMessages }
import org.digimead.tabuddy.desktop.Resources
import org.digimead.tabuddy.desktop.support.WritableValue
import org.digimead.tabuddy.model.element.Element
import org.eclipse.jface.action.Action
import org.eclipse.jface.action.ActionContributionItem
import org.eclipse.jface.dialogs.IDialogConstants
import org.eclipse.jface.dialogs.IInputValidator
import org.eclipse.jface.dialogs.InputDialog
import org.eclipse.swt.SWT
import org.eclipse.swt.dnd.DND
import org.eclipse.swt.dnd.DragSourceEvent
import org.eclipse.swt.dnd.DragSourceListener
import org.eclipse.swt.dnd.DropTargetEvent
import org.eclipse.swt.dnd.DropTargetListener
import org.eclipse.swt.dnd.FileTransfer
import org.eclipse.swt.dnd.TextTransfer
import org.eclipse.swt.events.ModifyEvent
import org.eclipse.swt.events.ModifyListener
import org.eclipse.swt.layout.GridData
import org.eclipse.swt.widgets.Button
import org.eclipse.swt.widgets.Composite
import org.eclipse.swt.widgets.Control
import org.eclipse.swt.widgets.FileDialog
import org.eclipse.swt.widgets.Shell
import org.eclipse.ui.forms.events.ExpansionEvent
import org.eclipse.ui.forms.events.IExpansionListener
import org.eclipse.swt.program.Program
import org.digimead.tabuddy.model.dsl.attachment.solid.SolidAttachment
import org.digimead.digi.lib.log.api.Loggable
import org.digimead.tabuddy.desktop.support.App
abstract class PropertyDialog(val parentShell: Shell)
extends PropertyDialogSkel(parentShell) with org.digimead.tabuddy.desktop.definition.Dialog with Loggable {
/** Payload value */
val data: WritableValue[SolidAttachment]
/** Element property id */
val propertyId: Symbol
/** Element that contains 'data' in property with 'propertyId' */
val element: Element.Generic
/** DND source listener */
protected val dragSourceListener: DNDSource
/** DND target listener */
protected val dragTargetListener: DNDTarget
/**
* Create contents of the dialog.
*
* @param parent
*/
override protected def createDialogArea(parent: Composite): Control = {
val result = super.createDialogArea(parent)
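    // Contribute the Import / Export / Delete / Edit actions as buttons in the left-hand composite.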
Option(new ActionContributionItem(ActionImport)).foreach { action =>
action.fill(getCompositeLeft())
action.getWidget().asInstanceOf[Button].
setLayoutData(new GridData(SWT.FILL, SWT.CENTER, false, false, 1, 1))
}
Option(new ActionContributionItem(ActionExport)).foreach { action =>
action.fill(getCompositeLeft())
action.getWidget().asInstanceOf[Button].
setLayoutData(new GridData(SWT.FILL, SWT.CENTER, false, false, 1, 1))
}
Option(new ActionContributionItem(ActionDelete)).foreach { action =>
action.fill(getCompositeLeft())
action.getWidget().asInstanceOf[Button].
setLayoutData(new GridData(SWT.FILL, SWT.CENTER, false, false, 1, 1))
}
Option(new ActionContributionItem(ActionEdit)).foreach { action =>
action.fill(getCompositeLeft())
action.getWidget().asInstanceOf[Button].
setLayoutData(new GridData(SWT.FILL, SWT.CENTER, false, false, 1, 1))
}
data.addChangeListener { (value, event) =>
Option(data.value) match {
case Some(value) =>
setMessage(Messages.dialogMessage_text.format(value.name))
case None =>
setMessage(Messages.dialogMessageEmpty_text)
}
updateLogo(Option(data.value))
updateGeneralSection(Option(data.value))
updateSummarySection(Option(data.value))
updatePreview(Option(data.value))
updateActions(Option(data.value))
}
getSctnGeneral().addExpansionListener(new IExpansionListener {
def expansionStateChanging(e: ExpansionEvent) {}
def expansionStateChanged(e: ExpansionEvent) =
getScrolledCompositeInformation.setMinSize(getCompositeInformation.computeSize(SWT.DEFAULT, SWT.DEFAULT))
})
getSctnSummary().addExpansionListener(new IExpansionListener {
def expansionStateChanging(e: ExpansionEvent) {}
def expansionStateChanged(e: ExpansionEvent) =
getScrolledCompositeInformation.setMinSize(getCompositeInformation.computeSize(SWT.DEFAULT, SWT.DEFAULT))
})
getLblDNDTip().setFont(Resources.setFontStyle(Resources.fontSmall, SWT.ITALIC))
createDragNDropZone()
// update content
updateLogo(Option(data.value))
updateGeneralSection(Option(data.value))
updateSummarySection(Option(data.value))
updatePreview(Option(data.value))
updateActions(Option(data.value))
// Set the dialog title
setTitle(Messages.dialogTitle_text.format(propertyId.name))
// Set the dialog message
Option(data.value) match {
case Some(value) =>
setMessage(Messages.dialogMessage_text.format(value.name))
case None =>
setMessage(Messages.dialogMessageEmpty_text)
}
// Set the dialog window title
getShell().setText(Messages.dialogShell_text.format(element.eId.name))
// Update size
getScrolledCompositeInformation().setMinSize(getCompositeInformation().computeSize(SWT.DEFAULT, SWT.DEFAULT))
result
}
/** Create drag source */
protected def createDragNDropZone() {
val source = getDragSourceInfo()
source.setTransfer(Array(FileTransfer.getInstance()))
source.addDragListener(dragSourceListener)
val target = getDropTargetInfo()
target.setTransfer(Array(FileTransfer.getInstance()))
target.addDropListener(dragTargetListener)
}
/** Export an attachment */
protected def exportAttachment(exportFile: File): Option[File] = Option(data.value) match {
case Some(attachment) =>
var result: Option[File] = None
log.debug(s"export attachment '${attachment.name}' to " + exportFile.getAbsolutePath())
attachment.attachment foreach { in =>
var out: OutputStream = null
try {
if (exportFile.exists())
exportFile.delete()
exportFile.createNewFile()
out = new BufferedOutputStream(new FileOutputStream(exportFile))
FileUtil.writeToStream(in, out)
result = Some(exportFile)
} catch {
case e: IOException =>
log.error("unable to write the attachment: " + e.getMessage())
} finally {
try { in.close() } catch { case e: Throwable => }
try { out.close() } catch { case e: Throwable => }
}
}
result
case None =>
log.error(s"unable to export, the attachment is empty")
None
}
/** Export an attachment to the spool directory */
protected def exportToSpool(): Option[File] = {
log.debug("create a temporary file for an attachment")
val tmpDir = Some(File.createTempFile("TABuddyDesktop-", "-SolidAttachment"))
tmpDir.flatMap { dir =>
dir.delete()
if (dir.mkdirs()) {
var result: Option[File] = None
Option(data.value) foreach { attachment =>
attachment.attachment foreach { in =>
val file = new File(dir, attachment.name)
log.debug("create DND temporary file " + file.getAbsolutePath())
file.deleteOnExit()
dir.deleteOnExit()
result = exportAttachment(file)
}
}
result
} else
None
}
}
/** Export an attachment to the temporary directory */
protected def exportToTemp(): Option[File] = {
log.debug("create a temporary file for an attachment")
val tmpDir = Some(File.createTempFile("TABuddyDesktop-", "-SolidAttachment"))
tmpDir.flatMap { dir =>
dir.delete()
if (dir.mkdirs()) {
var result: Option[File] = None
Option(data.value) foreach { attachment =>
attachment.attachment foreach { in =>
val file = new File(dir, attachment.name)
log.debug("create DND temporary file " + file.getAbsolutePath())
file.deleteOnExit()
dir.deleteOnExit()
result = exportAttachment(file)
}
}
result
} else
None
}
}
  /** Import an attachment interactively */
protected def importAttachmentInteractive(importFile: File) = Option(data.value) match {
case Some(previous) =>
log.debug(s"ask about to replace attachment with '${importFile.getName}'")
val dialog = new Dialog.Import(PropertyDialog.this.getShell(), Messages.replaceFileDialogTitle_text.format(importFile.getName()),
Messages.attachmentName_text, importFile.getName(), new Dialog.Validator, Some(previous.name))
// if (Window.currentShell.withValue(Some(dialog.getShell)) { dialog.open() } == org.eclipse.jface.window.Window.OK)
// importAttachment(importFile, dialog.getValue())
case None =>
log.debug(s"ask about to import attachment '${importFile.getName}'")
val dialog = new Dialog.Import(PropertyDialog.this.getShell(), Messages.importFileDialogTitle_text.format(importFile.getName()),
Messages.attachmentName_text, importFile.getName(), new Dialog.Validator, None)
// if (Window.currentShell.withValue(Some(dialog.getShell)) { dialog.open() } == org.eclipse.jface.window.Window.OK)
// importAttachment(importFile, dialog.getValue())
}
/** Import an attachment */
protected def importAttachment(importFile: File, attachmentName: String) = Option(data.value) match {
case Some(previous) =>
log.debug(s"replace attachment with '${importFile.getName}'")
data.value = SolidAttachment(attachmentName, previous.created, System.currentTimeMillis(), element, importFile)
case None =>
log.debug(s"import attachment '${importFile.getName}'")
data.value = SolidAttachment(importFile.getName(), element, importFile)
}
/** Update the general section */
@log
protected def updateGeneralSection(attachment: Option[SolidAttachment]) {
val size = attachment.map(_.size match {
case Some(size) if (size > 1024 * 1024) =>
"%.2fMb".format(size.toFloat / (1024 * 1024))
case Some(size) if (size > 1024) =>
"%.2fKb".format(size.toFloat / 1024)
case Some(size) =>
size + "b"
case None =>
CoreMessages.nodata_text
}) getOrElse CoreMessages.nodata_text
val created = attachment.map(a => Util.dateString(new Date(a.created))) getOrElse CoreMessages.nodata_text
val modified = attachment.map(a => Util.dateString(new Date(a.modified))) getOrElse CoreMessages.nodata_text
val mime = CoreMessages.nodata_text
val application = CoreMessages.nodata_text
log.___glance("!??" + Program.findProgram(".pdf"))
//log.___glance("!??" + Program.launch(""))
log.___glance("!!!" + Program.getExtensions().mkString("\\n"))
getTxtFileSize().setText(size)
getTxtFileCreated().setText(created)
getTxtFileModified().setText(modified)
getTxtFileFormat().setText(mime)
getTxtFileApplication().setText(application)
}
/** Update buttons */
@log
protected def updateActions(attachment: Option[SolidAttachment]) {
attachment match {
case Some(attachment) =>
ActionImport.setEnabled(true)
ActionExport.setEnabled(true)
ActionDelete.setEnabled(true)
ActionEdit.setEnabled(true)
case None =>
ActionImport.setEnabled(true)
ActionExport.setEnabled(false)
ActionDelete.setEnabled(false)
ActionEdit.setEnabled(false)
ActionEdit.setChecked(false)
}
}
/** Update logo label */
@log
protected def updateLogo(attachment: Option[SolidAttachment]) {
attachment match {
case Some(attachment) =>
//getLblLogo().setImage(Resources.Image.appbar_page_question_large)
case None =>
//getLblLogo().setImage(Resources.Image.appbar_page_add_large)
}
}
/** Update the preview block */
@log
protected def updatePreview(attachment: Option[SolidAttachment]) {
getLblPreview().setText(Messages.noPreview_text)
}
/** Update the summary section */
@log
protected def updateSummarySection(attachment: Option[SolidAttachment]) {
val documentTitle = CoreMessages.nodata_text
val author = CoreMessages.nodata_text
val authorTitle = CoreMessages.nodata_text
val description = CoreMessages.nodata_text
getTxtDocumentTitle().setText(documentTitle)
getTxtAuthor().setText(author)
getTxtAuthorTitle().setText(authorTitle)
getTxtDescription().setText(description)
}
class DNDSource extends DragSourceListener() {
@volatile protected var tmpFile: Option[File] = None
def dragStart(event: DragSourceEvent) {
// Only start the drag if there is actual data - this data will be what is dropped on the target.
event.doit = Option(data.value).nonEmpty
}
def dragSetData(event: DragSourceEvent) {
event.data = null
event.doit = false
// Provide the data of the requested type.
if (FileTransfer.getInstance().isSupportedType(event.dataType)) {
exportToTemp().foreach { file =>
event.data = Array[String](file.getAbsolutePath())
event.doit = true
}
} else if (TextTransfer.getInstance().isSupportedType(event.dataType)) {
Option(data.value) foreach { attachment =>
event.data = Array[String](attachment.name)
event.doit = true
}
}
}
def dragFinished(event: DragSourceEvent) {
tmpFile.foreach { file =>
log.debug("clean DND temporary file " + file)
tmpFile = None
file.listFiles().foreach(_.delete)
file.delete()
}
}
}
class DNDTarget extends DropTargetListener() {
def dragEnter(event: DropTargetEvent) = drag(event)
def dragOver(event: DropTargetEvent) {}
def dragOperationChanged(event: DropTargetEvent) = drag(event)
def dragLeave(event: DropTargetEvent) {}
def dropAccept(event: DropTargetEvent) {}
def drop(event: DropTargetEvent) {
if (event.data == null) {
event.detail = DND.DROP_NONE
return
}
if (FileTransfer.getInstance().isSupportedType(event.currentDataType))
event.data.asInstanceOf[Array[String]].headOption.foreach(file => importAttachmentInteractive(new File(file)))
else
log.info("unsupported dnd type")
}
protected def drag(event: DropTargetEvent) {
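      // Force a copy operation whenever possible; the drop is rejected below if none of the offered data types is a file transfer.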
event.detail match {
case DND.DROP_DEFAULT if (event.operations & DND.DROP_COPY) != 0 =>
event.detail = DND.DROP_COPY
case DND.DROP_DEFAULT if (event.operations & DND.DROP_MOVE) != 0 =>
event.detail = DND.DROP_COPY
case DND.DROP_NONE =>
event.detail = DND.DROP_COPY
case _ =>
event.detail = DND.DROP_NONE
}
val supported = for (i <- 0 until event.dataTypes.length)
yield FileTransfer.getInstance().isSupportedType(event.dataTypes(i))
if (supported.forall(_ == false))
event.detail = DND.DROP_NONE
}
}
object ActionImport extends Action(Messages.import_text) {
@log
def apply() = App.exec {
val dialog = new FileDialog(PropertyDialog.this.getShell(), SWT.NONE)
if (Option(PropertyDialog.this.data.value).isEmpty)
dialog.setText(Messages.importFileTitle_text)
else
dialog.setText(Messages.replaceFileTitle_text)
// val file = Option(Window.currentShell.withValue(Some(dialog.getParent)) { dialog.open() })
// file.foreach(f => area.importAttachmentInteractive(new File(f)))
}
override def run = apply()
}
object ActionExport extends Action(Messages.export_text) {
@log
def apply() = App.exec {
val dialog = new FileDialog(PropertyDialog.this.getShell(), SWT.SAVE)
dialog.setText(Messages.exportFileTitle_text.format(PropertyDialog.this.data.value.name))
dialog.setOverwrite(true)
dialog.setFileName(PropertyDialog.this.data.value.name)
//val file = Option(Window.currentShell.withValue(Some(dialog.getParent)) { dialog.open() })
//file.foreach(f => area.exportAttachment(new File(f)))
}
override def run = apply()
}
object ActionDelete extends Action(Messages.delete_text) {
@log
def apply() = {
SolidAttachment.clear(PropertyDialog.this.data.value)
PropertyDialog.this.data.value = null
}
override def run = apply()
}
object ActionEdit extends Action(Messages.edit_text) {
def apply() = {
log.___gaze("BOOM")
// spool
// monitor
// close
}
override def run = apply()
}
}
object Dialog extends Loggable {
/** Import dialog based on InputDialog */
class Import(parentShell: Shell, dialogTitle: String, dialogMessage: String, initialValue: String, validator: IInputValidator,
val resetName: Option[String]) extends InputDialog(parentShell, dialogTitle, dialogMessage, initialValue, validator) {
@volatile var resetNameButton: Option[Button] = None
override protected def createButtonsForButtonBar(parent: Composite) {
resetNameButton = Option(createButton(parent, IDialogConstants.CLIENT_ID, Messages.resetName_text, true))
updateResetNameButton()
super.createButtonsForButtonBar(parent)
}
override protected def createDialogArea(parent: Composite): Control = {
val result = super.createDialogArea(parent)
getText.addModifyListener(new ModifyListener {
def modifyText(e: ModifyEvent) = updateResetNameButton
})
result
}
protected def updateResetNameButton() = resetName match {
case Some(name) if getText().getText().trim == name =>
resetNameButton.foreach(_.setEnabled(false))
case None =>
resetNameButton.foreach(_.setEnabled(false))
case _ =>
resetNameButton.foreach(_.setEnabled(true))
}
}
/**
   * This class validates an attachment name. It makes sure that the name is between 1 and 255
   * characters long and matches the SolidAttachment name pattern
*/
class Validator extends IInputValidator {
val pattern = org.digimead.tabuddy.model.dsl.attachment.solid.SolidAttachment.validName.pattern
/**
* Validates the String. Returns null for no error, or an error message
*
* @param newText the String to validate
* @return String
*/
def isValid(newText: String): String = {
val len = newText.length()
// Determine if input is too short or too long
if (len < 1) return "* " + Messages.tooShort
if (len > 255) return "* " + Messages.tooLong
if (!pattern.matcher(newText).matches()) return "Invalid"
// Input must be OK
return null
}
}
}
| digimead/digi-TABuddy-desktop-type-solidAttachment | src/main/scala/org/digimead/tabuddy/desktop/logic/payload/template/attachment/solid/PropertyDialog.scala | Scala | agpl-3.0 | 21,104 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.config
import org.junit.Test
import scala.collection.JavaConversions._
import org.apache.samza.config.StorageConfig._
import org.junit.Assert.assertFalse
import org.junit.Assert.assertTrue
import org.junit.Assert.assertEquals
import org.junit.Assert.fail
class TestStorageConfig {
@Test
def testIsChangelogSystem {
val configMap = Map[String, String](
FACTORY.format("store1") -> "some.factory.Class",
CHANGELOG_STREAM.format("store1") -> "system1.stream1",
FACTORY.format("store2") -> "some.factory.Class")
val config = new MapConfig(configMap)
assertFalse(config.isChangelogSystem("system3"))
assertFalse(config.isChangelogSystem("system2"))
assertTrue(config.isChangelogSystem("system1"))
}
@Test
def testIsChangelogSystemSetting {
val configMap = Map[String, String](
FACTORY.format("store1") -> "some.factory.Class",
CHANGELOG_STREAM.format("store1") -> "system1.stream1",
CHANGELOG_SYSTEM -> "system2",
CHANGELOG_STREAM.format("store2") -> "stream2",
CHANGELOG_STREAM.format("store4") -> "stream4",
FACTORY.format("store2") -> "some.factory.Class")
val config = new MapConfig(configMap)
assertFalse(config.isChangelogSystem("system3"))
assertTrue(config.isChangelogSystem("system2"))
assertTrue(config.isChangelogSystem("system1"))
assertEquals("system1.stream1", config.getChangelogStream("store1").getOrElse(""));
assertEquals("system2.stream2", config.getChangelogStream("store2").getOrElse(""));
val configMapErr = Map[String, String](CHANGELOG_STREAM.format("store4")->"stream4")
val configErr = new MapConfig(configMapErr)
try {
configErr.getChangelogStream("store4").getOrElse("")
fail("store4 has no system defined. Should've failed.");
} catch {
case e: Exception => // do nothing, it is expected
}
}
}
| nickpan47/samza | core/src/test/scala/org/apache/samza/config/TestStorageConfig.scala | Scala | apache-2.0 | 2,712 |
class BasicCredentialsProvider(accessKey: String, secretKey: String) extends CredentialsProvider {
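  // Static provider: the credential pair is fixed at construction time, so refresh() has nothing to do.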
private val credentials: Credentials = Credentials(accessKey, secretKey)
override def getCredentials: Credentials = credentials
override def refresh: Unit = {}
}
object BasicCredentialsProvider {
def apply(accessKey: String, secretKey: String): BasicCredentialsProvider =
new BasicCredentialsProvider(accessKey, secretKey)
def apply(credentials: Credentials): BasicCredentialsProvider =
new BasicCredentialsProvider(credentials.getAWSAccessKeyId, credentials.getAWSSecretKey)
}
| hirokikonishi/awscala | aws/core/src/main/scala/BasicCredentialsProvider.scala | Scala | apache-2.0 | 597 |
package com.goticks
import akka.actor.{Props, ActorSystem}
import akka.testkit.{ImplicitSender, TestKit}
import org.scalatest.{WordSpecLike, MustMatchers}
class TickerSellerSpec extends TestKit(ActorSystem("testTickets"))
with WordSpecLike
with MustMatchers
with ImplicitSender
with StopSystemAfterAll {
"The TicketSeller" must {
"Sell tickets until they are sold out" in {
import TicketSeller._
def mkTickets = (1 to 10).map(i=>Ticket(i)).toVector
val event = "RHCP"
val ticketingActor = system.actorOf(TicketSeller.props(event))
ticketingActor ! Add(mkTickets)
ticketingActor ! Buy(1)
expectMsg(Tickets(event, Vector(Ticket(1))))
val nrs = (2 to 10)
nrs.foreach(_ => ticketingActor ! Buy(1))
val tickets = receiveN(9)
tickets.zip(nrs).foreach { case (Tickets(event, Vector(Ticket(id))), ix) => id must be(ix) }
ticketingActor ! Buy(1)
expectMsg(Tickets(event))
}
"Sell tickets in batches until they are sold out" in {
import TicketSeller._
val firstBatchSize = 10
def mkTickets = (1 to (10 * firstBatchSize)).map(i=>Ticket(i)).toVector
val event = "Madlib"
val ticketingActor = system.actorOf(TicketSeller.props(event))
ticketingActor ! Add(mkTickets)
ticketingActor ! Buy(firstBatchSize)
val bought = (1 to firstBatchSize).map(Ticket).toVector
expectMsg(Tickets(event, bought))
val secondBatchSize = 5
val nrBatches = 18
val batches = (1 to nrBatches * secondBatchSize)
batches.foreach(_ => ticketingActor ! Buy(secondBatchSize))
val tickets = receiveN(nrBatches)
tickets.zip(batches).foreach {
case (Tickets(event, bought), ix) =>
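          // Every batch after the first must hold the next contiguous block of ticket ids (5 at a time, starting after the first 10).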
bought.size must equal(secondBatchSize)
val last = ix * secondBatchSize + firstBatchSize
val first = ix * secondBatchSize + firstBatchSize - (secondBatchSize - 1)
bought.map(_.id) must equal((first to last).toVector)
case _ =>
}
ticketingActor ! Buy(1)
expectMsg(Tickets(event))
ticketingActor ! Buy(10)
expectMsg(Tickets(event))
}
}
}
| gilbutITbook/006877 | chapter-up-and-running/src/test/scala/com/goticks/TicketSellerSpec.scala | Scala | mit | 2,262 |
package polyite.schedule.sampling
import polyite.config.Config
import polyite.util.Rat
import polyite.config.MinimalConfig.NumGeneratorsLimit
import polyite.schedule.DomainCoeffInfo
/**
* Some sampling strategies may need to inspect the list of polyhedra that model a region of the schedule search space.
* The result of the inspection may be stored in an instance of {@code SamplingStrategyParams}.
*/
trait SamplingStrategyParams {}
/**
* Interface for schedule sampling strategies. A sampling strategy randomly selects a (rational) schedule coefficient
* vector from a polyhedron of possible schedule coefficient vectors.
*/
trait SamplingStrategy {
/**
* The search space construction yields descriptions of search space regions. These descriptions consist of lists of
* polyhedra that are represented by linearly affine constraints. Sampling strategies may use this method to
   * convert polyhedra to the representation that they use and return a corresponding implementation of {@code Polyhedron}.
   *
   * @param p the polyhedron to translate. Note that {@code p} may be connected to a different isl context than Polyite's global one.
* @param conf Polyite configuration.
*/
def preparePolyhedron(p : isl.Set, conf : Config) : Polyhedron
/**
* Some sampling strategies may need to inspect the list of polyhedra that model a region of the schedule search space.
* The result of the inspection may be stored in an instance of {@code SamplingStrategyParams}. Implement this method
* to perform any such inspection. The returned object will be passed to
* {@code SamplingStrategyParams.sampleCoeffVect(Polyhedron, DomainCoeffInfo, Config, SamplingStrategyParams)} whenever
* it is being called to sample a schedule coefficient vector from one of the polyhedra in {@code region}.
* @param region the search space region to analyze in the representation that resulted from calling {@code SamplingStrategy.preparePolyhedron(Set, Config)}
* for each polyhedron in the region's representation. Note, that the polyhedra in {@code region} may be connected to a different isl context than Polyite's global one.
* @param conf Polyite configuration
* @param numRaysLimit parameter that is particularly related to Chernikova sampling.
* {@code numRaysLimit} is the maximum number of rays that may have a coefficient unequal to zero in the linear combination
* that forms a schedule coefficient vector.
* @param numLinesLimit parameter that is particularly related to Chernikova sampling.
* {@code numLinesLimit} is the maximum number of lines that may have a coefficient unequal to zero in the linear
* combination that forms a schedule coefficient vector.
*/
def prepareSamplingStrategyParams(region : Iterable[Polyhedron], conf : Config, numRaysLimit : NumGeneratorsLimit,
numLinesLimit : NumGeneratorsLimit) : SamplingStrategyParams
/**
   * Alternative to {@code SamplingStrategy.prepareSamplingStrategyParams(Iterable[Polyhedron], Config, NumGeneratorsLimit, NumGeneratorsLimit)}
   * that is called in order to generate the sampling strategy parameters independently of a particular
   * search space region.
*/
def createSamplingStrategyParamsFromConf(conf : Config) : SamplingStrategyParams
/**
* Sample a schedule coefficient vector from {@code p}. The polyhedron is represented in the format created by
* {@code SamplingStrategy.preparePolyhedron(Set, Config)}.
   * @param p polyhedron in the format created by {@code SamplingStrategy.preparePolyhedron(Set, Config)}. Note that
* {@code p} may be connected to a different isl context than Polyite's global one.
* @param domInfo description of the schedule coefficient vector space
* @param conf Polyite configuration
   * @param params result of search space region inspection by {@code SamplingStrategy.prepareSamplingStrategyParams(Iterable[Polyhedron], Config, NumGeneratorsLimit, NumGeneratorsLimit)}.
* @return the sampled schedule coefficient vector as a list of rational numbers and in case of Chernikova sampling
* a set of generators with coefficients that form the yielded schedule coefficient vector. Leave this set empty
* for other sampling techniques.
*/
def sampleCoeffVect(p : Polyhedron, domInfo : DomainCoeffInfo, conf : Config, params : SamplingStrategyParams) : (List[Rat], Set[ScheduleSummand])
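  // Typical call sequence (sketch): translate each region polyhedron with preparePolyhedron, derive the
  // strategy parameters once per region via prepareSamplingStrategyParams (or createSamplingStrategyParamsFromConf),
  // and then call sampleCoeffVect repeatedly to draw schedule coefficient vectors.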
}
| stganser/polyite | src/polyite/schedule/sampling/SamplingStrategy.scala | Scala | mit | 4,480 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.cluster
import java.util.concurrent.locks.ReentrantReadWriteLock
import com.yammer.metrics.core.Gauge
import kafka.api.LeaderAndIsr
import kafka.api.Request
import kafka.controller.KafkaController
import kafka.log.{LogAppendInfo, LogConfig}
import kafka.metrics.KafkaMetricsGroup
import kafka.server._
import kafka.utils.CoreUtils.{inReadLock, inWriteLock}
import kafka.utils._
import kafka.zk.AdminZkClient
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.{NotEnoughReplicasException, NotLeaderForPartitionException, PolicyViolationException}
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.protocol.Errors._
import org.apache.kafka.common.record.MemoryRecords
import org.apache.kafka.common.requests.EpochEndOffset._
import org.apache.kafka.common.requests.{EpochEndOffset, LeaderAndIsrRequest}
import org.apache.kafka.common.utils.Time
import scala.collection.JavaConverters._
import scala.collection.Map
/**
* Data structure that represents a topic partition. The leader maintains the AR, ISR, CUR, RAR
*/
class Partition(val topic: String,
val partitionId: Int,
time: Time,
replicaManager: ReplicaManager,
val isOffline: Boolean = false) extends Logging with KafkaMetricsGroup {
val topicPartition = new TopicPartition(topic, partitionId)
// Do not use replicaManager if this partition is ReplicaManager.OfflinePartition
private val localBrokerId = if (!isOffline) replicaManager.config.brokerId else -1
private val logManager = if (!isOffline) replicaManager.logManager else null
private val zkClient = if (!isOffline) replicaManager.zkClient else null
// allReplicasMap includes both assigned replicas and the future replica if there is ongoing replica movement
private val allReplicasMap = new Pool[Int, Replica]
// The read lock is only required when multiple reads are executed and needs to be in a consistent manner
private val leaderIsrUpdateLock = new ReentrantReadWriteLock
private var zkVersion: Int = LeaderAndIsr.initialZKVersion
@volatile private var leaderEpoch: Int = LeaderAndIsr.initialLeaderEpoch - 1
@volatile var leaderReplicaIdOpt: Option[Int] = None
@volatile var inSyncReplicas: Set[Replica] = Set.empty[Replica]
/* Epoch of the controller that last changed the leader. This needs to be initialized correctly upon broker startup.
* One way of doing that is through the controller's start replica state change command. When a new broker starts up
* the controller sends it a start replica command containing the leader for each partition that the broker hosts.
* In addition to the leader, the controller can also send the epoch of the controller that elected the leader for
* each partition. */
private var controllerEpoch: Int = KafkaController.InitialControllerEpoch - 1
this.logIdent = s"[Partition $topicPartition broker=$localBrokerId] "
private def isReplicaLocal(replicaId: Int) : Boolean = replicaId == localBrokerId || replicaId == Request.FutureLocalReplicaId
private val tags = Map("topic" -> topic, "partition" -> partitionId.toString)
// Do not create metrics if this partition is ReplicaManager.OfflinePartition
if (!isOffline) {
newGauge("UnderReplicated",
new Gauge[Int] {
def value = {
if (isUnderReplicated) 1 else 0
}
},
tags
)
newGauge("InSyncReplicasCount",
new Gauge[Int] {
def value = {
if (isLeaderReplicaLocal) inSyncReplicas.size else 0
}
},
tags
)
newGauge("UnderMinIsr",
new Gauge[Int] {
def value = {
if (isUnderMinIsr) 1 else 0
}
},
tags
)
newGauge("ReplicasCount",
new Gauge[Int] {
def value = {
if (isLeaderReplicaLocal) assignedReplicas.size else 0
}
},
tags
)
newGauge("LastStableOffsetLag",
new Gauge[Long] {
def value = {
leaderReplicaIfLocal.map { replica =>
replica.highWatermark.messageOffset - replica.lastStableOffset.messageOffset
}.getOrElse(0)
}
},
tags
)
}
private def isLeaderReplicaLocal: Boolean = leaderReplicaIfLocal.isDefined
def isUnderReplicated: Boolean =
isLeaderReplicaLocal && inSyncReplicas.size < assignedReplicas.size
def isUnderMinIsr: Boolean = {
leaderReplicaIfLocal match {
case Some(leaderReplica) =>
inSyncReplicas.size < leaderReplica.log.get.config.minInSyncReplicas
case None =>
false
}
}
/**
* Create the future replica if 1) the current replica is not in the given log directory and 2) the future replica
* does not exist. This method assumes that the current replica has already been created.
*
* @param logDir log directory
* @return true iff the future replica is created
*/
def maybeCreateFutureReplica(logDir: String): Boolean = {
// The readLock is needed to make sure that while the caller checks the log directory of the
// current replica and the existence of the future replica, no other thread can update the log directory of the
// current replica or remove the future replica.
inReadLock(leaderIsrUpdateLock) {
val currentReplica = getReplica().get
if (currentReplica.log.get.dir.getParent == logDir)
false
else if (getReplica(Request.FutureLocalReplicaId).isDefined) {
val futureReplicaLogDir = getReplica(Request.FutureLocalReplicaId).get.log.get.dir.getParent
if (futureReplicaLogDir != logDir)
throw new IllegalStateException(s"The future log dir $futureReplicaLogDir of $topicPartition is different from the requested log dir $logDir")
false
} else {
getOrCreateReplica(Request.FutureLocalReplicaId)
true
}
}
}
def getOrCreateReplica(replicaId: Int = localBrokerId, isNew: Boolean = false): Replica = {
allReplicasMap.getAndMaybePut(replicaId, {
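      // Local replicas (including the future replica) are backed by an on-disk log whose initial high watermark
      // is restored from the checkpoint file; remote replicas only track the follower's fetch state.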
if (isReplicaLocal(replicaId)) {
val adminZkClient = new AdminZkClient(zkClient)
val prop = adminZkClient.fetchEntityConfig(ConfigType.Topic, topic)
val config = LogConfig.fromProps(logManager.defaultConfig.originals,
prop)
val log = logManager.getOrCreateLog(topicPartition, config, isNew, replicaId == Request.FutureLocalReplicaId)
val checkpoint = replicaManager.highWatermarkCheckpoints(log.dir.getParent)
val offsetMap = checkpoint.read()
if (!offsetMap.contains(topicPartition))
info(s"No checkpointed highwatermark is found for partition $topicPartition")
val offset = math.min(offsetMap.getOrElse(topicPartition, 0L), log.logEndOffset)
new Replica(replicaId, topicPartition, time, offset, Some(log))
} else new Replica(replicaId, topicPartition, time)
})
}
def getReplica(replicaId: Int = localBrokerId): Option[Replica] = Option(allReplicasMap.get(replicaId))
def leaderReplicaIfLocal: Option[Replica] =
leaderReplicaIdOpt.filter(_ == localBrokerId).flatMap(getReplica)
def addReplicaIfNotExists(replica: Replica): Replica =
allReplicasMap.putIfNotExists(replica.brokerId, replica)
def assignedReplicas: Set[Replica] =
allReplicasMap.values.filter(replica => Request.isValidBrokerId(replica.brokerId)).toSet
def allReplicas: Set[Replica] =
allReplicasMap.values.toSet
private def removeReplica(replicaId: Int) {
allReplicasMap.remove(replicaId)
}
def removeFutureLocalReplica() {
inWriteLock(leaderIsrUpdateLock) {
allReplicasMap.remove(Request.FutureLocalReplicaId)
}
}
// Return true iff the future log has caught up with the current log for this partition
// Only ReplicaAlterDirThread will call this method and ReplicaAlterDirThread should remove the partition
// from its partitionStates if this method returns true
def maybeReplaceCurrentWithFutureReplica(): Boolean = {
val replica = getReplica().get
val futureReplica = getReplica(Request.FutureLocalReplicaId).get
if (replica.logEndOffset == futureReplica.logEndOffset) {
// The write lock is needed to make sure that while ReplicaAlterDirThread checks the LEO of the
// current replica, no other thread can update LEO of the current replica via log truncation or log append operation.
inWriteLock(leaderIsrUpdateLock) {
if (replica.logEndOffset == futureReplica.logEndOffset) {
logManager.replaceCurrentWithFutureLog(topicPartition)
replica.log = futureReplica.log
futureReplica.log = None
allReplicasMap.remove(Request.FutureLocalReplicaId)
true
} else false
}
} else false
}
def delete() {
// need to hold the lock to prevent appendMessagesToLeader() from hitting I/O exceptions due to log being deleted
inWriteLock(leaderIsrUpdateLock) {
allReplicasMap.clear()
inSyncReplicas = Set.empty[Replica]
leaderReplicaIdOpt = None
removePartitionMetrics()
logManager.asyncDelete(topicPartition)
logManager.asyncDelete(topicPartition, isFuture = true)
}
}
def getLeaderEpoch: Int = this.leaderEpoch
/**
* Make the local replica the leader by resetting LogEndOffset for remote replicas (there could be old LogEndOffset
* from the time when this broker was the leader last time) and setting the new leader and ISR.
* If the leader replica id does not change, return false to indicate the replica manager.
*/
def makeLeader(controllerId: Int, partitionStateInfo: LeaderAndIsrRequest.PartitionState, correlationId: Int): Boolean = {
val (leaderHWIncremented, isNewLeader) = inWriteLock(leaderIsrUpdateLock) {
val newAssignedReplicas = partitionStateInfo.basePartitionState.replicas.asScala.map(_.toInt)
// record the epoch of the controller that made the leadership decision. This is useful while updating the isr
// to maintain the decision maker controller's epoch in the zookeeper path
controllerEpoch = partitionStateInfo.basePartitionState.controllerEpoch
// add replicas that are new
val newInSyncReplicas = partitionStateInfo.basePartitionState.isr.asScala.map(r => getOrCreateReplica(r, partitionStateInfo.isNew)).toSet
// remove assigned replicas that have been removed by the controller
(assignedReplicas.map(_.brokerId) -- newAssignedReplicas).foreach(removeReplica)
inSyncReplicas = newInSyncReplicas
info(s"$topicPartition starts at Leader Epoch ${partitionStateInfo.basePartitionState.leaderEpoch} from offset ${getReplica().get.logEndOffset.messageOffset}. Previous Leader Epoch was: $leaderEpoch")
//We cache the leader epoch here, persisting it only if it's local (hence having a log dir)
leaderEpoch = partitionStateInfo.basePartitionState.leaderEpoch
newAssignedReplicas.foreach(id => getOrCreateReplica(id, partitionStateInfo.isNew))
zkVersion = partitionStateInfo.basePartitionState.zkVersion
val isNewLeader = leaderReplicaIdOpt.map(_ != localBrokerId).getOrElse(true)
val leaderReplica = getReplica().get
val curLeaderLogEndOffset = leaderReplica.logEndOffset.messageOffset
val curTimeMs = time.milliseconds
// initialize lastCaughtUpTime of replicas as well as their lastFetchTimeMs and lastFetchLeaderLogEndOffset.
(assignedReplicas - leaderReplica).foreach { replica =>
val lastCaughtUpTimeMs = if (inSyncReplicas.contains(replica)) curTimeMs else 0L
replica.resetLastCaughtUpTime(curLeaderLogEndOffset, curTimeMs, lastCaughtUpTimeMs)
}
if (isNewLeader) {
// construct the high watermark metadata for the new leader replica
leaderReplica.convertHWToLocalOffsetMetadata()
// mark local replica as the leader after converting hw
leaderReplicaIdOpt = Some(localBrokerId)
// reset log end offset for remote replicas
assignedReplicas.filter(_.brokerId != localBrokerId).foreach(_.updateLogReadResult(LogReadResult.UnknownLogReadResult))
}
// we may need to increment high watermark since ISR could be down to 1
(maybeIncrementLeaderHW(leaderReplica), isNewLeader)
}
// some delayed operations may be unblocked after HW changed
if (leaderHWIncremented)
tryCompleteDelayedRequests()
isNewLeader
}
/**
* Make the local replica the follower by setting the new leader and ISR to empty
* If the leader replica id does not change, return false to indicate the replica manager
*/
def makeFollower(controllerId: Int, partitionStateInfo: LeaderAndIsrRequest.PartitionState, correlationId: Int): Boolean = {
inWriteLock(leaderIsrUpdateLock) {
val newAssignedReplicas = partitionStateInfo.basePartitionState.replicas.asScala.map(_.toInt)
val newLeaderBrokerId: Int = partitionStateInfo.basePartitionState.leader
// record the epoch of the controller that made the leadership decision. This is useful while updating the isr
// to maintain the decision maker controller's epoch in the zookeeper path
controllerEpoch = partitionStateInfo.basePartitionState.controllerEpoch
// add replicas that are new
newAssignedReplicas.foreach(r => getOrCreateReplica(r, partitionStateInfo.isNew))
// remove assigned replicas that have been removed by the controller
(assignedReplicas.map(_.brokerId) -- newAssignedReplicas).foreach(removeReplica)
inSyncReplicas = Set.empty[Replica]
leaderEpoch = partitionStateInfo.basePartitionState.leaderEpoch
zkVersion = partitionStateInfo.basePartitionState.zkVersion
if (leaderReplicaIdOpt.isDefined && leaderReplicaIdOpt.get == newLeaderBrokerId) {
false
}
else {
leaderReplicaIdOpt = Some(newLeaderBrokerId)
true
}
}
}
/**
* Update the follower's state in the leader based on the last fetch request. See
* [[kafka.cluster.Replica#updateLogReadResult]] for details.
*
* @return true if the leader's log start offset or high watermark have been updated
*/
def updateReplicaLogReadResult(replica: Replica, logReadResult: LogReadResult): Boolean = {
val replicaId = replica.brokerId
// No need to calculate low watermark if there is no delayed DeleteRecordsRequest
val oldLeaderLW = if (replicaManager.delayedDeleteRecordsPurgatory.delayed > 0) lowWatermarkIfLeader else -1L
replica.updateLogReadResult(logReadResult)
val newLeaderLW = if (replicaManager.delayedDeleteRecordsPurgatory.delayed > 0) lowWatermarkIfLeader else -1L
// check if the LW of the partition has incremented
// since the replica's logStartOffset may have incremented
val leaderLWIncremented = newLeaderLW > oldLeaderLW
// check if we need to expand ISR to include this replica
// if it is not in the ISR yet
val leaderHWIncremented = maybeExpandIsr(replicaId, logReadResult)
val result = leaderLWIncremented || leaderHWIncremented
// some delayed operations may be unblocked after HW or LW changed
if (result)
tryCompleteDelayedRequests()
debug(s"Recorded replica $replicaId log end offset (LEO) position ${logReadResult.info.fetchOffsetMetadata.messageOffset}.")
result
}
/**
* Check and maybe expand the ISR of the partition.
* A replica will be added to ISR if its LEO >= current hw of the partition.
*
* Technically, a replica shouldn't be in ISR if it hasn't caught up for longer than replicaLagTimeMaxMs,
* even if its log end offset is >= HW. However, to be consistent with how the follower determines
* whether a replica is in-sync, we only check HW.
*
* This function can be triggered when a replica's LEO has incremented.
*
* @return true if the high watermark has been updated
*/
def maybeExpandIsr(replicaId: Int, logReadResult: LogReadResult): Boolean = {
inWriteLock(leaderIsrUpdateLock) {
// check if this replica needs to be added to the ISR
leaderReplicaIfLocal match {
case Some(leaderReplica) =>
val replica = getReplica(replicaId).get
val leaderHW = leaderReplica.highWatermark
if (!inSyncReplicas.contains(replica) &&
assignedReplicas.map(_.brokerId).contains(replicaId) &&
replica.logEndOffset.offsetDiff(leaderHW) >= 0) {
val newInSyncReplicas = inSyncReplicas + replica
info(s"Expanding ISR from ${inSyncReplicas.map(_.brokerId).mkString(",")} " +
s"to ${newInSyncReplicas.map(_.brokerId).mkString(",")}")
// update ISR in ZK and cache
updateIsr(newInSyncReplicas)
replicaManager.isrExpandRate.mark()
}
// check if the HW of the partition can now be incremented
// since the replica may already be in the ISR and its LEO has just incremented
maybeIncrementLeaderHW(leaderReplica, logReadResult.fetchTimeMs)
case None => false // nothing to do if no longer leader
}
}
}
/*
* Returns a tuple where the first element is a boolean indicating whether enough replicas reached `requiredOffset`
* and the second element is an error (which would be `Errors.NONE` for no error).
*
* Note that this method will only be called if requiredAcks = -1 and we are waiting for all replicas in ISR to be
* fully caught up to the (local) leader's offset corresponding to this produce request before we acknowledge the
* produce request.
*/
def checkEnoughReplicasReachOffset(requiredOffset: Long): (Boolean, Errors) = {
leaderReplicaIfLocal match {
case Some(leaderReplica) =>
// keep the current immutable replica list reference
val curInSyncReplicas = inSyncReplicas
def numAcks = curInSyncReplicas.count { r =>
if (!r.isLocal)
if (r.logEndOffset.messageOffset >= requiredOffset) {
trace(s"Replica ${r.brokerId} received offset $requiredOffset")
true
}
else
false
else
true /* also count the local (leader) replica */
}
trace(s"$numAcks acks satisfied with acks = -1")
val minIsr = leaderReplica.log.get.config.minInSyncReplicas
if (leaderReplica.highWatermark.messageOffset >= requiredOffset) {
/*
* The topic may be configured not to accept messages if there are not enough replicas in ISR
* in this scenario the request was already appended locally and then added to the purgatory before the ISR was shrunk
*/
if (minIsr <= curInSyncReplicas.size)
(true, Errors.NONE)
else
(true, Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND)
} else
(false, Errors.NONE)
case None =>
(false, Errors.NOT_LEADER_FOR_PARTITION)
}
}
/**
* Check and maybe increment the high watermark of the partition;
* this function can be triggered when
*
* 1. Partition ISR changed
* 2. Any replica's LEO changed
*
* The HW is determined by the smallest log end offset among all replicas that are in sync or are considered caught-up.
* This way, if a replica is considered caught-up, but its log end offset is smaller than HW, we will wait for this
* replica to catch up to the HW before advancing the HW. This helps the situation when the ISR only includes the
* leader replica and a follower tries to catch up. If we don't wait for the follower when advancing the HW, the
* follower's log end offset may keep falling behind the HW (determined by the leader's log end offset) and therefore
* will never be added to ISR.
*
* Returns true if the HW was incremented, and false otherwise.
* Note There is no need to acquire the leaderIsrUpdate lock here
* since all callers of this private API acquire that lock
*/
private def maybeIncrementLeaderHW(leaderReplica: Replica, curTime: Long = time.milliseconds): Boolean = {
val allLogEndOffsets = assignedReplicas.filter { replica =>
curTime - replica.lastCaughtUpTimeMs <= replicaManager.config.replicaLagTimeMaxMs || inSyncReplicas.contains(replica)
}.map(_.logEndOffset)
val newHighWatermark = allLogEndOffsets.min(new LogOffsetMetadata.OffsetOrdering)
val oldHighWatermark = leaderReplica.highWatermark
if (oldHighWatermark.messageOffset < newHighWatermark.messageOffset || oldHighWatermark.onOlderSegment(newHighWatermark)) {
leaderReplica.highWatermark = newHighWatermark
debug(s"High watermark updated to $newHighWatermark")
true
} else {
debug(s"Skipping update high watermark since new hw $newHighWatermark is not larger than old hw $oldHighWatermark." +
s"All LEOs are ${allLogEndOffsets.mkString(",")}")
false
}
}
/**
* The low watermark offset value, calculated only if the local replica is the partition leader
* It is only used by leader broker to decide when DeleteRecordsRequest is satisfied. Its value is minimum logStartOffset of all live replicas
* Low watermark will increase when the leader broker receives either FetchRequest or DeleteRecordsRequest.
*/
def lowWatermarkIfLeader: Long = {
if (!isLeaderReplicaLocal)
throw new NotLeaderForPartitionException("Leader not local for partition %s on broker %d".format(topicPartition, localBrokerId))
val logStartOffsets = allReplicas.collect {
case replica if replicaManager.metadataCache.isBrokerAlive(replica.brokerId) || replica.brokerId == Request.FutureLocalReplicaId => replica.logStartOffset
}
CoreUtils.min(logStartOffsets, 0L)
}
/**
* Try to complete any pending requests. This should be called without holding the leaderIsrUpdateLock.
*/
private def tryCompleteDelayedRequests() {
val requestKey = new TopicPartitionOperationKey(topicPartition)
replicaManager.tryCompleteDelayedFetch(requestKey)
replicaManager.tryCompleteDelayedProduce(requestKey)
replicaManager.tryCompleteDelayedDeleteRecords(requestKey)
}
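  /**
   * Shrink the ISR by removing followers that have not caught up to the leader within replicaMaxLagTimeMs.
   * Removing replicas from the ISR may allow the high watermark to advance.
   */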
def maybeShrinkIsr(replicaMaxLagTimeMs: Long) {
val leaderHWIncremented = inWriteLock(leaderIsrUpdateLock) {
leaderReplicaIfLocal match {
case Some(leaderReplica) =>
val outOfSyncReplicas = getOutOfSyncReplicas(leaderReplica, replicaMaxLagTimeMs)
if(outOfSyncReplicas.nonEmpty) {
val newInSyncReplicas = inSyncReplicas -- outOfSyncReplicas
assert(newInSyncReplicas.nonEmpty)
info("Shrinking ISR from %s to %s".format(inSyncReplicas.map(_.brokerId).mkString(","),
newInSyncReplicas.map(_.brokerId).mkString(",")))
// update ISR in zk and in cache
updateIsr(newInSyncReplicas)
// we may need to increment high watermark since ISR could be down to 1
replicaManager.isrShrinkRate.mark()
maybeIncrementLeaderHW(leaderReplica)
} else {
false
}
case None => false // do nothing if no longer leader
}
}
// some delayed operations may be unblocked after HW changed
if (leaderHWIncremented)
tryCompleteDelayedRequests()
}
def getOutOfSyncReplicas(leaderReplica: Replica, maxLagMs: Long): Set[Replica] = {
/**
* there are two cases that will be handled here -
* 1. Stuck followers: If the leo of the replica hasn't been updated for maxLagMs ms,
* the follower is stuck and should be removed from the ISR
* 2. Slow followers: If the replica has not read up to the leo within the last maxLagMs ms,
* then the follower is lagging and should be removed from the ISR
* Both these cases are handled by checking the lastCaughtUpTimeMs which represents
* the last time when the replica was fully caught up. If either of the above conditions
* is violated, that replica is considered to be out of sync
*
**/
val candidateReplicas = inSyncReplicas - leaderReplica
val laggingReplicas = candidateReplicas.filter(r => (time.milliseconds - r.lastCaughtUpTimeMs) > maxLagMs)
if (laggingReplicas.nonEmpty)
debug("Lagging replicas are %s".format(laggingReplicas.map(_.brokerId).mkString(",")))
laggingReplicas
}
def appendRecordsToFutureReplica(records: MemoryRecords) {
getReplica(Request.FutureLocalReplicaId).get.log.get.appendAsFollower(records)
}
def appendRecordsToFollower(records: MemoryRecords) {
// The read lock is needed to prevent the follower replica from being updated while ReplicaAlterDirThread
// is executing maybeDeleteAndSwapFutureReplica() to replace follower replica with the future replica.
inReadLock(leaderIsrUpdateLock) {
getReplica().get.log.get.appendAsFollower(records)
}
}
def appendRecordsToLeader(records: MemoryRecords, isFromClient: Boolean, requiredAcks: Int = 0): LogAppendInfo = {
val (info, leaderHWIncremented) = inReadLock(leaderIsrUpdateLock) {
leaderReplicaIfLocal match {
case Some(leaderReplica) =>
val log = leaderReplica.log.get
val minIsr = log.config.minInSyncReplicas
val inSyncSize = inSyncReplicas.size
// Avoid writing to leader if there are not enough insync replicas to make it safe
if (inSyncSize < minIsr && requiredAcks == -1) {
throw new NotEnoughReplicasException("Number of insync replicas for partition %s is [%d], below required minimum [%d]"
.format(topicPartition, inSyncSize, minIsr))
}
val info = log.appendAsLeader(records, leaderEpoch = this.leaderEpoch, isFromClient)
// probably unblock some follower fetch requests since log end offset has been updated
replicaManager.tryCompleteDelayedFetch(TopicPartitionOperationKey(this.topic, this.partitionId))
// we may need to increment high watermark since ISR could be down to 1
(info, maybeIncrementLeaderHW(leaderReplica))
case None =>
throw new NotLeaderForPartitionException("Leader not local for partition %s on broker %d"
.format(topicPartition, localBrokerId))
}
}
// some delayed operations may be unblocked after HW changed
if (leaderHWIncremented)
tryCompleteDelayedRequests()
info
}
def logStartOffset: Long = {
inReadLock(leaderIsrUpdateLock) {
leaderReplicaIfLocal.map(_.log.get.logStartOffset).getOrElse(-1)
}
}
/**
* Update logStartOffset and low watermark if 1) offset <= highWatermark and 2) it is the leader replica.
* This function can trigger log segment deletion and log rolling.
*
* Return low watermark of the partition.
*/
def deleteRecordsOnLeader(offset: Long): Long = {
inReadLock(leaderIsrUpdateLock) {
leaderReplicaIfLocal match {
case Some(leaderReplica) =>
if (!leaderReplica.log.get.config.delete)
throw new PolicyViolationException("Records of partition %s can not be deleted due to the configured policy".format(topicPartition))
leaderReplica.maybeIncrementLogStartOffset(offset)
lowWatermarkIfLeader
case None =>
throw new NotLeaderForPartitionException("Leader not local for partition %s on broker %d"
.format(topicPartition, localBrokerId))
}
}
}
/**
* Truncate the local log of this partition to the specified offset and checkpoint the recovery point to this offset
*
* @param offset offset to be used for truncation
* @param isFuture True iff the truncation should be performed on the future log of this partition
*/
def truncateTo(offset: Long, isFuture: Boolean) {
// The read lock is needed to prevent the follower replica from being truncated while ReplicaAlterDirThread
// is executing maybeDeleteAndSwapFutureReplica() to replace follower replica with the future replica.
inReadLock(leaderIsrUpdateLock) {
logManager.truncateTo(Map(topicPartition -> offset), isFuture = isFuture)
}
}
/**
* Delete all data in the local log of this partition and start the log at the new offset
*
* @param newOffset The new offset to start the log with
* @param isFuture True iff the truncation should be performed on the future log of this partition
*/
def truncateFullyAndStartAt(newOffset: Long, isFuture: Boolean) {
// The read lock is needed to prevent the follower replica from being truncated while ReplicaAlterDirThread
// is executing maybeDeleteAndSwapFutureReplica() to replace follower replica with the future replica.
inReadLock(leaderIsrUpdateLock) {
logManager.truncateFullyAndStartAt(topicPartition, newOffset, isFuture = isFuture)
}
}
/**
* @param leaderEpoch Requested leader epoch
* @return The last offset of messages published under this leader epoch.
*/
def lastOffsetForLeaderEpoch(leaderEpoch: Int): EpochEndOffset = {
inReadLock(leaderIsrUpdateLock) {
leaderReplicaIfLocal match {
case Some(leaderReplica) =>
new EpochEndOffset(NONE, leaderReplica.epochs.get.endOffsetFor(leaderEpoch))
case None =>
new EpochEndOffset(NOT_LEADER_FOR_PARTITION, UNDEFINED_EPOCH_OFFSET)
}
}
}
private def updateIsr(newIsr: Set[Replica]) {
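    // Persist the new ISR to ZooKeeper first; the in-memory ISR and cached zkVersion are only updated
    // if the conditional (zkVersion-checked) update succeeds.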
val newLeaderAndIsr = new LeaderAndIsr(localBrokerId, leaderEpoch, newIsr.map(_.brokerId).toList, zkVersion)
val (updateSucceeded,newVersion) = ReplicationUtils.updateLeaderAndIsr(zkClient, topic, partitionId,
newLeaderAndIsr, controllerEpoch, zkVersion)
if(updateSucceeded) {
replicaManager.recordIsrChange(topicPartition)
inSyncReplicas = newIsr
zkVersion = newVersion
trace("ISR updated to [%s] and zkVersion updated to [%d]".format(newIsr.mkString(","), zkVersion))
} else {
replicaManager.failedIsrUpdatesRate.mark()
info("Cached zkVersion [%d] not equal to that in zookeeper, skip updating ISR".format(zkVersion))
}
}
/**
* remove deleted log metrics
*/
def removePartitionMetrics() {
removeMetric("UnderReplicated", tags)
removeMetric("UnderMinIsr", tags)
removeMetric("InSyncReplicasCount", tags)
removeMetric("ReplicasCount", tags)
removeMetric("LastStableOffsetLag", tags)
}
override def equals(that: Any): Boolean = that match {
case other: Partition => partitionId == other.partitionId && topic == other.topic && isOffline == other.isOffline
case _ => false
}
override def hashCode: Int =
31 + topic.hashCode + 17 * partitionId + (if (isOffline) 1 else 0)
override def toString(): String = {
val partitionString = new StringBuilder
partitionString.append("Topic: " + topic)
partitionString.append("; Partition: " + partitionId)
partitionString.append("; Leader: " + leaderReplicaIdOpt)
partitionString.append("; AllReplicas: " + allReplicasMap.keys.mkString(","))
partitionString.append("; InSyncReplicas: " + inSyncReplicas.map(_.brokerId).mkString(","))
partitionString.toString
}
}
| themarkypantz/kafka | core/src/main/scala/kafka/cluster/Partition.scala | Scala | apache-2.0 | 32,015 |
package es.weso.wiFetcher.fetchers
import java.io.File
import java.io.FileInputStream
import java.io.InputStream
import scala.collection.mutable.ListBuffer
import org.apache.log4j.Logger
import es.weso.reconciliator.CountryReconciliator
import es.weso.wiFetcher.configuration.Configuration
import es.weso.wiFetcher.dao.entity.DatasetDAOImpl
import es.weso.wiFetcher.dao.file.CountryDAOImpl
import es.weso.wiFetcher.dao.poi.IndicatorDAOImpl
import es.weso.wiFetcher.dao.poi.SecondaryObservationDAOImpl
import es.weso.wiFetcher.dao.poi.ProviderDAOImpl
import es.weso.wiFetcher.dao.poi.RegionDAOImpl
import es.weso.wiFetcher.dao.poi.SubIndexDAOImpl
import es.weso.wiFetcher.entities.Country
import es.weso.wiFetcher.entities.Dataset
import es.weso.wiFetcher.entities.Indicator
import es.weso.wiFetcher.entities.issues._
import es.weso.wiFetcher.entities.Observation
import es.weso.wiFetcher.entities.ObservationStatus.ObservationStatus
import es.weso.wiFetcher.entities.Provider
import es.weso.wiFetcher.entities.Region
import es.weso.wiFetcher.entities.traits.Component
import es.weso.wiFetcher.entities.traits.SubIndex
import es.weso.wiFetcher.generator.ModelGenerator
import es.weso.wiFetcher.utils.IssueManagerUtils
import es.weso.wiFetcher.utils.FilterIssue
import es.weso.wiFetcher.dao.poi.PrimaryObservationDAOImpl
import es.weso.wiFetcher.generator.CSVGenerator
import es.weso.wiFetcher.entities.traits.Index
/**
 * This class is the center of the application. It receives all user requests
 * and executes all the operations needed to obtain the result.
*/
case class SpreadsheetsFetcher(structure: File, raw: File) {
import SpreadsheetsFetcher._
private implicit val currentFetcher = this
val issueManager = new IssueManagerUtils()
val components: ListBuffer[Component] = ListBuffer.empty
val subIndexes: ListBuffer[SubIndex] = ListBuffer.empty
val primaryIndicators: ListBuffer[Indicator] = ListBuffer.empty
val secondaryIndicators: ListBuffer[Indicator] = ListBuffer.empty
val countries: ListBuffer[Country] = ListBuffer.empty
val regions: ListBuffer[Region] = ListBuffer.empty
val providers: ListBuffer[Provider] = ListBuffer.empty
val datasets: ListBuffer[Dataset] = ListBuffer.empty
val observations: ListBuffer[Observation] = ListBuffer.empty
val index : ListBuffer[Index] = ListBuffer.empty
//Load all structure information
loadStructure(structure)
if(!issueManager.asSeq.isEmpty)
issueManager.addWarn("There were problems parsing structure file, so ttl " +
"generated maybe is not complete.",
Some("Structure file"))
//Load all observations
loadObservations(raw)
def issues: Seq[Issue] = {
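    // Filter out issues raised for spreadsheet summary rows and helper columns (MEAN, SD, ...) before reporting.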
issueManager.addFilter(FilterIssue(col=Some(0),cell=Some("MEAN")))
issueManager.addFilter(FilterIssue(col=Some(0),cell=Some("Mean")))
issueManager.addFilter(FilterIssue(col=Some(0),cell=Some("SD")))
issueManager.addFilter(FilterIssue(col=Some(0),cell=Some("s.d.")))
issueManager.addFilter(FilterIssue(col=Some(0),cell=Some("OBSERVATIONS")))
issueManager.addFilter(FilterIssue(col=Some(0),cell=Some("MEAN OF COUNTRIES WITH 5 YEARS DATA")))
issueManager.addFilter(FilterIssue(cell=Some("STDEV")))
issueManager.addFilter(FilterIssue(cell=Some("Country Column")))
issueManager.addFilter(FilterIssue(cell=Some("Datasets_Average")))
issueManager.addFilter(FilterIssue(cell=Some("Questions")))
issueManager.addFilter(FilterIssue(cell=Some("Column")))
issueManager.filteredAsSeq
}
/**
   * This method creates a Jena model with all the information and stores it
* in a local file
*/
def storeAsTTL(baseUri: String, namespace: String, year : String, timestamp : Long) =
ModelGenerator(baseUri, namespace, year).generateJenaModel(this, timestamp)
/**
   * This method loads all the Web Index structure information
*/
private def loadStructure(f: File) {
safeLoadInformation(f, loadProviderInformation)
safeLoadInformation(f, loadSubIndexInformation)
safeLoadInformation(f, loadIndicatorInformation)
loadDatasetInformation(secondaryIndicators.toList)
loadCountryInformation(Configuration.getCountryFile, true)
safeLoadInformation(f, loadRegionInformation)
}
/**
   * This method saves the errors generated during the process to a CSV file
*/
def saveReport(timestamp : Long) : (Seq[Issue], String) = {
val csvSchema = Array("Type", "Message", "Path", "sheetName", "Column", "Row", "Cell")
val csvGenerator = CSVGenerator(csvSchema)
val finalIssues = issues
finalIssues.foreach(issue => {
val typ = issue match {
case e: Error => "Error"
case e: Warn => "Warning"
}
val value = Array(typ, issue.message, issue.path.getOrElse(""), issue.sheetName.getOrElse(""), issue.col.getOrElse("").toString, issue.row.getOrElse("").toString, issue.cell.getOrElse(""))
csvGenerator.addValue(value)
})
val path = csvGenerator.save(timestamp)
(finalIssues, path)
}
/**
   * This method loads all observations from an Excel file
*/
private def loadObservations(f: File) {
safeLoadInformation(f, loadSecondaryObservationInformation)
safeLoadInformation(f, loadPrimaryObservationInformation)
}
/**
* This method loads all dataset information
*/
private def loadDatasetInformation(indicators: List[Indicator]) {
val datasetDao = new DatasetDAOImpl(indicators)
datasets ++= datasetDao.getDatasets
}
/**
* This method loads all primary observation information
*/
private def loadPrimaryObservationInformation(is : InputStream) {
val primaryObservationDao = new PrimaryObservationDAOImpl(is)
observations ++= primaryObservationDao.getObservations
}
/**
* This method loads all secondary observation information
*/
private def loadSecondaryObservationInformation(is: InputStream) {
val secondaryObservationDao = new SecondaryObservationDAOImpl(is)
observations ++= secondaryObservationDao.getObservations
}
/**
   * This method loads the information contained in a file using the
   * process received as a parameter
*/
  private def safeLoadInformation(file: File, process: (InputStream) => Unit) {
    val is = new FileInputStream(file)
    try {
      process(is)
} finally {
is.close
}
}
/**
* This method loads all information about subindexes and components
*/
private def loadSubIndexInformation(is: InputStream) {
val subIndexDao = new SubIndexDAOImpl(is)
components ++= subIndexDao.getComponents
subIndexes ++= subIndexDao.getSubIndexes
index ++= subIndexDao.getIndexes
}
/**
* This method loads all information about indicators
*/
private def loadIndicatorInformation(is: InputStream) {
val indicatorDao = new IndicatorDAOImpl(is)
primaryIndicators ++= indicatorDao.getPrimaryIndicators
secondaryIndicators ++= indicatorDao.getSecondaryIndicators
}
/**
* This method loads all information about countries
*/
private def loadCountryInformation(uri: String, relativePath: Boolean) {
val countryDao = new CountryDAOImpl(uri, relativePath)
countries ++= countryDao.getCountries
}
/**
* This method loads all information about regions
*/
private def loadRegionInformation(is: InputStream) {
val regionDao = new RegionDAOImpl(is)
regions ++= regionDao.getRegions
}
/**
* This method loads all information about providers
*/
private def loadProviderInformation(is: InputStream) {
val providerDao = new ProviderDAOImpl(is)
providers ++= providerDao.getProviders
}
/**
   * This method obtains a country given a name. It uses the countryReconciliator
   * object to find the corresponding country. Returns None if the country
   * doesn't exist
*/
def obtainCountry(regionName: String): Option[Country] = {
logger.info("Obtaining country with name: " + regionName)
if (regionName == null || regionName.isEmpty) {
logger.error("The name of the country cannot be null o empty")
throw new IllegalArgumentException("The name of the country cannot " +
"be null o empty")
}
countryReconciliator.searchCountry(regionName) match {
case Some(name) => countries.find(c => c.name.equals(name))
case None => None
}
}
/**
   * Obtains an indicator given its id. Returns None if the indicator
* doesn't exist
*/
def obtainIndicatorById(id: String): Option[Indicator] = {
val combined: ListBuffer[Indicator] = ListBuffer.empty
combined.insertAll(0, primaryIndicators)
combined.insertAll(0, secondaryIndicators)
combined.find(indicator => indicator.id.equals(id))
}
/**
   * This method obtains a component given its id. Returns None if the component
* doesn't exist
*/
def obtainComponent(componentId: String, row : Int, col : Int): Option[Component] = {
if(componentId.isEmpty()) {
issueManager.addError("Component of a indicator cannot be empty",
Some("Structure file"), Some("Indicators"), Some(col), Some(row))
None
} else {
val result = components.find(component => component.id.equals(componentId))
if(!result.isDefined)
issueManager.addError("Not exist component " + componentId,
Some("Structure file"), Some("Indicators"), Some(col), Some(row))
result
}
}
/**
   * This method obtains the providers for a given id (multiple ids may be
   * separated by '/'). Only the providers that can be resolved are returned
*/
def obtainProvider(providerId : String, row : Int, col : Int) : ListBuffer[Provider] = {
val providersLocal : ListBuffer[Provider] = ListBuffer.empty
if(providerId.isEmpty()) {
issueManager.addError("Provider of a indicator cannot be empty",
Some("Structure file"), Some("Indicators"), Some(col), Some(row))
} else {
val parts = providerId.split("/")
parts.foreach(pvr =>{
val result = providers.find(provider => provider.id.equals(pvr))
if(!result.isDefined) {
val prov = obtainProviderByName(pvr, row, col)
if(prov.isDefined)
providersLocal += prov.get
else
issueManager.addError("Not exist provider " + pvr,
Some("Structure file"), Some("Indicators"), Some(col), Some(row))
} else {
providersLocal += result.get
}
})
}
providersLocal
}
/**
   * This method obtains a provider given its name
*/
def obtainProviderByName(providerName : String, row : Int, col : Int) : Option[Provider] = {
val result = providers.find(provider => provider.name.equalsIgnoreCase(providerName.trim))
result
}
def getDatasets(): List[Dataset] = {
return datasets.toList
}
def getDatasetById(id: String): Dataset = {
datasets.filter(_.id == id).head
}
def getObservationsByStatus(status: ObservationStatus): List[Observation] = {
observations.filter(_.status == status).toList
}
}
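/**
 * Hedged usage sketch (added for illustration): the file names, URIs and year below
 * are assumptions, not taken from the original project.
 */
private object SpreadsheetsFetcherUsageExample {
  def run(): Unit = {
    val fetcher = SpreadsheetsFetcher(new File("structure.xlsx"), new File("raw.xlsx"))
    val timestamp = System.currentTimeMillis
    // Emit the Jena model (TTL) for the parsed structure and observations.
    fetcher.storeAsTTL("http://example.org/", "http://example.org/ontology/", "2013", timestamp)
    // Persist any parsing issues to a CSV report and print them.
    val (issues, reportPath) = fetcher.saveReport(timestamp)
    issues.foreach(issue => println(issue.message + " (report: " + reportPath + ")"))
  }
}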
object SpreadsheetsFetcher {
private val countryReconciliator =
new CountryReconciliator(Configuration.getCountryReconciliatorFile, true)
private val logger: Logger = Logger.getLogger(this.getClass())
}
| weso/wiFetcher | app/es/weso/wiFetcher/fetchers/SpreadsheetsFetcher.scala | Scala | apache-2.0 | 11,128
/*
* Copyright 2015 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.csv.laws.discipline
import imp.imp
import kantan.codecs.laws.CodecValue.{IllegalValue, LegalValue}
import kantan.csv.{DecodeError, ParseError, ReadError}
import kantan.csv.laws.{Cell, IllegalCell, IllegalRow, LegalCell, LegalRow}
import org.scalacheck._, Arbitrary.{arbitrary => arb}
import org.scalacheck.rng.Seed
object arbitrary extends ArbitraryInstances
trait ArbitraryInstances extends kantan.codecs.laws.discipline.ArbitraryInstances {
val csv: Gen[List[List[String]]] = arb[List[List[Cell]]].map(_.map(_.map(_.value)))
implicit def arbTuple1[A: Arbitrary]: Arbitrary[Tuple1[A]] =
Arbitrary(imp[Arbitrary[A]].arbitrary.map(Tuple1.apply))
// - Errors ----------------------------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------------------------------------
val genOutOfBoundsError: Gen[DecodeError.OutOfBounds] = Gen.posNum[Int].map(DecodeError.OutOfBounds.apply)
val genTypeError: Gen[DecodeError.TypeError] = genException.map(DecodeError.TypeError.apply)
val genDecodeError: Gen[DecodeError] = Gen.oneOf(genOutOfBoundsError, genTypeError)
val genIOError: Gen[ParseError.IOError] = genIoException.map(ParseError.IOError.apply)
val genNoSuchElement: Gen[ParseError.NoSuchElement.type] = Gen.const(ParseError.NoSuchElement)
val genParseError: Gen[ParseError] = Gen.oneOf(genIOError, genNoSuchElement)
val genReadError: Gen[ReadError] = Gen.oneOf(genParseError, genDecodeError)
implicit val arbTypeError: Arbitrary[DecodeError.TypeError] = Arbitrary(genTypeError)
implicit val arbIOError: Arbitrary[ParseError.IOError] = Arbitrary(genIOError)
implicit val arbNoSuchElement: Arbitrary[ParseError.NoSuchElement.type] = Arbitrary(genNoSuchElement)
implicit val arbOutOfBounds: Arbitrary[DecodeError.OutOfBounds] = Arbitrary(genOutOfBoundsError)
implicit val arbDecodeError: Arbitrary[DecodeError] = Arbitrary(genDecodeError)
implicit val arbParseError: Arbitrary[ParseError] = Arbitrary(genParseError)
implicit val arbReadError: Arbitrary[ReadError] = Arbitrary(Gen.oneOf(genDecodeError, genParseError))
implicit val cogenCsvIOError: Cogen[ParseError.IOError] = Cogen[String].contramap(_.message)
implicit val cogenCsvNoSuchElement: Cogen[ParseError.NoSuchElement.type] = Cogen[Unit].contramap(_ => ())
implicit val cogenCsvParseError: Cogen[ParseError] = Cogen { (seed: Seed, err: ParseError) =>
err match {
case error: ParseError.NoSuchElement.type => cogenCsvNoSuchElement.perturb(seed, error)
case error: ParseError.IOError => cogenCsvIOError.perturb(seed, error)
}
}
implicit val cogenCsvOutOfBounds: Cogen[DecodeError.OutOfBounds] = Cogen[Int].contramap(_.index)
implicit val cogenCsvTypeError: Cogen[DecodeError.TypeError] = Cogen[String].contramap(_.message)
implicit val cogenCsvDecodeError: Cogen[DecodeError] = Cogen { (seed: Seed, err: DecodeError) =>
err match {
case error: DecodeError.OutOfBounds => cogenCsvOutOfBounds.perturb(seed, error)
case error: DecodeError.TypeError => cogenCsvTypeError.perturb(seed, error)
}
}
implicit val cogenCsvReadError: Cogen[ReadError] = Cogen { (seed: Seed, err: ReadError) =>
err match {
case error: DecodeError => cogenCsvDecodeError.perturb(seed, error)
case error: ParseError => cogenCsvParseError.perturb(seed, error)
}
}
// - Codec values ----------------------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------------------------------------
implicit def arbLegalRow[A](implicit arb: Arbitrary[LegalCell[A]]): Arbitrary[LegalRow[A]] = Arbitrary {
arb.arbitrary.map { c =>
LegalValue(Seq(c.encoded), c.decoded)
}
}
implicit def arbIllegalRow[A](implicit arb: Arbitrary[IllegalCell[A]]): Arbitrary[IllegalRow[A]] = Arbitrary {
arb.arbitrary.map { c =>
IllegalValue(Seq(c.encoded))
}
}
}
| nrinaudo/tabulate | laws/shared/src/main/scala/kantan/csv/laws/discipline/arbitrary.scala | Scala | mit | 4,872 |
package com.seanshubin.schulze
import com.seanshubin.black_box_test_5.Fixture
class RankedPreferencesFixture extends Fixture {
def run(input: Iterable[String]): Iterable[String] = {
val entries = input.map(Parser.parseCandidateAndRankEntry).toSeq
val candidateNames = entries.map(Parser.nameFromCandidateAndRank)
val rankingMap: Map[String, Int] = Map(entries: _*)
val preferenceStrengths = PreferenceStrengths.fromRanks(candidateNames, rankingMap)
Formatter.preferenceStrengthsToMultipleLineString(candidateNames, preferenceStrengths)
}
}
| SeanShubin/schulze | test/src/main/scala/com/seanshubin/schulze/RankedPreferencesFixture.scala | Scala | unlicense | 566 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.metadata
import org.apache.calcite.rel.core.JoinRelType
import org.apache.calcite.sql.fun.SqlStdOperatorTable.{EQUALS, LESS_THAN_OR_EQUAL}
import org.junit.Assert._
import org.junit.Test
import scala.collection.JavaConversions._
class FlinkRelMdColumnOriginNullCountTest extends FlinkRelMdHandlerTestBase {
@Test
def testGetColumnOriginNullCountOnTableScan(): Unit = {
Array(studentLogicalScan, studentFlinkLogicalScan, studentBatchScan, studentStreamScan)
.foreach { scan =>
assertEquals(0.0, mq.getColumnOriginNullCount(scan, 0))
assertEquals(0.0, mq.getColumnOriginNullCount(scan, 1))
assertEquals(6.0, mq.getColumnOriginNullCount(scan, 2))
assertEquals(0.0, mq.getColumnOriginNullCount(scan, 3))
assertNull(mq.getColumnOriginNullCount(scan, 4))
assertEquals(0.0, mq.getColumnOriginNullCount(scan, 5))
assertNull(mq.getColumnOriginNullCount(scan, 6))
}
val ts = relBuilder.scan("MyTable3").build()
assertEquals(1.0, mq.getColumnOriginNullCount(ts, 0))
assertEquals(0.0, mq.getColumnOriginNullCount(ts, 1))
}
@Test
def testGetColumnOriginNullCountOnSnapshot(): Unit = {
(0 until flinkLogicalSnapshot.getRowType.getFieldCount).foreach { idx =>
assertNull(mq.getColumnOriginNullCount(flinkLogicalSnapshot, idx))
}
}
@Test
def testGetColumnOriginNullCountOnProject(): Unit = {
assertEquals(0.0, mq.getColumnOriginNullCount(logicalProject, 0))
assertEquals(0.0, mq.getColumnOriginNullCount(logicalProject, 1))
assertNull(mq.getColumnOriginNullCount(logicalProject, 2))
assertNull(mq.getColumnOriginNullCount(logicalProject, 3))
assertNull(mq.getColumnOriginNullCount(logicalProject, 4))
assertNull(mq.getColumnOriginNullCount(logicalProject, 5))
assertNull(mq.getColumnOriginNullCount(logicalProject, 6))
assertNull(mq.getColumnOriginNullCount(logicalProject, 7))
assertEquals(0.0, mq.getColumnOriginNullCount(logicalProject, 8))
assertEquals(0.0, mq.getColumnOriginNullCount(logicalProject, 9))
assertEquals(0.0, mq.getColumnOriginNullCount(logicalProject, 10))
assertNull(mq.getColumnOriginNullCount(logicalProject, 11))
val ts = relBuilder.scan("MyTable3").build()
relBuilder.push(ts)
val projects = List(
relBuilder.call(EQUALS, relBuilder.field(0), relBuilder.literal(1)),
relBuilder.field(0),
relBuilder.field(1),
relBuilder.literal(true),
relBuilder.literal(null))
val project = relBuilder.project(projects).build()
assertEquals(null, mq.getColumnOriginNullCount(project, 0))
assertEquals(1.0, mq.getColumnOriginNullCount(project, 1))
assertEquals(0.0, mq.getColumnOriginNullCount(project, 2))
assertEquals(0.0, mq.getColumnOriginNullCount(project, 3))
assertEquals(1.0, mq.getColumnOriginNullCount(project, 4))
}
@Test
def testGetColumnOriginNullCountOnCalc(): Unit = {
// only filter
relBuilder.push(studentLogicalScan)
// id <= 2
val expr = relBuilder.call(LESS_THAN_OR_EQUAL, relBuilder.field(0), relBuilder.literal(2))
val calc1 = createLogicalCalc(
studentLogicalScan, studentLogicalScan.getRowType, relBuilder.fields(), List(expr))
(0 until calc1.getRowType.getFieldCount).foreach { idx =>
assertNull(mq.getColumnOriginNullCount(calc1, idx))
}
val ts = relBuilder.scan("MyTable3").build()
relBuilder.push(ts)
val projects = List(
relBuilder.call(EQUALS, relBuilder.field(0), relBuilder.literal(1)),
relBuilder.field(0),
relBuilder.field(1),
relBuilder.literal(true),
relBuilder.literal(null))
val outputRowType = relBuilder.project(projects).build().getRowType
val calc2 = createLogicalCalc(ts, outputRowType, projects, List())
assertEquals(null, mq.getColumnOriginNullCount(calc2, 0))
assertEquals(1.0, mq.getColumnOriginNullCount(calc2, 1))
assertEquals(0.0, mq.getColumnOriginNullCount(calc2, 2))
assertEquals(0.0, mq.getColumnOriginNullCount(calc2, 3))
assertEquals(1.0, mq.getColumnOriginNullCount(calc2, 4))
}
@Test
def testGetColumnOriginNullCountOnJoin(): Unit = {
val innerJoin1 = relBuilder.scan("MyTable3").scan("MyTable4")
.join(JoinRelType.INNER,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build
assertEquals(1.0, mq.getColumnOriginNullCount(innerJoin1, 0))
assertEquals(0.0, mq.getColumnOriginNullCount(innerJoin1, 1))
assertEquals(0.0, mq.getColumnOriginNullCount(innerJoin1, 2))
assertEquals(0.0, mq.getColumnOriginNullCount(innerJoin1, 3))
val innerJoin2 = relBuilder.scan("MyTable3").scan("MyTable4")
.join(JoinRelType.INNER,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
.build
assertEquals(0.0, mq.getColumnOriginNullCount(innerJoin2, 0))
assertEquals(0.0, mq.getColumnOriginNullCount(innerJoin2, 1))
assertEquals(0.0, mq.getColumnOriginNullCount(innerJoin2, 2))
assertEquals(0.0, mq.getColumnOriginNullCount(innerJoin2, 3))
Array(logicalLeftJoinOnUniqueKeys, logicalRightJoinNotOnUniqueKeys,
logicalFullJoinWithEquiAndNonEquiCond, logicalSemiJoinNotOnUniqueKeys,
logicalSemiJoinWithEquiAndNonEquiCond).foreach { join =>
(0 until join.getRowType.getFieldCount).foreach { idx =>
assertNull(mq.getColumnOriginNullCount(join, idx))
}
}
}
}
| bowenli86/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdColumnOriginNullCountTest.scala | Scala | apache-2.0 | 6,276 |
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.vertx.scala.core.sockjs
import org.vertx.java.core.sockjs.{EventBusBridgeHook => JEventBusBridgeHook}
import org.vertx.scala.core.json.JsonObject
import org.vertx.scala.core._
import org.vertx.scala.core.FunctionConverters._
/**
* A hook that you can use to receive various events on the EventBusBridge.
*
* @author Galder Zamarreño
*/
final class EventBusBridgeHook private[scala] (val asJava: JEventBusBridgeHook) extends AnyVal {
/**
* Called when a new socket is created
* You can override this method to do things like check the origin header of a socket before
* accepting it
* @param sock The socket
* @return true to accept the socket, false to reject it
*/
def handleSocketCreated(sock: SockJSSocket): Boolean =
asJava.handleSocketCreated(sock.asJava)
/**
* The socket has been closed
* @param sock The socket
*/
def handleSocketClosed(sock: SockJSSocket): Unit =
asJava.handleSocketClosed(sock.asJava)
/**
* Client is sending or publishing on the socket
* @param sock The sock
* @param send if true it's a send else it's a publish
* @param msg The message
* @param address The address the message is being sent/published to
* @return true To allow the send/publish to occur, false otherwise
*/
def handleSendOrPub(sock: SockJSSocket, send: Boolean, msg: JsonObject, address: String): Boolean =
asJava.handleSendOrPub(sock.asJava, send, msg, address)
/**
* Called before client registers a handler
* @param sock The socket
* @param address The address
* @return true to let the registration occur, false otherwise
*/
def handlePreRegister(sock: SockJSSocket, address: String): Boolean =
asJava.handlePreRegister(sock.asJava, address)
/**
* Called after client registers a handler
* @param sock The socket
* @param address The address
*/
def handlePostRegister(sock: SockJSSocket, address: String): Unit =
asJava.handlePostRegister(sock.asJava, address)
/**
* Client is unregistering a handler
* @param sock The socket
* @param address The address
*/
def handleUnregister(sock: SockJSSocket, address: String): Boolean =
asJava.handleUnregister(sock.asJava, address)
/**
* Called before authorisation - you can override authorisation here if you don't want the default
* @param message The auth message
* @param sessionID The session ID
* @param handler Handler - call this when authorisation is complete
* @return true if you wish to override authorisation
*/
def handleAuthorise(message: JsonObject, sessionID: String, handler: AsyncResult[Boolean] => Unit): Boolean =
asJava.handleAuthorise(message, sessionID, asyncResultConverter((x: java.lang.Boolean) => x.booleanValue)(handler))
}
object EventBusBridgeHook {
def apply(internal: JEventBusBridgeHook) = new EventBusBridgeHook(internal)
}
| vert-x/mod-lang-scala | src/main/scala/org/vertx/scala/core/sockjs/EventBusBridgeHook.scala | Scala | apache-2.0 | 3,507
import sbt._
object Dependencies {
val http4s = Seq(
"org.http4s" %% "http4s-circe" % "0.18.0-M1",
"org.http4s" %% "http4s-client" % "0.18.0-M1"
)
val diffson = Seq(
"org.gnieh" %% "diffson-circe" % "2.2.2"
)
val circe = Seq(
"io.circe" %% "circe-literal" % "0.9.0-M1",
"io.circe" %% "circe-generic" % "0.9.0-M1"
)
val fs2 = Seq(
"co.fs2" %% "fs2-core" % "0.10.0-M6"
)
val scalatest = Seq(
"org.scalatest" %% "scalatest" % "3.0.1" % "test"
)
val testBlazeHttp = Seq(
"org.http4s" %% "http4s-blaze-client" % "0.18.0-M1" % "test"
)
val testLogging = Seq(
"ch.qos.logback" % "logback-classic" % "1.2.3" % "test"
)
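  // Hedged convenience aggregate (added for illustration; the name `all` is an
  // assumption, not part of the original file). A build definition could then use
  //   libraryDependencies ++= Dependencies.all
  val all: Seq[ModuleID] =
    http4s ++ diffson ++ circe ++ fs2 ++ scalatest ++ testBlazeHttp ++ testLogging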
}
| solidninja/openshift-scala-api | project/Dependencies.scala | Scala | mit | 684 |
package models.services
import models.gift.{Comment, Gift}
import scala.concurrent.Future
import scala.language.postfixOps
trait EventNotificationService {
def publishComment(eventId: Long, comment: Comment): Future[Unit]
def publishGift(gift: Gift): Future[Unit]
}
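/**
 * Hedged sketch (added for illustration; not part of the original sources): a no-op
 * implementation that acknowledges every publication without doing any work.
 */
object NoOpEventNotificationService extends EventNotificationService {
  def publishComment(eventId: Long, comment: Comment): Future[Unit] = Future.successful(())
  def publishGift(gift: Gift): Future[Unit] = Future.successful(())
}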
| epot/Gifter | app/models/services/EventNotificationService.scala | Scala | mit | 286 |
package com.twitter.finagle.stats
import com.twitter.conversions.time._
import com.twitter.finagle.builder.{ClientBuilder, ServerBuilder}
import com.twitter.finagle.integration.StringCodec
import com.twitter.finagle.Service
import com.twitter.util.{Await, Future, Promise}
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit
import org.junit.runner.RunWith
import org.mockito.Mockito._
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
import scala.collection.mutable.ArrayBuffer
@RunWith(classOf[JUnitRunner])
class StatsReceiverTest extends FunSuite {
test("RollupStatsReceiver counter/stats") {
val mem = new InMemoryStatsReceiver
val receiver = new RollupStatsReceiver(mem)
receiver.counter("toto", "titi", "tata").incr()
assert(mem.counters(Seq("toto")) == 1)
assert(mem.counters(Seq("toto", "titi")) == 1)
assert(mem.counters(Seq("toto", "titi", "tata")) == 1)
receiver.counter("toto", "titi", "tutu").incr()
assert(mem.counters(Seq("toto")) == 2)
assert(mem.counters(Seq("toto", "titi")) == 2)
assert(mem.counters(Seq("toto", "titi", "tata")) == 1)
assert(mem.counters(Seq("toto", "titi", "tutu")) == 1)
}
test("Broadcast Counter/Stat") {
class MemCounter extends Counter {
var c = 0
def incr(delta: Int) { c += delta }
}
val c1 = new MemCounter
val c2 = new MemCounter
val broadcastCounter = BroadcastCounter(Seq(c1, c2))
assert(c1.c == 0)
assert(c2.c == 0)
broadcastCounter.incr()
assert(c1.c == 1)
assert(c2.c == 1)
class MemStat extends Stat {
var values: Seq[Float] = ArrayBuffer.empty[Float]
def add(f: Float) { values = values :+ f }
}
val s1 = new MemStat
val s2 = new MemStat
val broadcastStat = BroadcastStat(Seq(s1, s2))
assert(s1.values === Seq.empty)
assert(s2.values === Seq.empty)
broadcastStat.add(1F)
assert(s1.values === Seq(1F))
assert(s2.values === Seq(1F))
}
test("StatsReceiver time") {
val receiver = spy(new InMemoryStatsReceiver)
receiver.time("er", "mah", "gerd") { () }
verify(receiver, times(1)).stat("er", "mah", "gerd")
receiver.time(TimeUnit.NANOSECONDS, "er", "mah", "gerd") { () }
verify(receiver, times(2)).stat("er", "mah", "gerd")
val stat = receiver.stat("er", "mah", "gerd")
verify(receiver, times(3)).stat("er", "mah", "gerd")
receiver.time(TimeUnit.DAYS, stat) { () }
verify(receiver, times(3)).stat("er", "mah", "gerd")
}
test("StatsReceiver timeFuture") {
val receiver = spy(new InMemoryStatsReceiver)
Await.ready((receiver.timeFuture("2", "chainz") { Future.Unit }), 1.second)
verify(receiver, times(1)).stat("2", "chainz")
Await.ready((receiver.timeFuture(TimeUnit.MINUTES, "2", "chainz") { Future.Unit }), 1.second)
verify(receiver, times(2)).stat("2", "chainz")
val stat = receiver.stat("2", "chainz")
verify(receiver, times(3)).stat("2", "chainz")
Await.result((receiver.timeFuture(TimeUnit.HOURS, stat) { Future.Unit }), 1.second)
verify(receiver, times(3)).stat("2", "chainz")
}
test("Scoped equality") {
val sr = new InMemoryStatsReceiver
assert(sr == sr)
assert(sr.scope("foo") != sr.scope("bar"))
}
test("Scoped forwarding to NullStatsReceiver") {
assert(NullStatsReceiver.scope("foo").scope("bar").isNull)
}
test("Forwarding to LoadedStatsReceiver") {
val prev = LoadedStatsReceiver.self
LoadedStatsReceiver.self = NullStatsReceiver
val dsr = DefaultStatsReceiver // StatsReceiverProxy
val csr = ClientStatsReceiver // NameTranslatingStatsReceiver
val ssr = ServerStatsReceiver // NameTranslatingStatsReceiver
try {
assert(dsr.isNull, "DefaultStatsReceiver should be null")
assert(csr.isNull, "ClientStatsReceiver should be null")
assert(ssr.isNull, "ServerStatsReceiver should be null")
val mem = new InMemoryStatsReceiver
LoadedStatsReceiver.self = mem
assert(!dsr.isNull, "DefaultStatsReceiver should not be null")
assert(!csr.isNull, "ClientStatsReceiver should not be null")
assert(!ssr.isNull, "ServerStatsReceiver should not be null")
dsr.counter("req").incr()
csr.counter("req").incr()
ssr.counter("req").incr()
assert(mem.counters(Seq("req")) == 1)
assert(mem.counters(Seq("clnt", "req")) == 1)
assert(mem.counters(Seq("srv", "req")) == 1)
} finally {
LoadedStatsReceiver.self = prev
}
}
test("rollup statsReceiver work in action") {
val never = new Service[String, String] {
def apply(request: String) = new Promise[String]
}
val address = new InetSocketAddress(0)
val server = ServerBuilder()
.codec(StringCodec)
.bindTo(address)
.name("FinagleServer")
.build(never)
val mem = new InMemoryStatsReceiver
val client = ClientBuilder()
.name("client")
.hosts(server.localAddress)
.codec(StringCodec)
.requestTimeout(10.millisecond)
.hostConnectionLimit(1)
.hostConnectionMaxWaiters(1)
.reportTo(mem)
.build()
// generate com.twitter.finagle.IndividualRequestTimeoutException
Await.ready(client("hi"))
Await.ready(server.close())
// generate com.twitter.finagle.WriteException$$anon$1
Await.ready(client("hi"))
val aggregatedFailures = mem.counters(Seq("client", "failures"))
val otherFailuresSum = {
val failures = mem.counters filter { case (names, _) =>
names.startsWith(Seq("client", "failures"))
}
failures.values.sum - aggregatedFailures
}
assert(aggregatedFailures == otherFailuresSum)
assert(aggregatedFailures == 2)
}
}
| firebase/finagle | finagle-core/src/test/scala/com/twitter/finagle/stats/StatsReceiverTest.scala | Scala | apache-2.0 | 5,725 |
package dotty.tools.dotc
package typer
import dotty.tools.dotc.ast.{ Trees, tpd }
import core._
import Types._, Contexts._, Flags._, Symbols._, Annotations._, Trees._, NameOps._
import Decorators._
import Variances._
import config.Printers.variances
/** Provides `check` method to check that all top-level definitions
* in tree are variance correct. Does not recurse inside methods.
* The method should be invoked once for each Template.
*/
object VarianceChecker {
private case class VarianceError(tvar: Symbol, required: Variance)
def check(tree: tpd.Tree)(implicit ctx: Context) =
new VarianceChecker()(ctx).Traverser.traverse(tree)
}
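// Hedged illustration (added by the editor, not part of the compiler sources): the
// checker rejects definitions whose type parameters occur in positions that
// contradict their declared variance, e.g.
//
//   class Box[+A] { def put(a: A): Unit = () }
//
// is reported because the covariant `A` occurs in the contravariant (parameter)
// position of `put`, while `class Box[+A] { def get: A }` is accepted.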
class VarianceChecker()(implicit ctx: Context) {
import VarianceChecker._
import tpd._
private object Validator extends TypeAccumulator[Option[VarianceError]] {
private var base: Symbol = _
/** Is no variance checking needed within definition of `base`? */
def ignoreVarianceIn(base: Symbol): Boolean = (
base.isTerm
|| base.is(Package)
|| base.is(Local)
)
/** The variance of a symbol occurrence of `tvar` seen at the level of the definition of `base`.
* The search proceeds from `base` to the owner of `tvar`.
* Initially the state is covariant, but it might change along the search.
*/
def relativeVariance(tvar: Symbol, base: Symbol, v: Variance = Covariant): Variance = /*ctx.traceIndented(i"relative variance of $tvar wrt $base, so far: $v")*/ {
if (base == tvar.owner) v
else if ((base is Param) && base.owner.isTerm)
relativeVariance(tvar, paramOuter(base.owner), flip(v))
else if (ignoreVarianceIn(base.owner)) Bivariant
else if (base.isAliasType) relativeVariance(tvar, base.owner, Invariant)
else relativeVariance(tvar, base.owner, v)
}
/** The next level to take into account when determining the
* relative variance with a method parameter as base. The method
* is always skipped. If the method is a constructor, we also skip
* its class owner, because constructors are not checked for variance
* relative to the type parameters of their own class. On the other
* hand constructors do count for checking the variance of type parameters
* of enclosing classes. I believe the Scala 2 rules are too lenient in
* that respect.
*/
private def paramOuter(meth: Symbol) =
if (meth.isConstructor) meth.owner.owner else meth.owner
/** Check variance of abstract type `tvar` when referred from `base`. */
private def checkVarianceOfSymbol(tvar: Symbol): Option[VarianceError] = {
val relative = relativeVariance(tvar, base)
if (relative == Bivariant) None
else {
val required = compose(relative, this.variance)
def tvar_s = s"$tvar (${varianceString(tvar.flags)} ${tvar.showLocated})"
def base_s = s"$base in ${base.owner}" + (if (base.owner.isClass) "" else " in " + base.owner.enclosingClass)
ctx.log(s"verifying $tvar_s is ${varianceString(required)} at $base_s")
ctx.log(s"relative variance: ${varianceString(relative)}")
ctx.log(s"current variance: ${this.variance}")
ctx.log(s"owner chain: ${base.ownersIterator.toList}")
if (tvar is required) None
else Some(VarianceError(tvar, required))
}
}
/** For PolyTypes, type parameters are skipped because they are defined
* explicitly (their TypeDefs will be passed here.) For MethodTypes, the
* same is true of the parameters (ValDefs).
*/
def apply(status: Option[VarianceError], tp: Type): Option[VarianceError] = ctx.traceIndented(s"variance checking $tp of $base at $variance", variances) {
if (status.isDefined) status
else tp match {
case tp: TypeRef =>
val sym = tp.symbol
if (sym.variance != 0 && base.isContainedIn(sym.owner)) checkVarianceOfSymbol(sym)
else if (sym.isAliasType) this(status, sym.info)
else foldOver(status, tp)
case tp: MethodType =>
this(status, tp.resultType) // params will be checked in their TypeDef nodes.
case tp: PolyType =>
this(status, tp.resultType) // params will be checked in their ValDef nodes.
case AnnotatedType(annot, _) if annot.symbol == defn.UncheckedVarianceAnnot =>
status
//case tp: ClassInfo =>
// ??? not clear what to do here yet. presumably, it's all checked at local typedefs
case _ =>
foldOver(status, tp)
}
}
def validateDefinition(base: Symbol): Option[VarianceError] = {
val saved = this.base
this.base = base
try apply(None, base.info)
finally this.base = saved
}
}
private object Traverser extends TreeTraverser {
def checkVariance(sym: Symbol) = Validator.validateDefinition(sym) match {
case Some(VarianceError(tvar, required)) =>
ctx.error(
i"${varianceString(tvar.flags)} $tvar occurs in ${varianceString(required)} position in type ${sym.info} of $sym",
sym.pos)
case None =>
}
override def traverse(tree: Tree)(implicit ctx: Context) = {
def sym = tree.symbol
// No variance check for private/protected[this] methods/values.
def skip = !sym.exists || sym.is(Local)
tree match {
case defn: MemberDef if skip =>
ctx.debuglog(s"Skipping variance check of ${sym.showDcl}")
case tree: TypeDef =>
checkVariance(sym)
traverseChildren(tree)
case tree: ValDef =>
checkVariance(sym)
case DefDef(_, tparams, vparamss, _, _) =>
checkVariance(sym)
tparams foreach traverse
vparamss foreach (_ foreach traverse)
case Template(_, _, _, body) =>
traverseChildren(tree)
case _ =>
}
}
}
}
| folone/dotty | src/dotty/tools/dotc/typer/VarianceChecker.scala | Scala | bsd-3-clause | 5,872 |
package com.eevolution.context.dictionary.domain.api.service
import com.eevolution.context.dictionary.api
import com.eevolution.context.dictionary.domain.model.LabelPrinterFunction
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 10/11/17.
*/
/**
* Label Printer Function Service
*/
trait LabelPrinterFunctionService extends api.Service[LabelPrinterFunction, Int] {
//Definition
}
| adempiere/ADReactiveSystem | dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/api/service/LabelPrinterFunctionService.scala | Scala | gpl-3.0 | 1,263 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.intg
import org.ensime.api._
import org.ensime.core.RefactoringHandlerTestUtils
import org.ensime.fixture._
import org.ensime.util.EnsimeSpec
import org.ensime.util.ensimefile.Implicits.DefaultCharset
import org.ensime.util.file._
class ReverseLookupsSpec extends EnsimeSpec
with IsolatedProjectFixture
with IsolatedEnsimeConfigFixture
with IsolatedTestKitFixture
with RefactoringHandlerTestUtils {
override def original: EnsimeConfig = EnsimeConfigFixture.SimpleTestProject
"FindUsages" should "find usages using reverse lookups info" in
withEnsimeConfig { implicit config =>
withTestKit { implicit testKit =>
withProject { (project, asyncHelper) =>
import testKit._
val sourceRoot = scalaMain(config)
val fooFile = sourceRoot / "org/example/Foo.scala"
// uses of `testMethod`
project ! UsesOfSymbolAtPointReq(Left(fooFile), 119)
val uses = expectMsgType[ERangePositions]
uses.positions.map(usage => (s"${File(usage.file).getName}", usage.offset, usage.start, usage.end)) should contain theSameElementsAs List(
("Foo.scala", 114, 110, 172),
("Foo.scala", 273, 269, 283),
("package.scala", 94, 80, 104)
)
}
}
}
"Refactor Rename" should "make use of reverse lookups information" in
withEnsimeConfig { implicit config =>
withTestKit { implicit testKit =>
withProject { (project, asyncHelper) =>
import testKit._
val sourceRoot = scalaMain(config)
val fooFile = sourceRoot / "org/example/Foo.scala"
val packageFile = sourceRoot / "org/example/package.scala"
project ! RefactorReq(1234, RenameRefactorDesc("notATestMethod", fooFile, 119, 119), interactive = false)
expectMsgPF() {
case response @ RefactorDiffEffect(1234, RefactorType.Rename, diff) =>
val relevantExpectedPartFoo =
s"""|@@ -9,3 +9,3 @@
| class Foo extends Bar {
|- def testMethod(i: Int, s: String) = {
|+ def notATestMethod(i: Int, s: String) = {
| i + s.length
|@@ -16,3 +16,3 @@
| println("Hello, " + foo.x)
|- println(foo.testMethod(7, "seven"))
|+ println(foo.notATestMethod(7, "seven"))
|
|""".stripMargin
val relevantExpectedPartPackage =
s"""|@@ -6,3 +6,3 @@
|
|- new Foo.Foo().testMethod(1, "")
|+ new Foo.Foo().notATestMethod(1, "")
| }
|""".stripMargin
val diffContents = diff.canon.readString()
val expectedContentsFoo = expectedDiffContent(fooFile.getPath, relevantExpectedPartFoo)
val expectedContentsPackage = expectedDiffContent(packageFile.getPath, relevantExpectedPartPackage)
val expectedContents = s"$expectedContentsFoo\n$expectedContentsPackage"
if (diffContents == expectedContents) true
else fail(s"Different diff content than expected. \n Actual content: '$diffContents' \n ExpectedRelevantContent: '$expectedContents'")
}
}
}
}
}
| hzenginx/ensime-server | core/src/it/scala/org/ensime/intg/ReverseLookupsSpec.scala | Scala | gpl-3.0 | 3,534 |
package breeze
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import generic.{URFunc, UReduceable, UFunc}
import linalg.operators.{OpSub, BinaryOp}
import linalg.{NumericOps, QuasiTensor, Tensor}
import scala.math._
import scala.{math=>m}
import org.apache.commons.math3.special.{Gamma => G, Erf}
/**
* Provides some functions left out of java.lang.math.
*
* @author dlwh, afwlehmann
*/
package object numerics extends UniversalFuncs {
val inf, Inf = Double.PositiveInfinity
val nan, NaN = Double.NaN
/**
* Evaluates the log of the generalized beta function.
* \\sum_a lgamma(c(a))- lgamma(c.sum)
*/
val lbeta:URFunc[Double, Double] = new URFunc[Double, Double] {
def apply(cc: TraversableOnce[Double]): Double = {
var sum = 0.0
var lgSum = 0.0
for(v <- cc) {
sum += v
lgSum += lgamma(v)
}
lgSum - lgamma(sum)
}
override def apply(arr: Array[Double], offset: Int, stride: Int, length: Int, isUsed: (Int) => Boolean): Double = {
var off = offset
var sum = 0.0
var lgSum = 0.0
var i = 0
while(i < length) {
if(isUsed(off)) {
sum += arr(off)
lgSum += lgamma(arr(off))
}
i += 1
off += stride
}
lgSum - lgamma(sum)
}
}
/**
* Computes the log of the gamma function.
*
* @return an approximation of the log of the Gamma function of x.
*/
val lgamma:UFunc[Double, Double] = UFunc(G.logGamma _)
/**
* The derivative of the log gamma function
*/
val digamma = UFunc(G.digamma _)
/**
* The second derivative of the log gamma function
*/
val trigamma = UFunc(G.trigamma _)
/**
* An approximation to the error function
*/
val erf = UFunc{ Erf.erf _ }
/**
   * An approximation to the complementary error function: erfc(x) = 1 - erf(x)
*/
val erfc = UFunc{ Erf.erfc _ }
/**
* The imaginary error function for real argument x.
*
* Adapted from http://www.mathworks.com/matlabcentral/newsreader/view_thread/24120
* verified against mathematica
*
* @return
*/
val erfi:UFunc[Double, Double] = new UFunc[Double, Double]{
def apply(x:Double):Double = {
if(x < 0) -apply(-x)
else { // taylor expansion
var y = x
val x2 = x * x
var xx = x
var f = 1.0
var n = 0
while (n < 100) {
n += 1
f /= n
xx *= x2
val del = f * xx/(2*n+1)
if(del < 1E-8) n = 101
y += del
}
y = y*2/m.sqrt(Pi)
y
}
}
}
/**
* Inverse erf
*/
val erfinv = UFunc{ Erf.erfInv _ }
/**
* Inverse erfc
*/
val erfcinv = UFunc{ Erf.erfcInv _ }
/**
   * regularized incomplete gamma function \\int_0^x \\exp(-t) t^(a-1) dt / Gamma(a)
* @param a
* @param x
* @see http://commons.apache.org/proper/commons-math/apidocs/org/apache/commons/math3/special/Gamma.html#regularizedGammaP(double, double)
*/
def gammp(a: Double, x: Double) = G.regularizedGammaP(a, x)
/**
   * log Incomplete gamma function = \\log \\int_0^x \\exp(-t) t^(a-1) dt
*
* Based on NR
*/
def lgamma(a: Double, x: Double) = _lgamma(a, x)
/**
   * log Incomplete gamma function = \\log \\int_0^x \\exp(-t) t^(a-1) dt
* May store lgamma(a) in lgam(0) if it's non-null and needs to be computed.
* Based on NR
*/
private def _lgamma(a: Double, x:Double, lgam: Array[Double] = null):Double = {
if (x < 0.0 || a <= 0.0) throw new IllegalArgumentException()
else if(x == 0) 0.0
else if (x < a + 1.0) {
var ap = a
var del, sum = 1.0/a
var n = 0
var result = Double.NaN
while(n < 100) {
ap += 1
del *= x/ap
sum += del
if (scala.math.abs(del) < scala.math.abs(sum)*1E-7) {
result = -x+a*m.log(x) + m.log(sum)
n = 100
}
n += 1
}
if(lgam != null) lgam(0) = Double.NaN
if(result.isNaN) throw new ArithmeticException("Convergence failed")
else result
} else {
val gln = lgamma(a)
var b = x+1.0-a
var c = 1.0/1.0e-30
var d = 1.0/b
var h = d
var n = 0
while(n < 100) {
n += 1
val an = -n*(n-a)
b += 2.0
d = an*d+b
if (scala.math.abs(d) < 1E-30) d = 1E-30
c = b+an/c
if (scala.math.abs(c) < 1E-30) c = 1E-30
d = 1.0/d
val del = d*c
h *= del
if (scala.math.abs(del-1.0) < 1E-7) n = 101
}
if(lgam != null) lgam(0) = gln
if (n == 100) throw new ArithmeticException("Convergence failed")
else logDiff(gln, -x+a*log(x) + m.log(h))
}
}
/**
* Sums together things in log space.
* @return log(exp(a) + exp(b))
*/
def logSum(a: Double, b: Double) = {
if (a.isNegInfinity) b
else if (b.isNegInfinity) a
else if (a < b) b + scala.math.log1p(exp(a - b))
else a + scala.math.log1p(exp(b - a))
}
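  /** Hedged worked example (added for illustration; not part of the original API):
    * adds two probabilities without leaving log space, since
    * log(exp(log p1) + exp(log p2)) = log(p1 + p2).
    */
  private def logSumExample: Double =
    logSum(m.log(0.5), m.log(0.25)) // approximately m.log(0.75)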
/**
* Sums together things in log space.
* @return log(\\sum exp(a_i))
*/
def logSum(a: Double, b: Double, c: Double*): Double = {
if (c.length == 0)
logSum(a, b)
else
logSum(logSum(a, b) +: c)
}
/**
* Sums together things in log space.
* @return log(\\sum exp(a_i))
*/
def logSum(iter: Iterator[Double], max: Double): Double = {
require(iter.hasNext)
if (max.isInfinite) {
max
} else {
val aux = (0.0 /: iter) {
(acc, x) => if (x.isNegInfinity) acc else acc + exp(x-max)
}
if (aux != 0)
max + scala.math.log(aux)
else
max
}
}
/**
* Sums together things in log space.
* @return log(\\sum exp(a_i))
*/
def logSum(a: Seq[Double]): Double = {
a.length match {
case 0 => Double.NegativeInfinity
case 1 => a(0)
case 2 => logSum(a(0), a(1))
case _ => logSum(a.iterator, a reduceLeft (_ max _))
}
}
/**
* Sums together the first length elements in log space.
* The length parameter is used to make things faster.
*
* This method needs to be fast. Don't scala-ify it.
* @return log(\\sum^length exp(a_i))
*/
def logSum(a: Array[Double], length: Int):Double = {
length match {
case 0 => Double.NegativeInfinity
case 1 => a(0)
case 2 => logSum(a(0),a(1))
case _ =>
val m = max(a, length)
if(m.isInfinite) m
else {
var i = 0
var accum = 0.0
while(i < length) {
accum += scala.math.exp(a(i) - m)
i += 1
}
m + scala.math.log(accum)
}
}
}
/** fast versions of max. Useful for the fast logsum. */
def max(a: Array[Double], length: Int) = {
var i = 1
var max = a(0)
while(i < length) {
if(a(i) > max) max = a(i)
i += 1
}
max
}
/**
* The sigmoid function: 1/(1 + exp(-x))
*
*
*/
def sigmoid = UFunc { (x:Double) => 1/(1+scala.math.exp(-x)) }
/**
* Takes the difference of two doubles in log space. Requires a > b.
* Note that this only works if a and b are close in value. For a >> b,
* this will almost certainly do nothing. (exp(30) - exp(1) \\approx exp(30))
*
* @return log(exp(a) - exp(b))
*/
def logDiff(a: Double, b: Double): Double = {
require(a >= b)
if (a > b) a + log(1.0 - exp(b-a))
else Double.NegativeInfinity
}
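  /** Hedged worked example (added for illustration; not part of the original API):
    * subtracts two nearby probabilities in log space, since
    * log(exp(a) - exp(b)) = log(p_a - p_b) for a >= b.
    */
  private def logDiffExample: Double =
    logDiff(m.log(0.75), m.log(0.25)) // approximately m.log(0.5)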
/**
* Computes the polynomial P(x) with coefficients given in the passed in array.
   * coefs(i) is the coefficient of the x^i term.
*/
def polyval(coefs: Array[Double], x: Double) = {
var i = coefs.length-1
var p = coefs(i)
while (i>0) {
i -= 1
p = p*x + coefs(i)
}
p
}
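  /** Hedged worked example (added for illustration; not part of the original API):
    * with coefs = Array(1.0, 2.0, 3.0) the polynomial is 1 + 2*x + 3*x^2,
    * so evaluating it at x = 2 gives 1 + 4 + 12 = 17.
    */
  private def polyvalExample: Double =
    polyval(Array(1.0, 2.0, 3.0), 2.0) // = 17.0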
/**
* closeTo for Doubles.
*/
def closeTo(a: Double, b: Double, relDiff: Double = 1E-4) = {
a == b || (scala.math.abs(a-b) < scala.math.max(scala.math.max(scala.math.abs(a),scala.math.abs(b)) ,1) * relDiff)
}
/**
* The indicator function. 1.0 iff b, else 0.0
*/
val I: UFunc[Boolean, Double] = new UFunc[Boolean, Double] {
def apply(b: Boolean) = if (b) 1.0 else 0.0
}
/**
* The indicator function in log space: 0.0 iff b else Double.NegativeInfinity
*/
val logI: UFunc[Boolean, Double] = new UFunc[Boolean, Double] {
def apply(b: Boolean) = if(b) 0.0 else Double.NegativeInfinity
}
}
trait UniversalFuncs {
import scala.{math=>m}
// TODO: these probably need to be manually specced out because boxing hurts so much
val exp = UFunc(m.exp _)
val log = UFunc(m.log _)
val log1p = UFunc(m.log1p _)
val sqrt = UFunc(m.sqrt _)
val sin = UFunc(m.sin _)
val cos = UFunc(m.cos _)
val tan = UFunc(m.tan _)
val asin = UFunc(m.asin _)
val acos = UFunc(m.acos _)
val atan = UFunc(m.atan _)
val toDegrees = UFunc(m.toDegrees _)
val toRadians = UFunc(m.toRadians _)
val floor = UFunc(m.floor _)
val ceil = UFunc(m.ceil _)
val round = UFunc(m.round _)
val rint = UFunc(m.rint _)
val signum = UFunc(m.signum(_:Double))
val abs = UFunc(m.abs(_:Double))
}
| ktakagaki/breeze | src/main/scala/breeze/numerics/package.scala | Scala | apache-2.0 | 9,583
package scutil.lang
import scala.collection.Factory
import scutil.lang.tc._
object Validated {
def valid[E,T](value:T):Validated[E,T] = Valid(value)
def invalid[E,T](problems:E):Validated[E,T] = Invalid(problems)
def invalidNes[E,T](problems:E):Validated[Nes[E],T] = Invalid(Nes one problems)
//------------------------------------------------------------------------------
def switch[E,T](ok:Boolean, problems: =>E, value: =>T):Validated[E,T] =
if (ok) valid(value)
else invalid(problems)
implicit final class MergeableValidated[T](peer:Validated[T,T]) {
def merge:T =
peer match {
case Invalid(x) => x
case Valid(x) => x
}
}
//------------------------------------------------------------------------------
//## typeclass instances
implicit def ValidatedApplicative[S:Semigroup]:Applicative[Validated[S,_]] =
new Applicative[Validated[S,_]] {
override def pure[A](it:A):Validated[S,A] = Validated valid it
override def ap[A,B](func:Validated[S,A=>B])(it:Validated[S,A]):Validated[S,B] = func ap it
}
implicit def ValidatedSemigroup[S:Semigroup,T]:Semigroup[Validated[S,T]] =
Semigroup instance (_ or _)
//------------------------------------------------------------------------------
final case class Invalid[E](problems:E) extends Validated[E,Nothing]
final case class Valid[T](value:T) extends Validated[Nothing,T]
}
sealed trait Validated[+E,+T] {
def cata[X](invalid:E=>X, valid:T=>X):X =
this match {
case Validated.Invalid(x) => invalid(x)
case Validated.Valid(x) => valid(x)
}
//------------------------------------------------------------------------------
def isValid:Boolean =
this match {
case Validated.Invalid(x) => false
case Validated.Valid(x) => true
}
def isInvalid:Boolean =
!isValid
//------------------------------------------------------------------------------
def exists(pred:Predicate[T]):Boolean =
this match {
case Validated.Invalid(x) => false
case Validated.Valid(x) => pred(x)
}
def forall(pred:Predicate[T]):Boolean =
this match {
case Validated.Invalid(x) => true
case Validated.Valid(x) => pred(x)
}
//------------------------------------------------------------------------------
def iterator:Iterator[T] =
this match {
case Validated.Invalid(x) => Iterator.empty
case Validated.Valid(x) => Iterator single x
}
def foreach(effect:Effect[T]):Unit =
this match {
case Validated.Invalid(x) => ()
case Validated.Valid(x) => effect(x)
}
def map[U](func:T=>U):Validated[E,U] =
this match {
case Validated.Invalid(x) => Validated.invalid(x)
case Validated.Valid(x) => Validated.valid(func(x))
}
def flatMap[EE>:E,U](func:T=>Validated[EE,U]):Validated[EE,U] =
this match {
case Validated.Invalid(x) => Validated.invalid(x)
case Validated.Valid(x) => func(x)
}
def flatten[EE>:E,U](implicit ev: T <:< Validated[EE,U]):Validated[EE,U] =
flatMap(ev)
def ap[EE>:E:Semigroup,U,V](that:Validated[EE,U])(implicit ev: T <:< (U=>V)):Validated[EE,V] =
(this map2 that)(_(_))
def product[EE>:E:Semigroup,U](that:Validated[EE,U]):Validated[EE,(T,U)] =
(this map2 that)((_,_))
def map2[EE>:E,U,V](that:Validated[EE,U])(func:(T,U)=>V)(implicit cc:Semigroup[EE]):Validated[EE,V] =
(this, that) match {
case (Validated.Invalid(a), Validated.Valid(_)) => Validated.Invalid(a)
case (Validated.Valid(_), Validated.Invalid(b)) => Validated.Invalid(b)
case (Validated.Invalid(a), Validated.Invalid(b)) => Validated.Invalid(cc.combine(a, b))
case (Validated.Valid(a), Validated.Valid(b)) => Validated.Valid(func(a, b))
}
/** handy replacement for tried.toSeq.flatten abusing Factory as a Zero typeclass */
def flattenMany[U,CC[_]](implicit ev: T <:< CC[U], factory:Factory[U,CC[U]]):CC[U] =
// toOption.flattenMany
this map ev match {
case Validated.Invalid(_) => factory.newBuilder.result()
case Validated.Valid(cc) => cc
}
//------------------------------------------------------------------------------
def swap:Validated[T,E] =
this match {
case Validated.Invalid(x) => Validated.valid(x)
case Validated.Valid(x) => Validated.invalid(x)
}
def withSwapped[EE,TT](func:Validated[T,E]=>Validated[TT,EE]):Validated[EE,TT] =
func(swap).swap
def bimap[EE,TT](invalidFunc:E=>EE, validFunc:T=>TT):Validated[EE,TT] =
this match {
case Validated.Invalid(x) => Validated.invalid(invalidFunc(x))
case Validated.Valid(x) => Validated.valid(validFunc(x))
}
//------------------------------------------------------------------------------
def invalidMap[EE](func:E=>EE):Validated[EE,T] =
this match {
case Validated.Invalid(x) => Validated.invalid(func(x))
case Validated.Valid(x) => Validated.valid(x)
}
def invalidFlatMap[EE,TT>:T](func:E=>Validated[EE,TT]):Validated[EE,TT] =
this match {
case Validated.Invalid(x) => func(x)
case Validated.Valid(x) => Validated.valid(x)
}
def invalidFlatten[EE,TT>:T](implicit ev: E <:< Validated[EE,TT]):Validated[EE,TT] =
invalidFlatMap(ev)
def invalidToOption:Option[E] =
this match {
case Validated.Invalid(x) => Some(x)
case Validated.Valid(x) => None
}
//------------------------------------------------------------------------------
// NOTE cats' orElse drops errors, this is like cats' <+>
def or[EE>:E,TT>:T](that:Validated[EE,TT])(implicit cc:Semigroup[EE]):Validated[EE,TT] =
(this, that) match {
case (Validated.Invalid(a), Validated.Invalid(b)) => Validated.invalid(cc.combine(a, b))
case (Validated.Valid(a), _) => Validated.valid(a)
case (_, Validated.Valid(b)) => Validated.valid(b)
}
def getOrElse[TT>:T](that: =>TT):TT =
this match {
case Validated.Invalid(x) => that
case Validated.Valid(x) => x
}
def getOrRescue[TT>:T](func:E=>TT):TT =
this match {
case Validated.Invalid(x) => func(x)
case Validated.Valid(x) => x
}
def getOrError(s: =>String):T =
getOrElse(sys error s)
def getOrThrow(func:E=>Throwable):T =
this match {
case Validated.Invalid(x) => throw func(x)
case Validated.Valid(x) => x
}
//------------------------------------------------------------------------------
def rescue[TT>:T](func:E=>Option[TT]):Validated[E,TT] =
this match {
case Validated.Invalid(x) => func(x) map Validated.valid getOrElse Validated.invalid(x)
case Validated.Valid(x) => Validated.valid(x)
}
def reject[EE>:E](func:T=>Option[EE]):Validated[EE,T] =
this match {
case Validated.Invalid(x) => Validated.invalid(x)
case Validated.Valid(x) => func(x) map Validated.invalid getOrElse Validated.valid(x)
}
def validByOr[EE>:E](func:Predicate[T], invalid: =>EE):Validated[EE,T] =
this match {
case Validated.Invalid(x) => Validated.invalid(x)
case Validated.Valid(x) => if (func(x)) Validated.valid(x) else Validated.invalid(invalid)
}
def validNotByOr[EE>:E](func:Predicate[T], invalid: =>EE):Validated[EE,T] =
this match {
case Validated.Invalid(x) => Validated.invalid(x)
case Validated.Valid(x) => if (!func(x)) Validated.valid(x) else Validated.invalid(invalid)
}
def collapseOr[EE>:E,TT](func:T=>Option[TT], invalid: =>EE):Validated[EE,TT] =
this match {
case Validated.Invalid(x) => Validated.invalid(x)
case Validated.Valid(x) => func(x) map Validated.valid getOrElse Validated.invalid(invalid)
}
def collectOr[EE>:E,TT](func:PartialFunction[T,TT], invalid: =>EE):Validated[EE,TT] =
collapseOr(func.lift, invalid)
//------------------------------------------------------------------------------
def validEffect(effect:Effect[T]):this.type = {
this foreach effect
this
}
def invalidEffect(effect:Effect[E]):this.type = {
invalidToOption foreach effect
this
}
//------------------------------------------------------------------------------
def toEither:Either[E,T] =
this match {
case Validated.Invalid(x) => Left(x)
case Validated.Valid(x) => Right(x)
}
def toOption:Option[T] =
this match {
case Validated.Invalid(x) => None
case Validated.Valid(x) => Some(x)
}
def toSeq:Seq[T] =
toVector
def toList:List[T] =
this match {
case Validated.Invalid(x) => Nil
case Validated.Valid(x) => List(x)
}
def toVector:Vector[T] =
this match {
case Validated.Invalid(x) => Vector.empty
case Validated.Valid(x) => Vector(x)
}
//------------------------------------------------------------------------------
def toEitherT[F[_],EE>:E,TT>:T](implicit F:Applicative[F]):EitherT[F,EE,TT] =
EitherT fromEither toEither
}
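/** Hedged usage sketch (added for illustration; the names and messages below are
  * assumptions, not part of the original library). */
private object ValidatedUsageExample {
	def parsePort(s:String):Validated[String,Int] =
		Validated.switch(s.nonEmpty && (s forall (_.isDigit)), "not a number: " + s, s.toInt)

	def describe(v:Validated[String,Int]):String =
		v.cata(problem => "invalid: " + problem, port => "port " + port)
}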
| ritschwumm/scutil | modules/core/src/main/scala/scutil/lang/Validated.scala | Scala | bsd-2-clause | 8,580 |
package org.crashstars.spark
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
import org.crashstars.common.Logging
/**
* Created by anavidad on 13/10/15.
*/
class SparkUtils(conf: SparkConf) extends Logging {
def withSparkContext(testCode: (SparkContext) => Any): Unit = {
val sc = new SparkContext(conf)
testCode(sc)
if (sc != null) sc.stop()
}
def withSparkSQLContext(testCode: (SparkContext, SQLContext) => Any): Unit = {
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
testCode(sc, sqlContext)
if (sc != null) sc.stop()
}
}
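/**
 * Hedged usage sketch (added for illustration): the master URL and app name below
 * are assumptions, not taken from the original project.
 */
private object SparkUtilsUsageExample {
  def run(): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("spark-utils-example")
    val utils = new SparkUtils(conf)
    // The loan pattern guarantees the SparkContext is stopped after the body runs.
    utils.withSparkContext { sc =>
      val n = sc.parallelize(1 to 10).count()
      println(s"count = $n")
    }
  }
}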
| anavidad3/PoC-spark-scala-maven | src/main/scala/org/crashstars/spark/SparkUtils.scala | Scala | apache-2.0 | 631 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.integration.torch
import com.intel.analytics.bigdl.dllib.nn.{ClassNLLCriterion, MSECriterion, ParallelCriterion}
import com.intel.analytics.bigdl.dllib.tensor.{Storage, Tensor}
import com.intel.analytics.bigdl.dllib.utils.{T, Table}
import com.intel.analytics.bigdl.dllib.utils.Engine
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class ParallelCriterionSpec extends TorchSpec {
"A ParallelCriterion " should "generate correct output and grad" in {
torchCheck()
val seed = 100
Random.setSeed(seed)
val pc = new ParallelCriterion[Double]()
val input1 = Tensor[Double](2, 10).apply1(_ => Random.nextDouble())
val input2 = Tensor[Double](2, 10).apply1(_ => Random.nextDouble())
val input = T()
input(1.0) = input1
input(2.0) = input2
val target1 = Tensor[Double](Storage(Array(2.0, 5.0)))
val target2 = Tensor[Double](2, 10).apply1(_ => Random.nextDouble())
val target = T()
target(1.0) = target1
target(2.0) = target2
val nll = new ClassNLLCriterion[Double]()
val mse = new MSECriterion[Double]()
pc.add(nll, 0.3).add(mse, 0.2)
val start = System.nanoTime()
val loss = pc.forward(input, target)
val gradOutput = pc.backward(input, target)
val scalaTime = System.nanoTime() - start
val code = """
nll = nn.ClassNLLCriterion()
mse = nn.MSECriterion()
pc = nn.ParallelCriterion():add(nll, 0.3):add(mse, 0.2)
loss = pc:forward(input, target)
gradOutput = pc:backward(input, target)
gradOutput1 = gradOutput[1]
gradOutput2 = gradOutput[2]
""".stripMargin
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "target" -> target),
Array("loss", "gradOutput1", "gradOutput2"))
val luaLoss = torchResult("loss").asInstanceOf[Double]
val luaGradOutput1 = torchResult("gradOutput1").asInstanceOf[Tensor[Double]]
val luaGradOutput2 = torchResult("gradOutput2").asInstanceOf[Tensor[Double]]
val luaGradOutput = T(luaGradOutput1, luaGradOutput2)
luaLoss should be (loss)
luaGradOutput should be (gradOutput)
println("Test case : ParallelCriterion, Torch : " + luaTime +
" s, Scala : " + scalaTime / 1e9 + " s")
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/integration/torch/ParallelCriterionSpec.scala | Scala | apache-2.0 | 2,867 |
package net.tomasherman.specus.server.api.net
import net.tomasherman.specus.common.api.net.Packet
/**
* This file is part of Specus.
*
* Specus is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Specus is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with Specus. If not, see <http://www.gnu.org/licenses/>.
*
*/
/** Thrown when packet encoder is not found */
class PacketEncoderNotFoundException(val packet: Packet) extends Exception
/** Thrown when packet decoder is not found */
class BufferDecoderNotFoundException(val packetId: Byte) extends Exception
/** Thrown when something goes wrong with packet decoding */
class DecodingErrorException(val expected: String, val value: Any) extends Exception
class MagicCodecConstructorParameterNotSupported(val clazz:Class[_]) extends Exception | tomasherman/specus | server_api/src/main/scala/net/exceptions.scala | Scala | gpl-3.0 | 1,258 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.orca.net
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.T
import com.intel.analytics.bigdl.dllib.common.zooUtils
import com.intel.analytics.bigdl.dllib.keras.Predictable
import com.intel.analytics.bigdl.dllib.net.NetUtils
import com.intel.analytics.bigdl.orca.tfpark.{GraphRunner, TFUtils}
import org.slf4j.LoggerFactory
import org.tensorflow.framework.{GraphDef, MetaGraphDef}
import org.tensorflow.op.Ops
import org.tensorflow.op.core.Placeholder
import org.tensorflow.{DataType, Graph, SavedModelBundle}
import scala.reflect.ClassTag
private[bigdl] class TFNetForInference(graphRunner: GraphRunner,
inputs: Array[String],
inputTypes: Array[Int],
outputs: Array[String],
outputTypes: Array[Int],
variables: Array[String],
variableTypes: Array[Int],
variableAssignPlaceholders: Array[String],
assignVariableOps: Array[String],
initWeights: Array[Tensor[Float]],
initOp: Option[String])
extends AbstractModule[Activity, Activity, Float] with Predictable[Float] {
protected val module: Module[Float] = this
implicit val ev = TensorNumeric.NumericFloat
implicit val tag: ClassTag[Float] = ClassTag.Float
System.setProperty("bigdl.ModelBroadcastFactory",
"com.intel.analytics.bigdl.orca.tfpark.TFModelBroadcastFactory")
override def parameters(): (Array[Tensor[Float]], Array[Tensor[Float]]) = {
(weights, gradWeights)
}
private val weights = initWeights
private val gradWeights = variables.map(_ => Tensor[Float]())
private val graphOutputs = {
val graphOuts = Vector.newBuilder[Tensor[Float]]
var i = 0
while (i < outputs.length) {
graphOuts += Tensor[Float]()
i += 1
}
graphOuts.result()
}
output = {
if (outputs.length == 1) {
graphOutputs(0)
} else {
val out = T()
var i = 0
while (i < outputs.length) {
out.insert(graphOutputs(i))
i += 1
}
out
}
}
gradInput = {
if (inputs.length == 1) {
Tensor[Float]()
} else {
val t = T()
var i = 0
while (i < inputs.length) {
t.insert(Tensor[Float]())
i = i + 1
}
t
}
}
private def runInitOp(): Unit = {
graphRunner.run(
input = Vector.empty,
inputNames = Vector.empty,
inputTypes = Vector.empty,
output = Vector.empty,
outputNames = Vector.empty,
outputTypes = Vector.empty,
targets = Vector(initOp.get))
}
private def setVariableIntoTF(weights: Array[Tensor[Float]],
inputNames: Array[String],
variableTypes: Array[DataType],
assignOps: Array[String]) = {
graphRunner.run(
input = weights.toVector,
inputNames = inputNames.toVector,
inputTypes = variableTypes.toVector,
output = Vector.empty,
outputNames = Vector.empty,
outputTypes = Vector.empty,
targets = assignOps.toVector)
}
@transient
private lazy val variableInited = {
if (weights.length > 0) {
setVariableIntoTF(weights, variableAssignPlaceholders,
variableTypes.map(TFUtils.tfenum2datatype), assignVariableOps)
}
true
}
@transient
private lazy val tableInited = {
if (initOp.isDefined) {
runInitOp()
}
true
}
override def updateOutput(input: Activity): Activity = {
zooUtils.timeIt("updateOutput") {
assert(variableInited)
assert(tableInited)
val feeds = zooUtils.activity2VectorBuilder(input)
val types = inputTypes.toVector.map(TFUtils.tfenum2datatype)
val outputTypes = Vector.fill(outputs.length)(DataType.FLOAT)
graphRunner.run(input = feeds.result(), inputNames = inputs.toVector, inputTypes = types,
output = graphOutputs, outputNames = outputs.toVector, outputTypes = outputTypes,
targets = Vector.empty)
}
output
}
override def updateGradInput(
input: Activity,
gradOutput: Activity): Activity = {
NetUtils.generateZeroGrad(input, gradInput)
gradInput
}
}
object TFNetForInference {
TFNet
private val INIT_OP_SIGNATURE_KEY = "__saved_model_init_op"
private val MAIN_OP_KEY = "saved_model_main_op"
private val LEGACY_INIT_OP_KEY = "legacy_init_op"
private val DEFAULT_TAG = "serve"
private val DEFAULT_SIGNATURE = "serving_default"
val logger = LoggerFactory.getLogger(getClass)
import scala.collection.JavaConverters._
val frameworkDataType2Class = Map(
org.tensorflow.framework.DataType.DT_FLOAT -> classOf[java.lang.Float],
org.tensorflow.framework.DataType.DT_INT32 -> classOf[java.lang.Integer],
org.tensorflow.framework.DataType.DT_INT64 -> classOf[java.lang.Long],
org.tensorflow.framework.DataType.DT_BOOL -> classOf[java.lang.Boolean],
org.tensorflow.framework.DataType.DT_STRING -> classOf[java.lang.String]
)
val frameworkDataType2DataType = Map(
org.tensorflow.framework.DataType.DT_FLOAT -> org.tensorflow.DataType.FLOAT,
org.tensorflow.framework.DataType.DT_INT32 -> org.tensorflow.DataType.INT32,
org.tensorflow.framework.DataType.DT_INT64 -> org.tensorflow.DataType.INT64,
org.tensorflow.framework.DataType.DT_BOOL -> org.tensorflow.DataType.BOOL,
org.tensorflow.framework.DataType.DT_STRING -> org.tensorflow.DataType.STRING
)
/*
load TensorFlow's saved_model
TensorFlow's Java API provides a SavedModelBundle function that will
return a graph and a session. However, we cannot use the graph and
session in TFNet as both of them are not serializable, thus cannot
be broadcast to executors.
     To solve this problem, the following approach is implemented:
Step 1. Find all the variables in the graph and get those variable
values out using the returned session
Step 2. Pass those variables to TFNet as a model's weights
Step 3. Export the returned graph as byte array and pass to TFNet
Step 4. Create a new graph and session on executor
Step 5. Initialize the variables on executor using model's weights
     To enable reading and re-assigning variable values, additional
     operations need to be added to the graph. For each variable, a read
     operation (for resource variables), an assign operation and a placeholder
     feeding that assign operation are added to the graph.
*/
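  // For illustration (op names are only indicative; they are created under the "analytics-zoo" sub-scope below):
  // a resource variable `w` gets a ReadVariableOp (cast to float, used to fetch its value in Step 1),
  // a Placeholder with the same shape (used to feed a new value in Step 5), and an AssignVariableOp
  // wired from `w` and that placeholder (run in Step 5 to restore the value on the executor).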
def fromSavedModel(modelPath: String, tag: Option[String],
signature: Option[String],
inputs: Option[Array[String]],
outputs: Option[Array[String]],
sessionConfig: Array[Byte],
tableInitOp: Option[String] = None): TFNetForInference = {
if (tag.isEmpty) {
logger.warn(s"Loading TensorFlow SavedModel: " +
s"SavedModel tag is not defined, using <$DEFAULT_TAG>")
}
val savedModelBundle = SavedModelBundle.load(modelPath, tag.getOrElse(DEFAULT_TAG))
val metaGraphDef = MetaGraphDef.parseFrom(savedModelBundle.metaGraphDef())
val initOp = if (tableInitOp.isDefined) tableInitOp else getInitOp(metaGraphDef)
val (inputNames, outputNames) = if (inputs.isEmpty || outputs.isEmpty) {
if (inputs.isEmpty) {
logger.warn("Loading TensorFlow SavedModel: inputs is not defined, finding inputs " +
"in signature")
}
if (outputs.isEmpty) {
logger.warn("Loading TensorFlow SavedModel: outputs is not defined, finding outputs " +
"in signature")
}
if (signature.isEmpty) {
logger.warn("Loading TensorFlow SavedModel: SavedModel signature is not defined," +
s"using default signature <$DEFAULT_SIGNATURE>")
}
val (inputNameCandidate, outputNameCandidate) = getInputOutputNames(metaGraphDef,
signature.getOrElse(DEFAULT_SIGNATURE))
(inputs.getOrElse(inputNameCandidate), outputs.getOrElse(outputNameCandidate))
} else {
(inputs.get, outputs.get)
}
val graph = savedModelBundle.graph()
val ops = Ops.create(graph).withSubScope("analytics-zoo")
val variableTypes = Set("Variable", "VariableV2", "VarHandleOp")
val graphBytes = graph.toGraphDef
val graphDef = GraphDef.parseFrom(graphBytes)
    // the following map function adds a read operation, an assign operation
    // and a placeholder for each variable in the graph
val newOps = graphDef.getNodeList.asScala.filter { node =>
variableTypes(node.getOp)
}.map { x =>
val name = x.getName
val dataType = x.getAttrMap.get("dtype").getType
val opType = x.getOp
val operation = graph.operation(name)
val dataTypeClass = frameworkDataType2Class(dataType)
val operationOutput = operation.output(0)
if (opType == "VarHandleOp") {
val readVariable = ops.readVariableOp(operationOutput, dataTypeClass)
val floatVariable = ops.dtypes.cast(readVariable, classOf[java.lang.Float])
val placeholder = ops.placeholder(dataTypeClass,
Placeholder.shape(readVariable.asOutput().shape()))
// do it manually to get a reference of the op and get the op name
val builder = graph.opBuilder("AssignVariableOp",
ops.scope().makeOpName("AssignVariableOp"))
builder.addInput(operationOutput)
builder.addInput(placeholder.asOutput())
val assignOp = builder.build()
(floatVariable.asOutput().op().name(),
placeholder.asOutput().op().name(), assignOp.name(),
dataType, operationOutput.shape(), operation.name())
} else {
val readVariable = operationOutput
val floatVariable = ops.dtypes.cast(readVariable, classOf[java.lang.Float])
val placeholder = ops.placeholder(dataTypeClass,
Placeholder.shape(operationOutput.shape()))
// do it manually to get a reference of the op and get the op name
val builder = graph.opBuilder("Assign",
ops.scope().makeOpName("Assign"))
builder.addInput(operationOutput)
builder.addInput(placeholder.asOutput())
val assignOp = builder.build()
(floatVariable.asOutput().op().name(),
placeholder.asOutput().op().name(), assignOp.name(),
dataType, operationOutput.shape(), operation.name())
}
}
val readVariableNames = newOps.map(_._1)
val placeholderNames = newOps.map(_._2)
val assign = newOps.map(_._3)
val dataTypes = newOps.map(_._4)
val dataShapes = newOps.map(x => (x._5, x._6))
val graphdef = GraphDef.parseFrom(graph.toGraphDef)
val graphRunner = new GraphRunner(
graph.toGraphDef,
null, null, null, null,
sessionConfig)
val session = savedModelBundle.session()
    // The ideal approach would be to fetch all the variables at once, but
    // some of the variables might not be in the saved_model, in which case the variable
    // is not initialized and cannot be fetched. Currently, there is no way to know
    // which ones are uninitialized until we fetch them and get an exception.
val weights = readVariableNames.zip(dataShapes).map { case (name, (shape, originalName)) =>
val runner = session.runner()
runner.fetch(name)
try {
val value = runner.run()
val bigdlTensor = Tensor[Float]()
TFUtils.tf2bigdl(value.get(0), bigdlTensor)
value.get(0).close()
bigdlTensor
} catch {
case _: Exception =>
TFNetForInference.logger.warn(s"Cannot find variable value for <$originalName>, " +
s"using default value zero")
val shapeArr = new Array[Int](shape.numDimensions())
var i = 0
while (i < shape.numDimensions()) {
shapeArr(i) = shape.size(i).toInt
i += 1
}
Tensor[Float](sizes = shapeArr)
}
}.toArray
val inputTypes = inputNames.map(name2type(graph))
val outputTypes = outputNames.map(name2type(graph))
// clean up native resources
savedModelBundle.close()
new TFNetForInference(graphRunner = graphRunner,
inputs = inputNames,
inputTypes = inputTypes,
outputs = outputNames,
outputTypes = outputTypes,
variables = readVariableNames.toArray,
variableTypes = dataTypes.map(_.getNumber).toArray,
variableAssignPlaceholders = placeholderNames.toArray,
assignVariableOps = assign.toArray,
initWeights = weights, initOp)
}
private def name2type(graph: Graph)(name: String) = {
val opAndPort = name.split(":")
val op = opAndPort.head
val port = opAndPort(1)
val opRef = graph.operation(op)
if (opRef == null) {
throw new IllegalArgumentException(s"Cannot find input op <$name>")
}
TFUtils.tfdatatype2enum(opRef.output(port.toInt).dataType())
}
private def getInputOutputNames(metaGraphDef: MetaGraphDef,
signature: String): (Array[String], Array[String]) = {
val signatureDef = metaGraphDef.getSignatureDefOrThrow(signature)
val inputMap = signatureDef.getInputsMap
val sortedInputKeys = inputMap.keySet().asScala.toArray.sorted
val inputNames = sortedInputKeys.map(inputMap.get(_).getName)
val outputMap = signatureDef.getOutputsMap
val sortedOutputKeys = outputMap.keySet().asScala.toArray.sorted
val outputNames = sortedOutputKeys.map(outputMap.get(_).getName)
(inputNames, outputNames)
}
def getOpFromSignatureDef(metaGraphDef: MetaGraphDef, signatureKey: String): Option[String] = {
if (metaGraphDef.containsSignatureDef(signatureKey)) {
val signatureDef = metaGraphDef.getSignatureDefOrThrow(signatureKey)
val tensorInfo = signatureDef.getOutputsOrThrow(signatureKey)
Some(tensorInfo.getName)
} else {
None
}
}
def getOpFromCollection(metaGraphDef: MetaGraphDef, opKey: String): Option[String] = {
if (metaGraphDef.containsCollectionDef(opKey)) {
val collectionDef = metaGraphDef.getCollectionDefOrThrow(opKey)
val name = collectionDef.getNodeList.getValue(0)
Some(name)
} else {
None
}
}
def getInitOp(metaGraphDef: MetaGraphDef): Option[String] = {
val signatureOp = getOpFromSignatureDef(metaGraphDef, INIT_OP_SIGNATURE_KEY)
val mainOp = getOpFromCollection(metaGraphDef, MAIN_OP_KEY)
val legacyIntOP = getOpFromCollection(metaGraphDef, LEGACY_INIT_OP_KEY)
val result = List(signatureOp, mainOp, legacyIntOP).flatten
result.headOption
}
}
| intel-analytics/BigDL | scala/orca/src/main/scala/com/intel/analytics/bigdl/orca/net/TFNetForInference.scala | Scala | apache-2.0 | 15,654 |
package org.rovak.steamclient.steam3
import steam.SteamId
import rovak.steamkit.steam.language.EPersonaState
object Commands {
/**
* Sends a chat message
*
* @param steamId SteamId to send to
* @param message Message to send
*/
case class SendChatMessage(steamId: SteamId, message: String)
/**
* Changes the status of the bot
*
* @param name Name
* @param status Status
*/
case class Status(name: String, status: EPersonaState)
}
| Rovak/scala-steamkit | steamkit/src/main/scala/org/rovak/steamclient/steam3/Commands.scala | Scala | mit | 472 |
/**
* User: Alexander Slesarenko
* Date: 11/17/13
*/
package scalan.meta
import scala.language.implicitConversions
import scala.tools.nsc.Global
import scala.reflect.internal.util.RangePosition
import scala.reflect.internal.util.OffsetPosition
import scalan.meta.ScalanAst._
import java.util.regex.Pattern
trait ScalanParsers {
val global: Global
type Compiler = global.type
lazy val compiler: Compiler = global
import compiler._
implicit def nameToString(name: Name): String = name.toString
implicit class OptionListOps[A](opt: Option[List[A]]) {
def flatList: List[A] = opt.toList.flatten
}
private def positionString(tree: Tree) = {
tree.pos match {
case pos: RangePosition =>
val path = pos.source.file.canonicalPath
s"file $path at ${pos.line}:${pos.column} (start ${pos.point - pos.start} before, end ${pos.end - pos.point} after)"
case pos: OffsetPosition =>
val path = pos.source.file.canonicalPath
s"file $path at ${pos.line}:${pos.column}"
case pos => pos.toString
}
}
def !!!(msg: String, tree: Tree) = {
val fullMsg = s"$msg at ${positionString(tree)}"
throw new IllegalStateException(fullMsg)
}
def !!!(msg: String) = Base.!!!(msg)
def ???(tree: Tree) = {
val pos = tree.pos
    val msg = s"Unhandled case in ${positionString(tree)}:\nAST: ${showRaw(tree)}\n\nCode for AST: $tree"
throw new IllegalStateException(msg)
}
def parse(name: String, tree: Tree) = tree match {
case pd: PackageDef =>
entityModule(pd)
case tree =>
      throw new Exception(s"Unexpected tree in $name:\n\n$tree")
}
def config: CodegenConfig
def parseDeclaredImplementations(entities: List[STraitOrClassDef], moduleDefOpt: Option[ClassDef]) = {
val decls = for {
dslModule <- moduleDefOpt.toList
t <- entities
stdOpsTrait <- findClassDefByName(dslModule.impl.body, t.name + "Decls")
} yield {
(t.name, stdOpsTrait)
}
val m = decls.map { case (name, decl) =>
val methods = decl.impl.body.collect { case item: DefDef => item }
(name, SDeclaredImplementation(methods.map(methodDef(_))))
}.toMap
SDeclaredImplementations(m)
}
def findClassDefByName(trees: List[Tree], name: String) =
trees.collectFirst {
case cd: ClassDef if cd.name.toString == name => cd
}
def tpeUseExpr(arg: STpeArg): STpeExpr = STraitCall(arg.name, arg.tparams.map(tpeUseExpr))
def wrapperImpl(entity: STraitDef, bt: STpeExpr, doRep: Boolean): SClassDef = {
val entityName = entity.name
val entityImplName = entityName + "Impl"
val typeUseExprs = entity.tpeArgs.map(tpeUseExpr)
val valueType = if (doRep) STraitCall("Rep", List(bt)) else bt
SClassDef(
name = entityImplName,
tpeArgs = entity.tpeArgs,
args = SClassArgs(List(SClassArg(false, false, true, "wrappedValue", valueType, None))),
implicitArgs = entity.implicitArgs,
ancestors = List(STraitCall(entity.name, typeUseExprs)),
body = List(),
selfType = None,
companion = None,
// companion = defs.collectFirst {
// case c: STraitOrClassDef if c.name.toString == entityImplName + "Companion" => c
// },
true, Nil
)
}
def isInternalMethodOfCompanion(md: SMethodDef, declaringDef: STraitOrClassDef): Boolean = {
val moduleVarName = md.name + global.nme.MODULE_VAR_SUFFIX.toString
val hasClass = declaringDef.body.collectFirst({ case d: SClassDef if d.name == md.name => ()}).isDefined
val hasModule = declaringDef.body.collectFirst({ case d: SValDef if d.name == moduleVarName => ()}).isDefined
val hasMethod = declaringDef.body.collectFirst({ case d: SMethodDef if d.name == md.name => ()}).isDefined
hasClass && hasModule && hasMethod
}
def isInternalClassOfCompanion(cd: STraitOrClassDef, outer: STraitOrClassDef): Boolean = {
val moduleVarName = cd.name + global.nme.MODULE_VAR_SUFFIX.toString
if (cd.ancestors.nonEmpty) return false
val hasClass = outer.body.collectFirst({ case d: SClassDef if d.name == cd.name => ()}).isDefined
val hasModule = outer.body.collectFirst({ case d: SValDef if d.name == moduleVarName => ()}).isDefined
val hasMethod = outer.body.collectFirst({ case d: SMethodDef if d.name == cd.name => ()}).isDefined
hasClass && hasModule && hasMethod
}
def entityModule(fileTree: PackageDef) = {
val packageName = fileTree.pid.toString
val statements = fileTree.stats
val imports = statements.collect {
case i: Import => importStat(i)
}
val moduleTraitTree = statements.collect {
case cd: ClassDef if cd.mods.isTrait && !cd.name.contains("Dsl") => cd
} match {
case Seq(only) => only
case seq => !!!(s"There must be exactly one module trait in file, found ${seq.length}")
}
val moduleTrait = traitDef(moduleTraitTree, Some(moduleTraitTree))
val moduleName = moduleTrait.name
val hasDsl =
findClassDefByName(fileTree.stats, moduleName + "Dsl").isDefined
val dslStdModuleOpt = findClassDefByName(fileTree.stats, moduleName + "DslStd")
val dslExpModuleOpt = findClassDefByName(fileTree.stats, moduleName + "DslExp")
val hasDslStdModule = dslStdModuleOpt.isDefined
val hasDslExpModule = dslExpModuleOpt.isDefined
val defs = moduleTrait.body
val entityRepSynonym = defs.collectFirst { case t: STpeDef => t }
val traits = defs.collect {
case t: STraitDef if !(t.name.endsWith("Companion") || t.hasAnnotation("InternalType")) => t
}
val entity = traits.headOption.getOrElse {
throw new IllegalStateException(s"Invalid syntax of entity module trait $moduleName. First member trait must define the entity, but no member traits found.")
}
val concreteClasses = moduleTrait.getConcreteClasses.filterNot(isInternalClassOfCompanion(_, moduleTrait))
val classes = entity.optBaseType match {
case Some(bt) =>
wrapperImpl(entity, bt, true) :: concreteClasses
case None =>
concreteClasses
}
val methods = defs.collect {
case md: SMethodDef if !isInternalMethodOfCompanion(md, moduleTrait) => md
}
val declaredStdImplementations = parseDeclaredImplementations(traits ++ classes, dslStdModuleOpt)
val declaredExpImplementations = parseDeclaredImplementations(traits ++ classes, dslExpModuleOpt)
SEntityModuleDef(packageName, imports, moduleName,
entityRepSynonym, entity, traits, classes, methods,
moduleTrait.selfType, Nil,
Some(declaredStdImplementations),
Some(declaredExpImplementations),
hasDsl, hasDslStdModule, hasDslExpModule, moduleTrait.ancestors)
}
def importStat(i: Import): SImportStat = {
SImportStat(i.toString.stripPrefix("import "))
}
def isEvidenceParam(vd: ValDef) = vd.name.toString.startsWith("evidence$")
def tpeArgs(typeParams: List[TypeDef], possibleImplicits: List[ValDef]): List[STpeArg] = {
val evidenceTypes = possibleImplicits.filter(isEvidenceParam(_)).map(_.tpt)
def tpeArg(tdTree: TypeDef): STpeArg = {
val bound = tdTree.rhs match {
case TypeBoundsTree(low, high) =>
if (high.toString == "_root_.scala.Any")
None
else
optTpeExpr(high)
case tt: TypeTree => parseType(tt.tpe) match {
case STpeTypeBounds(_, STpePrimitive("Any", _)) => None
case STpeTypeBounds(_, hi) => Some(hi)
case tpe => ???(tdTree)
}
case _ => ???(tdTree)
}
val contextBounds = evidenceTypes.collect {
case AppliedTypeTree(tpt, List(arg)) if arg.toString == tdTree.name.toString =>
Some(tpt.toString)
case _ => None
}.flatten
val tparams = tdTree.tparams.map(tpeArg)
STpeArg(tdTree.name, bound, contextBounds, tparams, tdTree.mods.flags)
}
typeParams.map(tpeArg)
}
// exclude default parent
def ancestors(trees: List[Tree]) = trees.map(traitCall).filter(tr => !Set("AnyRef", "scala.AnyRef").contains(tr.name))
def findCompanion(name: String, parentScope: Option[ImplDef]) = parentScope match {
case Some(scope) => scope.impl.body.collect {
case c: ClassDef if config.isAlreadyRep && c.name.toString == name + "Companion" =>
if (c.mods.isTrait) traitDef(c, parentScope) else classDef(c, parentScope)
case m: ModuleDef if !config.isAlreadyRep && !m.mods.isSynthetic && m.name.toString == name => objectDef(m)
}.headOption
case None => None
}
def traitDef(td: ClassDef, parentScope: Option[ImplDef]): STraitDef = {
val tpeArgs = this.tpeArgs(td.tparams, Nil)
val ancestors = this.ancestors(td.impl.parents)
val body = td.impl.body.flatMap(optBodyItem(_, Some(td)))
val selfType = this.selfType(td.impl.self)
val name = td.name.toString
val companion = findCompanion(name, parentScope)
val annotations = parseAnnotations(td)((n,as) => STraitOrClassAnnotation(n,as.map(parseExpr)))
STraitDef(name, tpeArgs, ancestors, body, selfType, companion, annotations)
}
def classDef(cd: ClassDef, parentScope: Option[ImplDef]): SClassDef = {
val ancestors = this.ancestors(cd.impl.parents)
val constructor = (cd.impl.body.collect {
case dd: DefDef if dd.name == nme.CONSTRUCTOR => dd
}) match {
case Seq(only) => only
case seq => !!!(s"Class ${cd.name} should have 1 constructor but has ${seq.length} constructors", cd)
}
// TODO simplify
val (args, implicitArgs) = constructor.vparamss match {
case Seq() =>
(classArgs(List.empty), classArgs(List.empty))
case Seq(nonImplConArgs) =>
(classArgs(nonImplConArgs), classArgs(List.empty))
case Seq(nonImplConArgs, implConArgs) =>
(classArgs(nonImplConArgs), classArgs(implConArgs))
case seq => !!!(s"Constructor of class ${cd.name} has more than 2 parameter lists, not supported")
}
val tpeArgs = this.tpeArgs(cd.tparams, constructor.vparamss.lastOption.getOrElse(Nil))
val body = cd.impl.body.flatMap(optBodyItem(_, Some(cd)))
val selfType = this.selfType(cd.impl.self)
val isAbstract = cd.mods.hasAbstractFlag
val name = cd.name.toString
val companion = findCompanion(name, parentScope)
val annotations = parseAnnotations(cd)((n,as) => STraitOrClassAnnotation(n,as.map(parseExpr)))
SClassDef(cd.name, tpeArgs, args, implicitArgs, ancestors, body, selfType, companion, isAbstract, annotations)
}
def objectDef(od: ModuleDef): SObjectDef = {
val ancestors = this.ancestors(od.impl.parents)
val body = od.impl.body.flatMap(optBodyItem(_, Some(od)))
SObjectDef(od.name, ancestors, body)
}
def classArgs(vds: List[ValDef]): SClassArgs = SClassArgs(vds.filter(!isEvidenceParam(_)).map(classArg))
def classArg(vd: ValDef): SClassArg = {
val tpe = tpeExpr(vd.tpt)
val default = optExpr(vd.rhs)
val isOverride = vd.mods.isAnyOverride
val isVal = vd.mods.isParamAccessor
val annotations = parseAnnotations(vd)((n,as) => new SArgAnnotation(n, as.map(parseExpr)))
SClassArg(vd.mods.isImplicit, isOverride, isVal, vd.name, tpe, default, annotations)
}
def traitCall(tree: Tree): STraitCall = tree match {
case ident: Ident =>
STraitCall(ident.name, List())
case select: Select =>
STraitCall(select.name, List())
case AppliedTypeTree(tpt, args) =>
STraitCall(tpt.toString, args.map(tpeExpr))
case tt: TypeTree =>
val parsedType = parseType(tt.tpe)
parsedType match {
case call: STraitCall => call
case STpePrimitive(name, _) => STraitCall(name, List())
case _ =>
throw new IllegalArgumentException(parsedType.toString)
}
case tree => ???(tree)
}
def isExplicitMethod(md: DefDef): Boolean = {
if (nme.isConstructorName(md.name)) false
else if (md.mods.isSynthetic) false
else if (md.mods.isCaseAccessor) false
else if (md.mods.isParamAccessor) false
else true
}
def optBodyItem(tree: Tree, parentScope: Option[ImplDef]): Option[SBodyItem] = tree match {
case i: Import =>
Some(importStat(i))
case md: DefDef =>
if (isExplicitMethod(md))
md.tpt match {
case AppliedTypeTree(tpt, _) if tpt.toString == "Elem" =>
Some(methodDef(md, true))
case _ =>
Some(methodDef(md))
}
else
None
case td: TypeDef =>
val tpeArgs = this.tpeArgs(td.tparams, Nil)
val rhs = tpeExpr(td.rhs)
Some(STpeDef(td.name, tpeArgs, rhs))
case td: ClassDef if td.mods.isTrait =>
Some(traitDef(td, parentScope))
case cd: ClassDef if !cd.mods.isTrait =>
// don't include implicit conversion classes
if (!cd.mods.isImplicit)
Some(classDef(cd, parentScope))
else
None
case od: ModuleDef =>
Some(objectDef(od))
case vd: ValDef =>
if (!vd.mods.isParamAccessor) {
val tpeRes = optTpeExpr(vd.tpt)
val isImplicit = vd.mods.isImplicit
val isLazy = vd.mods.isLazy
Some(SValDef(vd.name, tpeRes, isLazy, isImplicit, parseExpr(vd.rhs)))
} else
None
case EmptyTree =>
None
// calls in constructor
case Select(_, _) =>
None
case Apply(_, _) =>
None
case tree => ???(tree)
}
object ExtractAnnotation {
def unapply(a: Tree): Option[(String, List[Tree])] = a match {
case Apply(Select(New(Ident(ident)), nme.CONSTRUCTOR), args) => Some((ident, args))
case _ => None
}
}
def parseAnnotations[A <: SAnnotation](md: MemberDef)(p: (String, List[Tree]) => A): List[A] = {
val annotations = md.mods.annotations.map {
case ExtractAnnotation (name, args) => p(name, args)
case a => !!! (s"Cannot parse annotation $a of MemberDef $md")
}
annotations
}
class HasAnnotation(annClass: String) {
def unapply(md: MemberDef): Option[List[Tree]] =
md.mods.annotations.collectFirst {
case ExtractAnnotation(name, args) if name == annClass => args
}
}
// val HasExternalAnnotation = new ExtractAnnotation("External")
// val HasConstructorAnnotation = new ExtractAnnotation("Constructor")
val HasArgListAnnotation = new HasAnnotation("ArgList")
val OverloadIdAnnotation = new HasAnnotation("OverloadId")
def methodDef(md: DefDef, isElem: Boolean = false) = {
val tpeArgs = this.tpeArgs(md.tparams, md.vparamss.lastOption.getOrElse(Nil))
val args0 = md.vparamss.map(methodArgs)
val args = if (!args0.isEmpty && args0.last.args.isEmpty) args0.init else args0
val tpeRes = optTpeExpr(md.tpt)
val isImplicit = md.mods.isImplicit
val isOverride = md.mods.isOverride
val optOverloadId = md match {
case OverloadIdAnnotation(List(Literal(Constant(overloadId)))) =>
Some(overloadId.toString)
case _ => None
}
val annotations = md.mods.annotations.map {
case ExtractAnnotation(name, args) => SMethodAnnotation(name, args.map(parseExpr))
case a => !!!(s"Cannot parse annotation $a of the method $md")
}
// val optExternal = md match {
// case HasExternalAnnotation(_) => Some(ExternalMethod)
// case HasConstructorAnnotation(_) => Some(ExternalConstructor)
// case _ => None
// }
val optBody: Option[SExpr] = optExpr(md.rhs)
val isTypeDesc = md.tpt match {
case AppliedTypeTree(tpt, _) if Set("Elem", "Cont").contains(tpt.toString) =>
true
case tpt =>
tpt.toString == "TypeDesc"
}
SMethodDef(md.name, tpeArgs, args, tpeRes, isImplicit, isOverride,
optOverloadId, annotations, optBody, isTypeDesc)
}
def methodArgs(vds: List[ValDef]): SMethodArgs = vds match {
case Nil => SMethodArgs(List.empty)
case vd :: _ =>
SMethodArgs(vds.filter(!isEvidenceParam(_)).map(methodArg))
}
def optTpeExpr(tree: Tree): Option[STpeExpr] = {
tree match {
case _ if tree.isEmpty => None
case _: ExistentialTypeTree => None
case tree => Some(tpeExpr(tree))
}
}
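  /** Maps an applied type onto an STpeExpr: scala.TupleN becomes STpeTuple, scala.FunctionN becomes
    * STpeFunc (its parameter types tupled into the domain when there is more than one), and any
    * other type constructor becomes an STraitCall with the given short name.
    */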
def formAppliedTypeTree(fullName: String, shortName: String, argTpeExprs: List[STpeExpr]) = {
    val tuplePattern = """^(_root_.)?scala.Tuple(\d+)$"""
    val funcPattern = """^(_root_.)?scala.Function(\d+)$"""
if (Pattern.matches(tuplePattern, fullName))
STpeTuple(argTpeExprs)
else if (Pattern.matches(funcPattern, fullName)) {
val domainTpeExpr = argTpeExprs.length match {
case 2 => argTpeExprs(0)
case n if n > 2 => STpeTuple(argTpeExprs.init)
case _ => !!!(s"fullName=$fullName shortName=$shortName argTpeExprs=$argTpeExprs")
}
STpeFunc(domainTpeExpr, argTpeExprs.last)
} else
STraitCall(shortName, argTpeExprs)
}
def tpeExpr(tree: Tree): STpeExpr = tree match {
case EmptyTree => STpeEmpty()
case ident: Ident =>
val name = ident.name.toString
STpePrimitives.getOrElse(name, STraitCall(name, List()))
case select: Select =>
STraitCall(select.name, List())
case AppliedTypeTree(tpt, args) =>
val argTpeExprs = args.map(tpeExpr)
val genericTypeString = tpt.toString
formAppliedTypeTree(genericTypeString, genericTypeString, argTpeExprs)
case tq"$tpt @$annot" => STpeAnnotated(tpeExpr(tpt), annot.toString)
case TypeBoundsTree(lo, hi) => STpeTypeBounds(tpeExpr(lo), tpeExpr(hi))
case SingletonTypeTree(ref) => STpeSingleton(parseExpr(ref))
case SelectFromTypeTree(qualifier, TypeName(name)) => STpeSelectFromTT(tpeExpr(qualifier), name)
case tq"..$parents { ..$defns }" => STpeCompound(parents.map(tpeExpr), defns.flatMap(defn => optBodyItem(defn, None)))
case tq"$tpt forSome { ..$defns }" => STpeExistential(tpeExpr(tpt), defns.flatMap(defn => optBodyItem(defn, None)))
case Bind(TypeName(name), body) => STpeBind(name, tpeExpr(body))
case tt: TypeTree => parseType(tt.tpe)
case tree => ???(tree)
}
def methodArg(vd: ValDef): SMethodArg = {
val tpe = tpeExpr(vd.tpt)
val default = optExpr(vd.rhs)
val annotations = parseAnnotations(vd)((n,as) => new SArgAnnotation(n, as.map(parseExpr)))
val isOverride = vd.mods.isAnyOverride
val isTypeDesc = tpe match {
case STraitCall(tname, _) if tname == "Elem" || tname == "Cont" => true
case _ => false
}
SMethodArg(vd.mods.isImplicit, isOverride, vd.name, tpe, default, annotations, isTypeDesc)
}
def selfType(vd: ValDef): Option[SSelfTypeDef] = {
val components = vd.tpt match {
case t if t.isEmpty =>
Nil
case CompoundTypeTree(Template(ancestors, _, _)) =>
ancestors.map(tpeExpr)
case t =>
List(tpeExpr(t))
}
if (components.isEmpty)
None
else
Some(SSelfTypeDef(vd.name.toString, components))
}
def optExpr(tree: Tree): Option[SExpr] = {
if (tree.isEmpty)
None
else
Some(parseExpr(tree))
}
def tree2Type(tree: Tree): Option[STpeExpr] = tree.tpe match {
case null => None
case tpe => Some(parseType(tpe))
}
def parseExpr(tree: Tree): SExpr = tree match {
case EmptyTree => SEmpty(tree2Type(tree))
case Literal(Constant(c)) => SConst(c, tree2Type(tree))
case Ident(TermName(name)) => SIdent(name, tree2Type(tree))
case q"$left = $right" => SAssign(parseExpr(left), parseExpr(right), tree2Type(tree))
case q"$name.super[$qual].$field" => SSuper(name, qual, field, tree2Type(tree))
case q"$expr.$tname" => SSelect(parseExpr(expr), tname, tree2Type(tree))
case Apply(Select(New(name), termNames.CONSTRUCTOR), args) =>
SContr(name.toString(), args.map(parseExpr), tree2Type(tree))
case Apply(Select(Ident(TermName("scala")), TermName(tuple)), args) if tuple.startsWith("Tuple") =>
STuple(args.map(parseExpr), tree2Type(tree))
case Block(init, last) => SBlock(init.map(parseExpr), parseExpr(last), tree2Type(tree))
case q"$mods val $tname: $tpt = $expr" =>
SValDef(tname, optTpeExpr(tpt), mods.isLazy, mods.isImplicit, parseExpr(expr))
case q"if ($cond) $th else $el" =>
SIf(parseExpr(cond), parseExpr(th), parseExpr(el), tree2Type(tree))
case q"$expr: $tpt" => SAscr(parseExpr(expr), tpeExpr(tpt), tree2Type(tree))
case q"(..$params) => $expr" =>
SFunc(params.map(param => parseExpr(param).asInstanceOf[SValDef]), parseExpr(expr), tree2Type(tree))
case q"$tpname.this" => SThis(tpname, tree2Type(tree))
case q"$expr: @$annot" => SAnnotated(parseExpr(expr), annot.toString, tree2Type(tree))
case TypeApply(fun: Tree, args: List[Tree]) =>
STypeApply(parseExpr(fun), args.map(tpeExpr), tree2Type(tree))
case q"$expr match { case ..$cases } " => parseMatch(expr, cases)
case q"{ case ..$cases }" => parseMatch(EmptyTree, cases)
case Apply(TypeApply(fun, targs), args) =>
SApply(parseExpr(fun), targs.map(tpeExpr), List(args.map(parseExpr)), tree2Type(tree))
case Apply(fun, args) =>
SApply(parseExpr(fun), Nil, List(args.map(parseExpr)), tree2Type(tree))
case bi => optBodyItem(bi, None) match {
case Some(item) => item
case None => throw new NotImplementedError(s"parseExpr: Error parsing of ${showRaw(bi)}")
}
}
def parseMatch(expr: Tree, cases: List[CaseDef]) = {
SMatch(parseExpr(expr), cases.map{_ match {
case cq"$pat if $guard => $body" => SCase(parsePattern(pat), parseExpr(guard), parseExpr(body))
case c => throw new NotImplementedError(s"parseExpr: match {case ${showRaw(c)}")
}})
}
object WildcardPattern {
def unapply(pat: Tree): Boolean = pat match {
case Bind(nme.WILDCARD, WildcardPattern()) => true
case Star(WildcardPattern()) => true
case x: Ident => treeInfo.isVarPattern(x)
case Alternative(ps) => ps forall unapply
case EmptyTree => true
case _ => false
}
}
def parsePattern(pat: Tree): SPattern = pat match {
case WildcardPattern() => SWildcardPattern()
case Apply(fun, pats) => SApplyPattern(parseExpr(fun), pats.map(parsePattern))
case Typed(Ident(termNames.WILDCARD), tpe) => STypedPattern(tpeExpr(tpe))
case Bind(TermName(name), expr) => SBindPattern(name, parsePattern(expr))
case Literal(Constant(c)) => SLiteralPattern(SConst(c))
case Ident(id) => SStableIdPattern(SIdent(id.toString))
case Select(qual, name) => SSelPattern(parseExpr(qual), name.toString)
case Alternative(alts) => SAltPattern(alts.map(parsePattern))
case _ => throw new NotImplementedError(s"parsePattern: ${showRaw(pat)}")
}
def parseType(tpe: Type): STpeExpr = tpe match {
case NoType | NoPrefix => STpeEmpty()
case const: ConstantType => STpeConst(const.value.value)
case thisType: ThisType => STpeThis(thisType.sym.nameString)
case tref: TypeRef => parseTypeRef(tref)
case single: SingleType => STpeSingle(parseType(single.pre), single.sym.nameString)
case TypeBounds(lo, hi) => STpeTypeBounds(parseType(lo), parseType(hi))
case ExistentialType(quant, under) =>
val quantified = quant map(q => STpeDef(q.nameString, Nil, STpeEmpty()))
val underlying = parseType(under)
STpeExistential(underlying, quantified)
case m: MethodType => parseMethodType(Nil, m)
case PolyType(tparams, m: MethodType) => parseMethodType(tparams, m)
case annot: AnnotatedType => parseType(annot.underlying)
case tpe => throw new NotImplementedError(showRaw(tpe, printTypes = Some(true)))
}
def parseMethodType(tparams: List[Symbol], m: MethodType): STpeMethod = {
val method = m //uncurry.transformInfo(m.typeSymbol, m)
val typeParams = tparams.map(_.nameString)
val params = method.params.map(param => parseType(param.tpe))
val res = parseType(method.resultType)
STpeMethod(typeParams, params, res)
}
def parseTypeRef(tref: TypeRef): STpeExpr = {
STpePrimitives.get(tref.sym.nameString) match {
case Some(prim) => prim
case None =>
val fullName = tref.sym.fullNameString
val shortName = tref.sym.nameString
val args = tref.args map parseType
formAppliedTypeTree(fullName, shortName, args)
}
}
} | scalan/scalan | meta/src/main/scala/scalan/meta/ScalanParsers.scala | Scala | apache-2.0 | 24,289 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* Beineng Ma <[email protected]>
*/
package com.thenetcircle.event_bus.story.builder
import com.thenetcircle.event_bus.TestBase
import com.thenetcircle.event_bus.event.extractor.DataFormat
import com.thenetcircle.event_bus.story.tasks.http.HttpSourceBuilder
import scala.concurrent.duration._
class HttpSourceBuilderTest extends TestBase {
behavior of "HttpSourceBuilder"
val builder = new HttpSourceBuilder
it should "work correctly with minimum config" in {
val task = storyBuilder.buildTaskWithBuilder("""{
| "port": 8888
|}""".stripMargin)(builder)
val settings = task.settings
settings.interface shouldEqual "0.0.0.0"
settings.port shouldEqual 8888
settings.format shouldEqual DataFormat.ACTIVITYSTREAMS
settings.succeededResponse shouldEqual "ok"
settings.serverSettings.get.idleTimeout shouldEqual 3.minutes
settings.serverSettings.get.requestTimeout shouldEqual 10.seconds
settings.serverSettings.get.bindTimeout shouldEqual 1.second
settings.serverSettings.get.lingerTimeout shouldEqual 1.minute
settings.serverSettings.get.maxConnections shouldEqual 1024
settings.serverSettings.get.pipeliningLimit shouldEqual 16
settings.serverSettings.get.backlog shouldEqual 1024
}
  it should "build correct HttpSource with overridden config values" in {
val task = storyBuilder.buildTaskWithBuilder("""{
| "interface": "127.0.0.1",
| "port": 8888,
| "succeeded-response": "okoo",
| "server": {
| "max-connections": 1001,
| "request-timeout": "5 s"
| }
|}""".stripMargin)(builder)
val settings = task.settings
settings.interface shouldEqual "127.0.0.1"
settings.port shouldEqual 8888
settings.format shouldEqual DataFormat.ACTIVITYSTREAMS
settings.succeededResponse shouldEqual "okoo"
settings.serverSettings.get.idleTimeout shouldEqual 3.minutes
settings.serverSettings.get.requestTimeout shouldEqual 5.seconds
settings.serverSettings.get.bindTimeout shouldEqual 1.second
settings.serverSettings.get.lingerTimeout shouldEqual 1.minute
settings.serverSettings.get.maxConnections shouldEqual 1001
settings.serverSettings.get.pipeliningLimit shouldEqual 16
settings.serverSettings.get.backlog shouldEqual 1024
}
}
| thenetcircle/event-bus | core/src/test/scala/com/thenetcircle/event_bus/story/builder/HttpSourceBuilderTest.scala | Scala | apache-2.0 | 3,003 |
package ir.query
import ir.Query
import text.normalizer.NormalizedString
/**
* <pre>
* Created on 6/1/15.
* </pre>
* @param keywords keywords
* @author K.Sakamoto
*/
abstract class KeywordsQuery(val keywords: Seq[String]) extends Query {
override val query: NormalizedString
}
| ktr-skmt/FelisCatusZero | src/main/scala/ir/query/KeywordsQuery.scala | Scala | apache-2.0 | 287 |
package im.actor.server.push
import scala.concurrent.ExecutionContext
import slick.dbio.Effect.Read
import slick.dbio.{ DBIO, DBIOAction, NoStream }
import im.actor.api.rpc.peers.{ Peer, PeerType }
import im.actor.server.{ models, persist }
private[push] trait VendorPush {
protected def setPushCredentials(creds: models.push.ApplePushCredentials): DBIO[Int] =
persist.push.ApplePushCredentials.createOrUpdate(creds)
protected def setPushCredentials(creds: models.push.GooglePushCredentials): DBIO[Int] =
persist.push.GooglePushCredentials.createOrUpdate(creds)
protected def deletePushCredentials(authId: Long)(implicit ec: ExecutionContext): DBIO[Int] =
for {
a ← persist.push.ApplePushCredentials.delete(authId)
g ← persist.push.GooglePushCredentials.delete(authId)
} yield a + g
protected def getShowText(userId: Int, paramBase: String)(implicit ec: ExecutionContext): DBIOAction[Boolean, NoStream, Read] = {
persist.configs.Parameter.findValue(userId, s"${paramBase}.show_text") map {
case Some("true") ⇒ true
case Some("false") ⇒ false
case _ ⇒ true
}
}
protected def getChatNotificationEnabled(userId: Int, paramBase: String, originPeer: Peer)(implicit ec: ExecutionContext): DBIOAction[Boolean, NoStream, Read] = {
val peerStr = originPeer.`type` match {
case PeerType.Private ⇒ s"PRIVATE_${originPeer.id}"
case PeerType.Group ⇒ s"GROUP_${originPeer.id}"
}
persist.configs.Parameter.findValue(userId, s"${paramBase}.chat.${peerStr}.enabled") map {
case Some("true") ⇒ true
case Some("false") ⇒ false
case _ ⇒ true
}
}
} | lstNull/actor-platform | actor-server/actor-cqrs/src/main/scala/im/actor/server/push/VendorPush.scala | Scala | mit | 1,698 |
package edu.osu.cse.groenkeb.logic.model.rules
import edu.osu.cse.groenkeb.logic._
import edu.osu.cse.groenkeb.logic.dsl._
import edu.osu.cse.groenkeb.logic.model._
import edu.osu.cse.groenkeb.logic.proof._
import edu.osu.cse.groenkeb.logic.proof.rules._
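/** Rule that discharges goals directly against a first-order model: an atomic sentence is provable
  * from no premises when the model verifies it, and Absurdity follows from an atomic premise that
  * the model refutes.
  */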
case class ModelRule(val model: FirstOrderModel) extends BaseRule {
def major(sentence: Sentence) = sentence.isInstanceOf[AtomicSentence]
def yields(conc: Sentence) = conc match {
case AtomicSentence(atom) => model.verify(conc)
case Absurdity => true
case _ => false
}
def params(major: Option[Sentence] = None)(implicit context: ProofContext) = goal match {
    case s: AtomicSentence if major.isEmpty && model.verify(s) => Some(EmptyParams)
case Absurdity => major match {
case Some(s:AtomicSentence) if !model.verify(s) => Some(UnaryParams(EmptyProof(s)))
case _ => None
}
case _ => None
}
def infer(args: RuleArgs)(implicit context: ProofContext) = goal match {
case AtomicSentence(atom) =>
if (!model.validate(atom)) throw new IllegalArgumentException(String.format("%s is not defined in the model", atom))
args match {
case EmptyArgs if model.verify(goal) => Some(Proof(goal, ModelRule.this, args, Set()))
case _ => None
}
case Absurdity =>
args match {
case UnaryArgs(Proof(s: AtomicSentence, IdentityRule, _, assumptions, _)) if !model.verify(s) =>
if (!model.validate(s.atom)) throw new IllegalArgumentException(String.format("%s is not defined in the model", s.atom))
Some(Proof(Absurdity, ModelRule.this, args, assumptions))
case _ => None
}
case _ => None
}
override def toString = "M"
}
| bgroenks96/AutoMoL | core/src/main/scala/edu/osu/cse/groenkeb/logic/model/rules/ModelRule.scala | Scala | mit | 1,711 |
package org.firedancer3d.scenegraph.geometricproperties
import org.firedancer3d.math._
abstract class Light
case class DirectionalLight(
direction: Vec3 = Vec3(1, 1, 1),
color: ColorRGB = ColorRGB(1, 1, 1),
intensity: Float = 1,
ambientIntensity: Float = 0) extends Light {
}
case class PointLight(
direction: Vec3 = Vec3(1, 1, 1),
location: Vec3 = Vec3(0, 0, 0),
color: ColorRGB = ColorRGB(1, 1, 1),
intensity: Float = 1,
ambientIntensity: Float = 0,
radius: Float = 100) extends Light {
}
case class SpotLight(
direction: Vec3 = Vec3(0, 0, -1),
location: Vec3 = Vec3(0, 0, 0),
color: ColorRGB = ColorRGB(1, 1, 1),
intensity: Float = 1,
ambientIntensity: Float = 0,
radius: Float = 100,
beamWidth: Double = 1.570796,
cutOffAngle: Double = 0.785398) extends Light {
} | cyberthinkers/FireDancer3D | firedancer3d_shared/src/main/scala/org/firedancer3d/scenegraph/geometricproperties/Light.scala | Scala | mit | 874 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.master
private[spark] object DriverState extends Enumeration {
type DriverState = Value
// SUBMITTED: Submitted but not yet scheduled on a worker
// RUNNING: Has been allocated to a worker to run
// FINISHED: Previously ran and exited cleanly
// RELAUNCHING: Exited non-zero or due to worker failure, but has not yet started running again
// UNKNOWN: The state of the driver is temporarily not known due to master failure recovery
// KILLED: A user manually killed this driver
// FAILED: The driver exited non-zero and was not supervised
// ERROR: Unable to run or restart due to an unrecoverable error (e.g. missing jar file)
val SUBMITTED, RUNNING, FINISHED, RELAUNCHING, UNKNOWN, KILLED, FAILED, ERROR = Value
}
| yelshater/hadoop-2.3.0 | spark-core_2.10-1.0.0-cdh5.1.0/src/main/scala/org/apache/spark/deploy/master/DriverState.scala | Scala | apache-2.0 | 1,572 |
/**
* Copyright 2014 Idio
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author David Przybilla [email protected]
**/
package org.idio.dbpedia.spotlight.stores
import org.dbpedia.spotlight.db.memory.{ MemoryResourceStore, MemoryStore, MemoryCandidateMapStore }
import java.io.{ File, FileInputStream }
import Array.concat
import org.dbpedia.spotlight.model.SurfaceForm
import org.idio.dbpedia.spotlight.utils.ArrayUtils
class CustomCandidateMapStore(var candidateMap: MemoryCandidateMapStore,
val pathtoFolder: String,
val resStore:
MemoryResourceStore) {
def this(pathtoFolder: String, resStore: MemoryResourceStore) {
this(MemoryStore.loadCandidateMapStore(new FileInputStream(new File(pathtoFolder, "candmap.mem")), resStore),
pathtoFolder, resStore)
}
def this(candidateMap: MemoryCandidateMapStore, resStore: MemoryResourceStore) {
this(candidateMap, "", resStore)
}
/*
* Tries to get the candidate array for the given surfaceForm.
* In case such candidate array does not exist it will create it.
*
* It looks if the given candidateID is inside the candidate array.
* if it is not it will add it
* */
def addOrCreate(surfaceFormID: Int, candidateID: Int, candidateCounts: Int) {
// try to get it, create the candidate array in case it doesnt exist
try {
this.candidateMap.candidates(surfaceFormID)
} catch {
case e: Exception => {
        println("\tcreating candidate map array for " + surfaceFormID)
        val candidates: Array[Int] = Array(candidateID)
        val counts: Array[Int] = Array(candidateCounts)
        this.createCandidateMapForSurfaceForm(surfaceFormID, candidates, counts)
        println("\tcandidates")
        this.candidateMap.candidates(surfaceFormID).foreach { candidate =>
          println("\t" + candidate)
        }
        return
}
}
try {
// checking if the surfaceForm actually has a candidates array already
this.candidateMap.candidates(surfaceFormID).size
} catch {
case e: Exception => {
// creating the candidate array in case the sf did not have one before
        println("\tcreating candidate map array for " + surfaceFormID)
        this.candidateMap.candidates(surfaceFormID) = Array[Int](candidateID)
        this.candidateMap.candidateCounts(surfaceFormID) = Array[Int](candidateCounts)
        println("\tcandidates")
        this.candidateMap.candidates(surfaceFormID).foreach { candidate =>
          println("\t" + candidate)
        }
      }
    }
    // if the candidate array exists, check whether the candidate topic is already inside
    if (!this.checkCandidateInSFCandidates(surfaceFormID, candidateID)) {
      println("\tadding the candidate(" + candidateID + ") to candidates of " + surfaceFormID)
this.addNewCandidateToSF(surfaceFormID, candidateID, candidateCounts)
}
}
/*
* appends listOfCandidates to the end of the candidate
* appends listOfCounts to the end of candidateCounts
*
* This is used when a surfaceForm is introduced to the model.
* */
def createCandidateMapForSurfaceForm(surfaceFormID: Int, listOfCandidates: Array[Int], listOfCounts: Array[Int]) {
this.candidateMap.candidates = Array concat (this.candidateMap.candidates, Array(listOfCandidates))
this.candidateMap.candidateCounts = Array concat (this.candidateMap.candidateCounts, Array(listOfCounts))
}
/*
* returns the AVG candidate counts for a given SF
* This value is used when creating a new association between an SF and a Topic
* */
def getAVGSupportForSF(surfaceFormID: Int): Int = {
val candidateCounts = this.candidateMap.candidateCounts(surfaceFormID)
if (candidateCounts.isInstanceOf[Array[Int]]) {
return (candidateCounts.sum / candidateCounts.size.toDouble).toInt
}
return 0
}
/*
* Checks if a candidateId is already in the candidate array of a surfaceForm.
* */
def checkCandidateInSFCandidates(surfaceFormID: Int, candidateID: Int): Boolean = {
for (candidate: Int <- candidateMap.candidates(surfaceFormID)) {
if (candidate == candidateID)
return true
}
return false
}
/*
* Increments the candidates Counts for a given surfaceForm and candidate
* */
def updateCountsOfCandidate(surfaceFormID: Int, candidateID: Int, boostValue: Int) {
// update the candidate count value
println("updating candidate count value")
val indexOfCandidateInArray = this.candidateMap.candidates(surfaceFormID).indexWhere { case (x) => x == candidateID }
this.candidateMap.candidateCounts(surfaceFormID)(indexOfCandidateInArray) += boostValue
}
/*
* Add a new topic candidate to the list of candidates of a SurfaceForm
* */
  def addNewCandidateToSF(surfaceFormID: Int, candidateID: Int, candidateCounts: Int) {
    if (!this.checkCandidateInSFCandidates(surfaceFormID, candidateID)) {
      this.candidateMap.candidates(surfaceFormID) = this.candidateMap.candidates(surfaceFormID) :+ candidateID
      this.candidateMap.candidateCounts(surfaceFormID) = this.candidateMap.candidateCounts(surfaceFormID) :+ candidateCounts
    }
  }
/*
* Remove association between an SF and a DbpediaURI
* */
def removeAssociation(surfaceFormID: Int, candidateID: Int) {
val indexOfCandidateInArray = this.candidateMap.candidates(surfaceFormID).indexWhere { case (x) => x == candidateID }
this.candidateMap.candidates(surfaceFormID) = ArrayUtils.dropIndex(this.candidateMap.candidates(surfaceFormID), indexOfCandidateInArray)
this.candidateMap.candidateCounts(surfaceFormID) = ArrayUtils.dropIndex(this.candidateMap.candidateCounts(surfaceFormID), indexOfCandidateInArray)
}
/*
* get all candidates associated to sourceSurfaceForm
* and associates them also to destinationSurfaceForm
* */
def copyCandidates(sourceSurfaceForm: SurfaceForm, destinationSurfaceForm: SurfaceForm) {
// get the candidates associated to the sourceSF
var newDestinationCandidates = this.candidateMap.candidates(sourceSurfaceForm.id).clone()
var newDestinationCandidatesCounts = this.candidateMap.candidateCounts(sourceSurfaceForm.id).clone()
// add the candidates associated to the destinationSF but not to the sourceSF
val setOfCandidatesTopics: collection.immutable.Set[Int] = collection.immutable.Set[Int](newDestinationCandidates: _*)
val currentDestinationCandidates = this.candidateMap.candidates(destinationSurfaceForm.id).zip(this.candidateMap.candidateCounts(destinationSurfaceForm.id))
currentDestinationCandidates.foreach {
case (topicId, count) =>
// if candidate is not already in the new candidate list then add it
if (!setOfCandidatesTopics.contains(topicId)) {
newDestinationCandidates = newDestinationCandidates :+ topicId
newDestinationCandidatesCounts = newDestinationCandidatesCounts :+ count
}
}
// update the destinationSF candidate arrays
this.candidateMap.candidates(destinationSurfaceForm.id) = newDestinationCandidates
this.candidateMap.candidateCounts(destinationSurfaceForm.id) = newDestinationCandidatesCounts
}
}
| idio/spotlight-model-editor | src/main/scala/org/idio/dbpedia/spotlight/stores/CustomCandidateMapStore.scala | Scala | apache-2.0 | 7,823 |
import org.apache.spark.SparkContext
object Ch0501 {
def main(args: Array[String]): Unit = {
val sc = new SparkContext("local","Chapter 7")
println(s"Running Spark Version ${sc.version}")
//
val inFile = sc.textFile("/Users/ksankar//fdps-v3/data/Line_of_numbers.csv")
    val stringsRDD = inFile.map(line => line.split(','))
stringsRDD.take(10)
val numbersRDD = stringsRDD.map(x => x.map(_.toDouble))
numbersRDD.take(3)
}
} | dineshpackt/Fast-Data-Processing-with-Spark-2 | code/Ch0501.scala | Scala | mit | 456 |
package Chapter08
object Inheritance {
// topics:
// extending a class
// overriding methods
// type checks and casts
// protected fields and methods
// superclass construction
// overriding fields
// anonymous subclasses
// abstract classes
// abstract fields
// construction order and early definitions
// the scala inheritance hierarchy
// object equality
// value classes
// only the PRIMARY constructor can call the PRIMARY superclass constructor;
// you can override fields;
// 'final' means can't be overridden or extended (in java final means immutable);
// extending a class
def extendingAClass = {
// keyword: extends
// final
class Person {}
// if final => can't extend or override
class Employee extends Person {
var salary = 0.0
}
}
// overriding methods
def overridingMethods = {
// modifier: override for non-abstract members
class Person(val name: String) {
override def toString: String = s"${getClass.getName} name: $name"
}
// override helps with diagnostics:
// - misspell the name;
// - provide a wrong param. type;
// - add a new method in super that clashes with a subclass (fragile base class problem)
        // in java some people declare all methods final to work around the "fragile base class" problem...
        // but the @Override annotation is the better remedy
// call super:
class Employee(name: String, val salary: Double) extends Person(name) {
override def toString: String = super.toString + s"; salary: $salary"
}
}
// type checks and casts
def typeChecksAndCasts = {
// isInstanceOf vs classOf vs pattern matching
        val p: AnyRef = List("Fred", "Wilma") // some reference whose static type we do not know
        type Foo = List[String]
        // check class type: true if p is a Foo (or a subclass); note that type arguments are erased at runtime
val s = if (p.isInstanceOf[Foo]) p.asInstanceOf[Foo] else sys.error("oops")
// check object type (runtime class), NOT a subclass
val o = if (p.getClass == classOf[Foo]) p.asInstanceOf[Foo] else sys.error("oops")
// pattern matching better
val pm = p match {
case s: Foo => s.asInstanceOf[Foo]
case _ => sys.error("oops")
}
}
// protected fields and methods
def protectedFieldsAndMethods = {
// access granted from any subclass (not package)
class Person {
protected var name = "" // class protected
protected[this] var backup = "" // object protected
}
}
// superclass construction
def superclassConstruction = {
// aux constructors must start with a call to a preceding constructor =>
// aux constructor can never call a super constructor, only primary can do this
class Person(val name: String, val age: Int) {}
class Employee(name: String, age: Int, val salary: Double)
extends Person(name, age) // call sup constructor // never like super(name, age)
{ ??? } // subclass constructor
// scala class can extend a java class
import java.nio.charset.Charset
import java.nio.file.{Files, Path}
class PathWriter(p: Path, cs: Charset)
extends java.io.PrintWriter(Files.newBufferedWriter(p, cs)) // call one of the java class constructors
{ ??? }
}
// overriding fields
def overridingFields = {
// class field = private field + getter/setter
// dumb but illustrative example
class Person(val name: String) {
override def toString: String = s"${getClass.getName} name: $name"
}
class SecretAgent(codename: String) extends Person(codename) {
override val name: String = "secret" // val over val
override val toString = "secret" // val over def
}
// common case: override abstract def with a val
abstract class Person2 {
def id: Int // contract, not defined => abstract
}
class Student(override val id: Int) extends Person2 {
???
}
// restrictions:
// - def over def;
// - val over def or val;
// - var over abstract var only
// so, if you implement getter/setter with a var, all subclasses are stuck with it!
}
// anonymous subclasses
def anonymousSubclasses = {
// new Foo { definitions } : object of a structural type
// that can be used as any other type
class Person(val name: String) {
override def toString: String = s"${getClass.getName} name: $name"
}
val alien = new Person("Fred") { def greeting: String = "Greetings, Earthling!" }
// n.b. parameter type declaration
def meet(p: Person{def greeting: String}) = {
println(s"$p says ${p.greeting}")
}
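    // quick usage sketch: the structural parameter type accepts our anonymous subclass instance
    meet(alien) // prints the anonymous class name, then " name: Fred says Greetings, Earthling!"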
}
// abstract classes
def abstractClasses = {
// omit member body => abstract class;
abstract class Person(val name: String) {
      def id: Int // no body => the whole class must be declared abstract
}
class Employee(name: String) extends Person(name) {
def id = name.hashCode // override not required
}
}
// abstract fields
def abstractFields = {
// field w/o a value
abstract class Person {
val id: Int // abstract field with an abstract getter
var name: String // + abstract setter
// generated java class has no fields
/*
scala> :javap -private Person
Compiled from "<console>"
public abstract class $line2.$read$$iw$$iw$Person {
public abstract int id();
public abstract java.lang.String name();
public abstract void name_$eq(java.lang.String);
public $line2.$read$$iw$$iw$Person();
}
*/
}
// no override required
class Employee(val id: Int) extends Person {
var name = ""
}
// customize by anonymous type (structural type)
val fred = new Person { val id = 42; var name = "Fred" }
}
// construction order and early definitions
def constructionOrderAndEarlyDefinitions = {
// using methods (getters) in constructor is a bad idea:
// jvm call overridden method from subclass while constructing superclass: by design
// example: override a val that used in sup constructor
class Creature {
val range: Int = 10
      val env: Array[Int] = new Array[Int](range) // oops, calls the range getter!
    }
    class Ant extends Creature {
      override val range = 2 // this field is initialized AFTER the superclass constructor runs,
      // so the range getter called from the superclass constructor returns 0
}
// remember: no method calls in constructor (no val getters)!
// remedies:
// - declare val as final;
// - declare val as lazy;
// - use the early definition
// early definition: init val fields of subclass BEFORE sup's constructor
class Ant2 extends { override val range = 2 } with Creature
// n.b. extends block with sup; like traits mixin
// use -Xcheckinit compiler flag to find access to uninitialized fields
}
// the scala inheritance hierarchy
def theScalaInheritanceHierarchy = {
// Any: root (isInstanceOf, equal, hashCode)
// AnyVal: value classes, primitives
// AnyRef: compound classes (java.lang.Object, add wait, notify, synchronized)
// Null: subtype of all ref. types (singleton null)
// Nothing: subtype of all types (no instances, useful for generics and exceptions)
// Nothing != void or Unit
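    // a minimal sketch of why Nothing is useful: it is the result type of an expression
    // that never returns normally, so it unifies with any other branch type
    def fail(msg: String): Nothing = throw new IllegalArgumentException(msg)
    def firstOrFail(xs: List[Int]): Int = if (xs.nonEmpty) xs.head else fail("empty list")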
// parameters of Any type placed in a tuple
def show(x: Any) { println(s"${x.getClass}: $x") }
show(1,2,3) // class scala.Tuple3: (1,2,3)
}
// object equality
def objectEquality = {
// when you implement a class, consider overriding methods
// 'equal' and 'hashCode'
// using class instances as map keys or set items require that
// good example
class Item(val description: String, val price: Double) {
// final: you should not extend equals, because of a symmetry problem:
// a equals b should be the same as b equals a, even if b is a subclass
final override def equals(other: Any): Boolean = other match {
case that: Item => { description == that.description && price == that.price }
case _ => false
}
// define hashCode as well, from the fields used in equals
final override def hashCode(): Int = (description, price).##
// ## method is null-safe: yields 0 for null
}
// in app code use '==' operator
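    // quick usage sketch: consistent equals/hashCode make Items usable as set elements and map keys
    val a = new Item("apple", 1.0)
    val b = new Item("apple", 1.0)
    println(a == b)             // true: '==' delegates to equals (and handles null)
    println(Set(a).contains(b)) // true: hashCode agrees with equals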
}
// value classes
def valueClasses = {
// for classes with a single field, such as the wrapper for primitive types,
// it's inefficient to allocate a new object for every value
// hence: value classes, a trick that inline class methods
// value class properties:
// - extends AnyVal;
// - primary constructor has exactly one val param and no body;
// - has no other fields or constructors;
// - automatically provides equals and hashCode.
// value class may not be a local class
import Chapter08.{valueClasses => vc}
val mt = vc.MilTime(2359)
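    // quick usage sketch: the wrapper methods work as usual, while in most contexts
    // no MilTime object needs to be allocated at runtime
    println(mt.hours)   // 23
    println(mt.minutes) // 59
    println(mt)         // "2359" via the overridden toString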
// class MilitaryTime(val time: Int) extends AnyVal {
// def minutes = time % 100
// def hours = time / 100
// override def toString: String = f"$time%04d"
// }
//
// // or, better, provide a factory with proper initialization (no body for constructor!)
// class MilTime private (val time: Int) extends AnyVal {
// def minutes = time % 100
// def hours = time / 100
// override def toString: String = f"$time%04d"
// }
//
// object MilTime {
// def apply(time: Int) = {
// if (0 <= time && time < 2400 && time % 100 < 60) new MilTime(time)
// else throw new IllegalArgumentException("time should be between 0000 and 2359 inclusive")
// }
// }
//
// // if you need a value class with a trait,
// // the trait must explicitly extend Any, and it may not have fields.
// // such traits are called 'universal traits'
//
// // example for overhead-free tiny types
// class Author(val name: String) extends AnyVal
// class Title(val value: String) extends AnyVal
// class Book(val author: Author, val title: Title) // can't switch author and title
}
}
object Inheritance_Exercises {
// 1. Extend the following BankAccount class to a CheckingAccount class
// that charges $1 for every deposit and withdrawal.
//
// class BankAccount(initialBalance: Double) {
// private var balance = initialBalance
// def currentBalance = balance
// def deposit(amount: Double) = { balance += amount; balance }
// def withdraw(amount: Double) = { balance -= amount; balance }
// }
def ex1 = {
class BankAccount(initialBalance: Double) {
private var balance = initialBalance
def currentBalance = balance
def deposit(amount: Double) = { balance += amount; balance }
def withdraw(amount: Double) = { balance -= amount; balance }
}
class CheckingAccount(initialBalance: Double)
extends BankAccount(initialBalance) {
// to charge deposit & withdrawal
override def deposit(amount: Double) = { super.deposit(amount - 1.0) }
override def withdraw(amount: Double) = { super.withdraw(amount + 1.0) }
}
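    // quick check of the charging behaviour
    val acct = new CheckingAccount(100.0)
    acct.deposit(10.0)  // balance becomes 109.0: 10 deposited, $1 charged
    acct.withdraw(10.0) // balance becomes 98.0: 10 withdrawn, $1 charged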
}
// 2. Extend the BankAccount class of the preceding exercise into a class SavingsAccount
// that earns interest every month (when a method earnMonthlyInterest is called) and has
// three free deposits or withdrawals every month. Reset the transaction count in the
// earnMonthlyInterest method.
def ex2 = {
class BankAccount(initialBalance: Double) {
private var balance = initialBalance
def currentBalance = balance
def deposit(amount: Double) = { balance += amount; balance }
def withdraw(amount: Double) = { balance -= amount; balance }
}
class SavingsAccount(initialBalance: Double)
extends BankAccount(initialBalance) {
override def deposit(amount: Double) = { super.deposit(amount - charge()) }
override def withdraw(amount: Double) = { super.withdraw(amount + charge()) }
def earnMonthlyInterest(): Unit = { // mutator
        freeCount = 4 // the deposit below consumes one free transaction, leaving 3
deposit(currentBalance * intRate)
}
private def charge() = { // mutator
if (freeCount <= 0) 1.0 else { freeCount -= 1; 0.0 }
}
private[this] val intRate = 1.0/100.0
private[this] var freeCount = 3
}
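    // quick check: three free transactions per month, then $1 per transaction
    val sav = new SavingsAccount(100.0)
    sav.deposit(10.0)  // 110.0 (free)
    sav.deposit(10.0)  // 120.0 (free)
    sav.withdraw(10.0) // 110.0 (free)
    sav.withdraw(10.0) // 99.0 (the fourth transaction is charged $1)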
}
// 3. Consult your favorite Java or C++ textbook which is sure to have an example of a toy
// inheritance hierarchy, perhaps involving employees, pets, graphical shapes, or the like.
// Implement the example in Scala.
def ex3 = {
// http://www.java2s.com/Tutorials/Java/Java_Object_Oriented_Design/0300__Java_Inheritance.htm
class Employee (var name: String = "Unknown")
class Manager extends Employee
val mgr = new Manager
mgr.name = "Tom"
println(s"Manager's name: ${mgr.name}")
}
// 4. Define an abstract class Item with methods price and description. A SimpleItem
// is an item whose price and description are specified in the constructor. Take advantage of the
// fact that a val can override a def. A Bundle is an item that contains other items. Its price is
// the sum of the prices in the bundle. Also provide a mechanism for adding items to the bundle
// and a suitable description method.
def ex4 = {
abstract class Item {
def price: Double
def description: String
}
class SimpleItem(val price: Double, val description: String) extends Item
class Bundle extends Item {
private[this] var items = List.empty[Item]
def add(item: Item): Unit = items :+= item
def price: Double = items.map(_.price).sum
def description: String = items.map(_.description).mkString(";")
}
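    // quick usage sketch
    val bundle = new Bundle
    bundle.add(new SimpleItem(39.95, "Blender"))
    bundle.add(new SimpleItem(29.95, "Toaster"))
    println(bundle.price)       // 69.9 (up to floating-point rounding)
    println(bundle.description) // Blender;Toaster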
}
// 5. Design a class Point whose x and y coordinate values can be provided in a constructor.
// Provide a subclass LabeledPoint whose constructor takes a label value and x and y
// coordinates, such as
//
// new LabeledPoint("Black Thursday", 1929, 230.07)
def ex5 = {
class Point(val x: Double = 0, val y: Double = 0)
class LabeledPoint(val label: String, x: Double, y: Double) extends Point(x, y)
}
// 6. Define an abstract class Shape with an abstract method centerPoint and subclasses
// Rectangle and Circle. Provide appropriate constructors for the subclasses and override
// the centerPoint method in each subclass.
def ex6 = {
import scala.util.Random.nextInt
class Point(val x: Int = 0, val y: Int = 0)
abstract class Shape { def centerPoint: Point }
// perhaps I should define constructors as two points for rectangle and point and radius for circle?
class Rectangle extends Shape { def centerPoint = new Point(nextInt(), nextInt()) }
class Circle extends Shape { def centerPoint = new Point(nextInt(), nextInt()) }
}
// 7. Provide a class Square that extends java.awt.Rectangle and has three constructors:
// one that constructs a square with a given corner point and width,
// one that constructs a square with corner (0, 0) and a given width,
// and one that constructs a square with corner (0, 0) and width 0.
def ex7 = {
class Point(val x: Int = 0, val y: Int = 0)
class Square(corner: Point, width: Int)
extends java.awt.Rectangle(corner.x, corner.y, width, width) {
def this(width: Int) = this(new Point(0,0), width)
def this() = this(new Point(0,0), 0)
}
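    // the three constructors in use
    val s1 = new Square(new Point(5, 10), 20)
    val s2 = new Square(15) // corner (0, 0), width 15
    val s3 = new Square()   // corner (0, 0), width 0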
}
// 8. Compile the Person and SecretAgent classes in Section 8.6, “Overriding Fields,” on
// page 95 and analyze the class files with javap. How many name fields are there? How many
// name getter methods are there? What do they get? (Hint: Use the -c and -private options.)
def ex8 = {
class Person(val name: String) {
override def toString = getClass.getName + "[name=" + name + "]"
}
// How many 'name' fields are there? 1
// How many 'name' getter methods are there? 1
// What do they get? name field value
/*
scala> :javap -c -private Person
Compiled from "<pastie>"
public class $line12.$read$$iw$$iw$Person {
private final java.lang.String name;
public java.lang.String name();
Code:
0: aload_0
1: getfield #18 // Field name:Ljava/lang/String;
4: areturn
public java.lang.String toString();
...
public $line12.$read$$iw$$iw$Person(java.lang.String);
...}
*/
class SecretAgent(codename: String) extends Person(codename) {
override val name = "secret" // Don't want to reveal name . . .
override val toString = "secret" // . . . or class name
}
// How many 'name' fields are there? 1
// How many 'name' getter methods are there? 1
// What do they get? name field value = secret
/*
scala> :javap -c -private SecretAgent
Compiled from "<pastie>"
public class $line13.$read$$iw$$iw$SecretAgent extends $line12.$read$$iw$$iw$Person {
private final java.lang.String name;
private final java.lang.String toString;
public java.lang.String name();
Code:
0: aload_0
1: getfield #26 // Field name:Ljava/lang/String;
4: areturn
public java.lang.String toString();
...
public $line13.$read$$iw$$iw$SecretAgent(java.lang.String);
Code:
0: aload_0
1: aload_1
2: invokespecial #35 // Method $line12/$read$$iw$$iw$Person."<init>":(Ljava/lang/String;)V
5: aload_0
6: ldc #37 // String secret
8: putfield #26 // Field name:Ljava/lang/String;
11: aload_0
12: ldc #37 // String secret
14: putfield #30 // Field toString:Ljava/lang/String;
17: return
}
*/
}
// 9. In the Creature class of Section 8.10, “Construction Order and Early Definitions,” on page 98,
// replace val range with a def
// What happens when you also use a def in the Ant subclass?
// What happens when you use a val in the subclass? Why?
def ex9 = {
class Creature {
def range: Int = 10
val env: Array[Int] = new Array[Int](range) // call to overridden method!
}
class AntDef extends Creature {
override def range = 2
}
class Ant extends Creature {
override val range = 2
}
// What happens when you also use a def in the Ant subclass?
// superclass constructor uses range from subclass, env.length = 2
// What happens when you use a val in the subclass?
// superclass constructor uses range from subclass, env.length = 0
    // Why? Ant's val is still uninitialized (= 0) at the time the superclass constructor executes
// scala> new Ant
// scala> res5.env.length
// res6: Int = 0
// scala> new AntDef
// scala> res7.env.length
// res8: Int = 2
}
// 10. The file scala/collection/immutable/Stack.scala contains the definition
//
// class Stack[A] protected (protected val elems: List[A])
//
// Explain the meanings of the protected keywords. (Hint: Review the discussion of private
// constructors in Chapter 5.)
def ex10 = {
    // the constructor of Stack is protected
    // Stack has a protected list 'elems'
    // both are accessible only from Stack itself and its subclasses
    // a protected constructor can only be invoked from an auxiliary constructor, a companion object,
    // or a constructor of the same class or a subclass
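    // a minimal sketch (with a hypothetical MyStack) of who may invoke a protected constructor:
    class MyStack[A] protected (protected val elems: List[A]) {
      def this() = this(Nil)                               // auxiliary constructor: allowed
      def push(a: A): MyStack[A] = new MyStack(a :: elems) // same class: allowed
    }
    class CountingStack[A] extends MyStack[A](Nil)         // subclass constructor: allowed
    // new MyStack(List(1)) // not allowed from unrelated client code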
}
// 11. Define a value class Point that packs integer x and y coordinates into a Long
// (which you should make private).
def ex11 = {
// value class may not be a local class
import Chapter08.valueClasses.exercise11
val p = exercise11.Point(0, 0)
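    // quick check that both coordinates survive the bit packing (class defined in exercise11 below)
    val q = exercise11.Point(3, 7)
    assert(p.x == 0 && p.y == 0)
    assert(q.x == 3 && q.y == 7)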
// class Point private(private val xy: Long) extends AnyVal {
// def x: Int = (xy >> 32).toInt
// def y: Int = xy.toInt
// }
// object Point {
// def apply(x: Int, y: Int): Point = {
// val xy: Long = (x.toLong << 32) | (y & 0xffffffffL)
// new Point(xy)
// }
// }
}
}
package valueClasses {
// value class may not be a local class
class MilitaryTime(val time: Int) extends AnyVal {
def minutes = time % 100
def hours = time / 100
override def toString: String = f"$time%04d"
}
// or, better, provide a factory with proper initialization (no body for constructor!)
class MilTime private (val time: Int) extends AnyVal {
def minutes = time % 100
def hours = time / 100
override def toString: String = f"$time%04d"
}
object MilTime {
def apply(time: Int) = {
if (0 <= time && time < 2400 && time % 100 < 60) new MilTime(time)
else throw new IllegalArgumentException("time should be between 0000 and 2359 inclusive")
}
}
// if you need a value class with a trait,
// the trait must explicitly extend Any, and it may not have fields.
// such traits are called 'universal traits'
// example for overhead-free tiny types
class Author(val name: String) extends AnyVal
class Title(val value: String) extends AnyVal
class Book(val author: Author, val title: Title) // can't switch author and title
package exercise11 {
// TODO: add tests for corner cases / negative coords
class Point private(private val xy: Long) extends AnyVal {
def x: Int = (xy >> 32).toInt
def y: Int = xy.toInt
}
object Point {
def apply(x: Int, y: Int): Point = {
val xy: Long = (x.toLong << 32) | (y & 0xffffffffL)
new Point(xy)
}
}
}
}
| vasnake/scala-for-the-impatient | src/main/scala/Chapter08/Inheritance.scala | Scala | gpl-3.0 | 22,966 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import sbt._
import sbt.Keys._
object SparkCassandraBuild extends Build {
import Settings._
lazy val root = Project(
id = "root",
base = file("."),
settings = parentSettings,
aggregate = Seq(extension, examples)
)
lazy val extension = LibraryProject("spark-cassandra", Dependencies.extension)
lazy val examples = LibraryProject("spark-cassandra-examples", Dependencies.examples)
def LibraryProject(name: String, deps: Seq[ModuleID], cpd: Seq[ClasspathDep[ProjectReference]] = Seq.empty): Project =
Project(name, file(name), settings = defaultSettings ++ Seq(libraryDependencies ++= Dependencies.testkit ++ deps), dependencies = cpd)
}
object Dependencies {
object Compile {
import Versions._
val akkaCluster = "com.typesafe.akka" %% "akka-cluster" % Akka // ApacheV2
val cassSparkConnector = "com.datastax.spark" %% "spark-cassandra-connector" % Connector withSources() // ApacheV2
val lzf = "com.ning" % "compress-lzf" % Lzf // for spark
val sparkStreaming = "org.apache.spark" %% "spark-streaming" % Spark withSources() // ApacheV2
object Metrics {
val hdrHistogram = "org.hdrhistogram" % "HdrHistogram" % HdrHistogram % "test" // CC0
val latencyUtils = "org.latencyutils" % "LatencyUtils" % LatencyUtils % "test" // Free BSD
val metrics = "com.codahale.metrics" % "metrics-core" % CodahaleMetrics % "test" // ApacheV2
val metricsJvm = "com.codahale.metrics" % "metrics-jvm" % CodahaleMetrics % "test" // ApacheV2
}
object Test {
val akkaTestKit = "com.typesafe.akka" %% "akka-testkit" % Akka % "test" // ApacheV2
val scalatest = "org.scalatest" %% "scalatest" % ScalaTest % "test" // ApacheV2
}
}
import Compile._
val metrics = Seq(Metrics.metrics, Metrics.metricsJvm, Metrics.latencyUtils, Metrics.hdrHistogram)
val testkit = Seq(Test.akkaTestKit, Test.scalatest)
val spark = Seq(lzf, sparkStreaming)
val extension = spark ++ metrics ++ Seq(akkaCluster, cassSparkConnector)
val examples = spark ++ metrics ++ Seq(akkaCluster, cassSparkConnector)
}
| helena/spark-cassandra | project/SparkCassandraBuild.scala | Scala | apache-2.0 | 3,270 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.rdd
import java.io.File
import java.util
import java.util.concurrent._
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import scala.util.Random
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat, FileSplit}
import org.apache.spark.{SparkEnv, TaskContext}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.rdd.{DataLoadCoalescedRDD, DataLoadPartitionCoalescer, RDD}
import org.apache.spark.sql.{CarbonEnv, DataFrame, Row, SQLContext}
import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier}
import org.apache.spark.sql.execution.command.{CompactionModel, ExecutionErrors, UpdateTableModel}
import org.apache.spark.sql.execution.command.management.CommonLoadUtils
import org.apache.spark.sql.hive.DistributionUtil
import org.apache.spark.sql.optimizer.CarbonFilters
import org.apache.spark.sql.util.{CarbonException, SparkSQLUtil}
import org.apache.carbondata.common.constants.LoggerAction
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.{CarbonCommonConstants, SortScopeOptions}
import org.apache.carbondata.core.datamap.{DataMapStoreManager, Segment}
import org.apache.carbondata.core.datamap.status.DataMapStatusManager
import org.apache.carbondata.core.datastore.block.{Distributable, TableBlockInfo}
import org.apache.carbondata.core.datastore.compression.CompressorFactory
import org.apache.carbondata.core.datastore.filesystem.CarbonFile
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.exception.ConcurrentOperationException
import org.apache.carbondata.core.locks.{CarbonLockFactory, ICarbonLock, LockUsage}
import org.apache.carbondata.core.metadata.{CarbonTableIdentifier, ColumnarFormatVersion, SegmentFileStore}
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.mutate.CarbonUpdateUtil
import org.apache.carbondata.core.statusmanager.{LoadMetadataDetails, SegmentStatus, SegmentStatusManager}
import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil, ThreadLocalSessionInfo}
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.events.{OperationContext, OperationListenerBus}
import org.apache.carbondata.indexserver.{DistributedRDDUtils, IndexServer}
import org.apache.carbondata.processing.loading.FailureCauses
import org.apache.carbondata.processing.loading.csvinput.BlockDetails
import org.apache.carbondata.processing.loading.events.LoadEvents.{LoadTablePostStatusUpdateEvent, LoadTablePreStatusUpdateEvent}
import org.apache.carbondata.processing.loading.exception.NoRetryException
import org.apache.carbondata.processing.loading.model.{CarbonDataLoadSchema, CarbonLoadModel}
import org.apache.carbondata.processing.merger.{CarbonCompactionUtil, CarbonDataMergerUtil, CompactionType}
import org.apache.carbondata.processing.util.{CarbonDataProcessorUtil, CarbonLoaderUtil}
import org.apache.carbondata.spark.{DataLoadResultImpl, _}
import org.apache.carbondata.spark.load._
import org.apache.carbondata.spark.util.{CarbonScalaUtil, CommonUtil, Util}
/**
* This is the factory class which can create different RDD depends on user needs.
*
*/
object CarbonDataRDDFactory {
private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
def handleCompactionForSystemLocking(sqlContext: SQLContext,
carbonLoadModel: CarbonLoadModel,
storeLocation: String,
compactionType: CompactionType,
carbonTable: CarbonTable,
compactedSegments: java.util.List[String],
compactionModel: CompactionModel,
operationContext: OperationContext): Unit = {
// taking system level lock at the mdt file location
var configuredMdtPath = CarbonProperties.getInstance().getProperty(
CarbonCommonConstants.CARBON_UPDATE_SYNC_FOLDER,
CarbonCommonConstants.CARBON_UPDATE_SYNC_FOLDER_DEFAULT).trim
configuredMdtPath = CarbonUtil.checkAndAppendFileSystemURIScheme(configuredMdtPath)
val lock = CarbonLockFactory.getSystemLevelCarbonLockObj(
configuredMdtPath + CarbonCommonConstants.FILE_SEPARATOR +
CarbonCommonConstants.SYSTEM_LEVEL_COMPACTION_LOCK_FOLDER,
LockUsage.SYSTEMLEVEL_COMPACTION_LOCK)
if (lock.lockWithRetries()) {
LOGGER.info(s"Acquired the compaction lock for table ${ carbonLoadModel.getDatabaseName }" +
s".${ carbonLoadModel.getTableName }")
try {
startCompactionThreads(
sqlContext,
carbonLoadModel,
storeLocation,
compactionModel,
lock,
compactedSegments,
operationContext
)
} catch {
case e: Exception =>
LOGGER.error(s"Exception in start compaction thread. ${ e.getMessage }")
lock.unlock()
// if the compaction is a blocking call then only need to throw the exception.
if (compactionModel.isDDLTrigger) {
throw e
}
}
} else {
LOGGER.error("Not able to acquire the compaction lock for table " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
CarbonCompactionUtil
.createCompactionRequiredFile(carbonTable.getMetadataPath, compactionType)
// throw exception only in case of DDL trigger.
if (compactionModel.isDDLTrigger) {
CarbonException.analysisException(
s"Compaction is in progress, compaction request for table " +
s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}" +
" is in queue.")
} else {
LOGGER.error("Compaction is in progress, compaction request for table " +
s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}" +
" is in queue.")
}
}
}
def startCompactionThreads(sqlContext: SQLContext,
carbonLoadModel: CarbonLoadModel,
storeLocation: String,
compactionModel: CompactionModel,
compactionLock: ICarbonLock,
compactedSegments: java.util.List[String],
operationContext: OperationContext): Unit = {
val executor: ExecutorService = Executors.newFixedThreadPool(1)
    if (compactionModel.compactionType != CompactionType.IUD_UPDDEL_DELTA) {
      // refresh the load metadata details in the load model; for Update Delta Compaction the
      // metadata is already filled in the LoadModel, so no refresh is needed.
carbonLoadModel.readAndSetLoadMetadataDetails()
}
val compactionThread = new Thread {
override def run(): Unit = {
val compactor = CompactionFactory.getCompactor(
carbonLoadModel,
compactionModel,
executor,
sqlContext,
storeLocation,
compactedSegments,
operationContext)
try {
// compaction status of the table which is triggered by the user.
var triggeredCompactionStatus = false
var exception: Exception = null
try {
compactor.executeCompaction()
triggeredCompactionStatus = true
} catch {
case e: Exception =>
LOGGER.error(s"Exception in compaction thread ${ e.getMessage }")
exception = e
}
// continue in case of exception also, check for all the tables.
val isConcurrentCompactionAllowed = CarbonProperties.getInstance().getProperty(
CarbonCommonConstants.ENABLE_CONCURRENT_COMPACTION,
CarbonCommonConstants.DEFAULT_ENABLE_CONCURRENT_COMPACTION
).equalsIgnoreCase("true")
if (!isConcurrentCompactionAllowed) {
LOGGER.info("System level compaction lock is enabled.")
val skipCompactionTables = ListBuffer[CarbonTableIdentifier]()
var tableForCompaction = CarbonCompactionUtil.getNextTableToCompact(
CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore
.listAllTables(sqlContext.sparkSession).toArray,
skipCompactionTables.toList.asJava)
while (null != tableForCompaction) {
LOGGER.info("Compaction request has been identified for table " +
s"${ tableForCompaction.getDatabaseName }." +
s"${ tableForCompaction.getTableName}")
val table: CarbonTable = tableForCompaction
val metadataPath = table.getMetadataPath
val compactionType = CarbonCompactionUtil.determineCompactionType(metadataPath)
val newCarbonLoadModel = prepareCarbonLoadModel(table)
val compactionSize = CarbonDataMergerUtil
.getCompactionSize(CompactionType.MAJOR, carbonLoadModel)
val newcompactionModel = CompactionModel(
compactionSize,
compactionType,
table,
compactionModel.isDDLTrigger,
CarbonFilters.getCurrentPartitions(sqlContext.sparkSession,
TableIdentifier(table.getTableName,
Some(table.getDatabaseName))), None)
// proceed for compaction
try {
CompactionFactory.getCompactor(
newCarbonLoadModel,
newcompactionModel,
executor,
sqlContext,
storeLocation,
compactedSegments,
operationContext).executeCompaction()
} catch {
case e: Exception =>
LOGGER.error("Exception in compaction thread for table " +
s"${ tableForCompaction.getDatabaseName }." +
s"${ tableForCompaction.getTableName }")
// not handling the exception. only logging as this is not the table triggered
// by user.
} finally {
// delete the compaction required file in case of failure or success also.
if (!CarbonCompactionUtil
.deleteCompactionRequiredFile(metadataPath, compactionType)) {
                // if the compaction request file could not be deleted, then
                // add the table's details to the skip list so that it won't be considered next.
skipCompactionTables.+=:(tableForCompaction.getCarbonTableIdentifier)
LOGGER.error("Compaction request file can not be deleted for table " +
s"${ tableForCompaction.getDatabaseName }." +
s"${ tableForCompaction.getTableName }")
}
}
// ********* check again for all the tables.
tableForCompaction = CarbonCompactionUtil.getNextTableToCompact(
CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore
.listAllTables(sqlContext.sparkSession).toArray,
skipCompactionTables.asJava)
}
}
// Remove compacted segments from executor cache.
if (CarbonProperties.getInstance().isDistributedPruningEnabled(
carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName)) {
try {
IndexServer.getClient.invalidateSegmentCache(carbonLoadModel
.getCarbonDataLoadSchema.getCarbonTable,
compactedSegments.asScala.toArray,
SparkSQLUtil.getTaskGroupId(sqlContext.sparkSession))
} catch {
case ex: Exception =>
LOGGER.warn(s"Clear cache job has failed for ${carbonLoadModel
.getDatabaseName}.${carbonLoadModel.getTableName}", ex)
}
}
          // surface the error to the user (e.g. in beeline) if the compaction they triggered
          // has failed.
if (!triggeredCompactionStatus) {
throw new Exception("Exception in compaction " + exception.getMessage)
}
} finally {
executor.shutdownNow()
compactor.deletePartialLoadsInCompaction()
if (compactionModel.compactionType != CompactionType.IUD_UPDDEL_DELTA) {
compactionLock.unlock()
}
}
}
}
// calling the run method of a thread to make the call as blocking call.
// in the future we may make this as concurrent.
compactionThread.run()
}
private def prepareCarbonLoadModel(
table: CarbonTable
): CarbonLoadModel = {
val loadModel = new CarbonLoadModel
loadModel.setTableName(table.getTableName)
val dataLoadSchema = new CarbonDataLoadSchema(table)
// Need to fill dimension relation
loadModel.setCarbonDataLoadSchema(dataLoadSchema)
loadModel.setTableName(table.getCarbonTableIdentifier.getTableName)
loadModel.setDatabaseName(table.getCarbonTableIdentifier.getDatabaseName)
loadModel.setTablePath(table.getTablePath)
loadModel.setCarbonTransactionalTable(table.isTransactionalTable)
loadModel.readAndSetLoadMetadataDetails()
val loadStartTime = CarbonUpdateUtil.readCurrentTime()
loadModel.setFactTimeStamp(loadStartTime)
val columnCompressor = table.getTableInfo.getFactTable.getTableProperties.asScala
.getOrElse(CarbonCommonConstants.COMPRESSOR,
CompressorFactory.getInstance().getCompressor.getName)
loadModel.setColumnCompressor(columnCompressor)
loadModel
}
def loadCarbonData(
sqlContext: SQLContext,
carbonLoadModel: CarbonLoadModel,
partitionStatus: SegmentStatus = SegmentStatus.SUCCESS,
overwriteTable: Boolean,
hadoopConf: Configuration,
dataFrame: Option[DataFrame] = None,
scanResultRdd : Option[RDD[InternalRow]] = None,
updateModel: Option[UpdateTableModel] = None,
operationContext: OperationContext): LoadMetadataDetails = {
    // Check if any load needs to be deleted before loading new data
val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
var status: Array[(String, (LoadMetadataDetails, ExecutionErrors))] = null
var res: Array[List[(String, (LoadMetadataDetails, ExecutionErrors))]] = null
// create new segment folder in carbon store
if (updateModel.isEmpty && carbonLoadModel.isCarbonTransactionalTable) {
CarbonLoaderUtil.checkAndCreateCarbonDataLocation(carbonLoadModel.getSegmentId, carbonTable)
}
var loadStatus = SegmentStatus.SUCCESS
var errorMessage: String = "DataLoad failure"
var executorMessage: String = ""
val isSortTable = carbonTable.getNumberOfSortColumns > 0
val sortScope = CarbonDataProcessorUtil.getSortScope(carbonLoadModel.getSortScope)
val segmentLock = CarbonLockFactory.getCarbonLockObj(carbonTable.getAbsoluteTableIdentifier,
CarbonTablePath.addSegmentPrefix(carbonLoadModel.getSegmentId) + LockUsage.LOCK)
try {
if (!carbonLoadModel.isCarbonTransactionalTable || segmentLock.lockWithRetries()) {
if (updateModel.isDefined) {
res = loadDataFrameForUpdate(
sqlContext,
dataFrame,
carbonLoadModel,
updateModel,
carbonTable,
hadoopConf)
res.foreach { resultOfSeg =>
resultOfSeg.foreach { resultOfBlock =>
if (resultOfBlock._2._1.getSegmentStatus == SegmentStatus.LOAD_FAILURE) {
loadStatus = SegmentStatus.LOAD_FAILURE
if (resultOfBlock._2._2.failureCauses == FailureCauses.NONE) {
updateModel.get.executorErrors.failureCauses = FailureCauses.EXECUTOR_FAILURE
updateModel.get.executorErrors.errorMsg = "Failure in the Executor."
} else {
updateModel.get.executorErrors = resultOfBlock._2._2
}
} else if (resultOfBlock._2._1.getSegmentStatus ==
SegmentStatus.LOAD_PARTIAL_SUCCESS) {
loadStatus = SegmentStatus.LOAD_PARTIAL_SUCCESS
updateModel.get.executorErrors.failureCauses = resultOfBlock._2._2.failureCauses
updateModel.get.executorErrors.errorMsg = resultOfBlock._2._2.errorMsg
}
}
}
} else {
status = if (scanResultRdd.isDefined) {
val colSchema = carbonLoadModel
.getCarbonDataLoadSchema
.getCarbonTable
.getTableInfo
.getFactTable
.getListOfColumns
.asScala.filterNot(col => col.isInvisible || col.getColumnName.contains("."))
val convertedRdd = CommonLoadUtils.getConvertedInternalRow(colSchema, scanResultRdd.get)
if (isSortTable && sortScope.equals(SortScopeOptions.SortScope.GLOBAL_SORT)) {
DataLoadProcessBuilderOnSpark.insertDataUsingGlobalSortWithInternalRow(sqlContext
.sparkSession,
convertedRdd,
carbonLoadModel,
hadoopConf)
} else {
loadDataFrame(sqlContext, None, Some(convertedRdd), carbonLoadModel)
}
} else {
if (dataFrame.isEmpty && isSortTable &&
carbonLoadModel.getRangePartitionColumn != null &&
(sortScope.equals(SortScopeOptions.SortScope.GLOBAL_SORT) ||
sortScope.equals(SortScopeOptions.SortScope.LOCAL_SORT))) {
DataLoadProcessBuilderOnSpark
.loadDataUsingRangeSort(sqlContext.sparkSession, carbonLoadModel, hadoopConf)
} else if (isSortTable && sortScope.equals(SortScopeOptions.SortScope.GLOBAL_SORT)) {
DataLoadProcessBuilderOnSpark.loadDataUsingGlobalSort(sqlContext.sparkSession,
dataFrame,
carbonLoadModel,
hadoopConf)
} else if (dataFrame.isDefined) {
loadDataFrame(sqlContext, dataFrame, None, carbonLoadModel)
} else {
loadDataFile(sqlContext, carbonLoadModel, hadoopConf)
}
}
val newStatusMap = scala.collection.mutable.Map.empty[String, SegmentStatus]
if (status.nonEmpty) {
status.foreach { eachLoadStatus =>
val state = newStatusMap.get(eachLoadStatus._1)
state match {
case Some(SegmentStatus.LOAD_FAILURE) =>
newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2._1.getSegmentStatus)
case Some(SegmentStatus.LOAD_PARTIAL_SUCCESS)
if eachLoadStatus._2._1.getSegmentStatus ==
SegmentStatus.SUCCESS =>
newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2._1.getSegmentStatus)
case _ =>
newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2._1.getSegmentStatus)
}
}
newStatusMap.foreach {
case (key, value) =>
if (value == SegmentStatus.LOAD_FAILURE) {
loadStatus = SegmentStatus.LOAD_FAILURE
} else if (value == SegmentStatus.LOAD_PARTIAL_SUCCESS &&
loadStatus != SegmentStatus.LOAD_FAILURE) {
loadStatus = SegmentStatus.LOAD_PARTIAL_SUCCESS
}
}
} else {
          // if there is no data in the load (empty input), mark the load status as Success
          // and let the data load flow complete
if ((dataFrame.isDefined || scanResultRdd.isDefined) && updateModel.isEmpty) {
if (dataFrame.isDefined) {
val rdd = dataFrame.get.rdd
if (rdd.partitions == null || rdd.partitions.length == 0) {
LOGGER.warn("DataLoading finished. No data was loaded.")
loadStatus = SegmentStatus.SUCCESS
}
} else {
if (scanResultRdd.get.partitions == null ||
scanResultRdd.get.partitions.length == 0) {
LOGGER.warn("DataLoading finished. No data was loaded.")
loadStatus = SegmentStatus.SUCCESS
}
}
} else {
loadStatus = SegmentStatus.LOAD_FAILURE
}
}
if (loadStatus != SegmentStatus.LOAD_FAILURE &&
partitionStatus == SegmentStatus.LOAD_PARTIAL_SUCCESS) {
loadStatus = partitionStatus
}
}
}
} catch {
case ex: Throwable =>
loadStatus = SegmentStatus.LOAD_FAILURE
val (extrMsgLocal, errorMsgLocal) = CarbonScalaUtil.retrieveAndLogErrorMsg(ex, LOGGER)
executorMessage = extrMsgLocal
errorMessage = errorMsgLocal
LOGGER.info(errorMessage)
LOGGER.error(ex)
} finally {
segmentLock.unlock()
}
// handle the status file updation for the update cmd.
if (updateModel.isDefined) {
if (loadStatus == SegmentStatus.LOAD_FAILURE) {
CarbonScalaUtil.updateErrorInUpdateModel(updateModel.get, executorMessage)
return null
} else if (loadStatus == SegmentStatus.LOAD_PARTIAL_SUCCESS &&
updateModel.get.executorErrors.failureCauses == FailureCauses.BAD_RECORDS &&
carbonLoadModel.getBadRecordsAction.split(",")(1) == LoggerAction.FAIL.name) {
return null
} else {
// in success case handle updation of the table status file.
// success case.
val segmentDetails = new util.HashSet[Segment]()
var resultSize = 0
res.foreach { resultOfSeg =>
resultSize = resultSize + resultOfSeg.size
resultOfSeg.foreach { resultOfBlock =>
segmentDetails.add(new Segment(resultOfBlock._2._1.getLoadName))
}
}
val segmentFiles = updateSegmentFiles(carbonTable, segmentDetails, updateModel.get)
        // this means that the update doesn't have any records to update, so there is no need to do
        // table status file updation.
if (resultSize == 0) {
return null
}
if (!CarbonUpdateUtil.updateTableMetadataStatus(
segmentDetails,
carbonTable,
updateModel.get.updatedTimeStamp + "",
true,
new util.ArrayList[Segment](0),
new util.ArrayList[Segment](segmentFiles), "")) {
LOGGER.error("Data update failed due to failure in table status updation.")
updateModel.get.executorErrors.errorMsg = errorMessage
updateModel.get.executorErrors.failureCauses = FailureCauses
.STATUS_FILE_UPDATION_FAILURE
return null
}
// code to handle Pre-Priming cache for update command
if (!segmentFiles.isEmpty) {
val segmentsToPrePrime = segmentFiles.asScala.map(iterator => iterator.getSegmentNo).toSeq
DistributedRDDUtils
.triggerPrepriming(sqlContext.sparkSession, carbonTable, segmentsToPrePrime,
operationContext, hadoopConf, segmentsToPrePrime.toList)
}
}
return null
}
val uniqueTableStatusId = Option(operationContext.getProperty("uuid")).getOrElse("")
.asInstanceOf[String]
if (loadStatus == SegmentStatus.LOAD_FAILURE) {
// update the load entry in table status file for changing the status to marked for delete
CarbonLoaderUtil.updateTableStatusForFailure(carbonLoadModel, uniqueTableStatusId)
LOGGER.info("********starting clean up**********")
if (carbonLoadModel.isCarbonTransactionalTable) {
// delete segment is applicable for transactional table
CarbonLoaderUtil.deleteSegment(carbonLoadModel, carbonLoadModel.getSegmentId.toInt)
clearDataMapFiles(carbonTable, carbonLoadModel.getSegmentId)
}
LOGGER.info("********clean up done**********")
LOGGER.warn("Cannot write load metadata file as data load failed")
throw new Exception(errorMessage)
} else {
// check if data load fails due to bad record and throw data load failure due to
// bad record exception
if (loadStatus == SegmentStatus.LOAD_PARTIAL_SUCCESS &&
status(0)._2._2.failureCauses == FailureCauses.BAD_RECORDS &&
carbonLoadModel.getBadRecordsAction.split(",")(1) == LoggerAction.FAIL.name) {
// update the load entry in table status file for changing the status to marked for delete
CarbonLoaderUtil.updateTableStatusForFailure(carbonLoadModel, uniqueTableStatusId)
LOGGER.info("********starting clean up**********")
if (carbonLoadModel.isCarbonTransactionalTable) {
// delete segment is applicable for transactional table
CarbonLoaderUtil.deleteSegment(carbonLoadModel, carbonLoadModel.getSegmentId.toInt)
clearDataMapFiles(carbonTable, carbonLoadModel.getSegmentId)
}
LOGGER.info("********clean up done**********")
throw new Exception(status(0)._2._2.errorMsg)
}
      // if no records were loaded into the new segment, the new segment should be deleted
val newEntryLoadStatus =
if (carbonLoadModel.isCarbonTransactionalTable &&
!carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable.isChildTableForMV &&
!CarbonLoaderUtil.isValidSegment(carbonLoadModel, carbonLoadModel.getSegmentId.toInt)) {
LOGGER.warn("Cannot write load metadata file as there is no data to load")
SegmentStatus.MARKED_FOR_DELETE
} else {
loadStatus
}
val segmentFileName =
SegmentFileStore.writeSegmentFile(carbonTable, carbonLoadModel.getSegmentId,
String.valueOf(carbonLoadModel.getFactTimeStamp))
SegmentFileStore.updateTableStatusFile(
carbonTable,
carbonLoadModel.getSegmentId,
segmentFileName,
carbonTable.getCarbonTableIdentifier.getTableId,
new SegmentFileStore(carbonTable.getTablePath, segmentFileName))
operationContext.setProperty(carbonTable.getTableUniqueName + "_Segment",
carbonLoadModel.getSegmentId)
val loadTablePreStatusUpdateEvent: LoadTablePreStatusUpdateEvent =
new LoadTablePreStatusUpdateEvent(
carbonTable.getCarbonTableIdentifier,
carbonLoadModel)
OperationListenerBus.getInstance().fireEvent(loadTablePreStatusUpdateEvent, operationContext)
val (done, writtenSegment) =
updateTableStatus(
status,
carbonLoadModel,
newEntryLoadStatus,
overwriteTable,
segmentFileName,
uniqueTableStatusId)
val loadTablePostStatusUpdateEvent: LoadTablePostStatusUpdateEvent =
new LoadTablePostStatusUpdateEvent(carbonLoadModel)
val commitComplete = try {
OperationListenerBus.getInstance()
.fireEvent(loadTablePostStatusUpdateEvent, operationContext)
true
} catch {
case ex: Exception =>
LOGGER.error("Problem while committing data maps", ex)
false
}
if (!done || !commitComplete) {
CarbonLoaderUtil.updateTableStatusForFailure(carbonLoadModel, uniqueTableStatusId)
LOGGER.info("********starting clean up**********")
if (carbonLoadModel.isCarbonTransactionalTable) {
// delete segment is applicable for transactional table
CarbonLoaderUtil.deleteSegment(carbonLoadModel, carbonLoadModel.getSegmentId.toInt)
// delete corresponding segment file from metadata
val segmentFile = CarbonTablePath.getSegmentFilesLocation(carbonLoadModel.getTablePath) +
File.separator + segmentFileName
FileFactory.deleteFile(segmentFile)
clearDataMapFiles(carbonTable, carbonLoadModel.getSegmentId)
}
LOGGER.info("********clean up done**********")
LOGGER.error("Data load failed due to failure in table status updation.")
throw new Exception("Data load failed due to failure in table status updation.")
}
if (SegmentStatus.LOAD_PARTIAL_SUCCESS == loadStatus) {
LOGGER.info("Data load is partially successful for " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
} else {
LOGGER.info("Data load is successful for " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
}
// code to handle Pre-Priming cache for loading
if (!StringUtils.isEmpty(carbonLoadModel.getSegmentId)) {
DistributedRDDUtils.triggerPrepriming(sqlContext.sparkSession, carbonTable, Seq(),
operationContext, hadoopConf, List(carbonLoadModel.getSegmentId))
}
try {
// compaction handling
if (carbonTable.isHivePartitionTable) {
carbonLoadModel.setFactTimeStamp(System.currentTimeMillis())
}
val compactedSegments = new util.ArrayList[String]()
handleSegmentMerging(sqlContext,
carbonLoadModel
.getCopyWithPartition(carbonLoadModel.getCsvHeader, carbonLoadModel.getCsvDelimiter),
carbonTable,
compactedSegments,
operationContext)
carbonLoadModel.setMergedSegmentIds(compactedSegments)
writtenSegment
} catch {
case e: Exception =>
LOGGER.error(
"Auto-Compaction has failed. Ignoring this exception because the" +
" load is passed.", e)
writtenSegment
}
}
}
/**
* clear datamap files for segment
*/
def clearDataMapFiles(carbonTable: CarbonTable, segmentId: String): Unit = {
try {
val segments = List(new Segment(segmentId)).asJava
DataMapStoreManager.getInstance().getAllDataMap(carbonTable).asScala
.filter(_.getDataMapSchema.isIndexDataMap)
.foreach(_.deleteDatamapData(segments))
} catch {
case ex : Exception =>
LOGGER.error(s"Failed to clear datamap files for" +
s" ${carbonTable.getDatabaseName}.${carbonTable.getTableName}")
}
}
/**
   * Add and update the segment files. In the update scenario the carbonindex files are written
   * to the same segment, so we need to update the old segment file. This method writes the latest
   * data to a new segment file and merges it with the old file to get the latest updated files.
* @param carbonTable
* @param segmentDetails
* @return
*/
private def updateSegmentFiles(
carbonTable: CarbonTable,
segmentDetails: util.HashSet[Segment],
updateModel: UpdateTableModel) = {
val metadataDetails =
SegmentStatusManager.readTableStatusFile(
CarbonTablePath.getTableStatusFilePath(carbonTable.getTablePath))
val segmentFiles = segmentDetails.asScala.map { seg =>
val load =
metadataDetails.find(_.getLoadName.equals(seg.getSegmentNo)).get
val segmentFile = load.getSegmentFile
var segmentFiles: Seq[CarbonFile] = Seq.empty[CarbonFile]
val file = SegmentFileStore.writeSegmentFile(
carbonTable,
seg.getSegmentNo,
String.valueOf(System.currentTimeMillis()),
load.getPath)
if (segmentFile != null) {
segmentFiles ++= FileFactory.getCarbonFile(
SegmentFileStore.getSegmentFilePath(carbonTable.getTablePath, segmentFile)) :: Nil
}
val updatedSegFile = if (file != null) {
val carbonFile = FileFactory.getCarbonFile(
SegmentFileStore.getSegmentFilePath(carbonTable.getTablePath, file))
segmentFiles ++= carbonFile :: Nil
val mergedSegFileName = SegmentFileStore.genSegmentFileName(
seg.getSegmentNo,
updateModel.updatedTimeStamp.toString)
SegmentFileStore.mergeSegmentFiles(
mergedSegFileName,
CarbonTablePath.getSegmentFilesLocation(carbonTable.getTablePath),
segmentFiles.toArray)
carbonFile.delete()
mergedSegFileName + CarbonTablePath.SEGMENT_EXT
} else {
null
}
new Segment(seg.getSegmentNo, updatedSegFile)
}.filter(_.getSegmentFileName != null).asJava
segmentFiles
}
/**
* If data load is triggered by UPDATE query, this func will execute the update
* TODO: move it to a separate update command
*/
private def loadDataFrameForUpdate(
sqlContext: SQLContext,
dataFrame: Option[DataFrame],
carbonLoadModel: CarbonLoadModel,
updateModel: Option[UpdateTableModel],
carbonTable: CarbonTable,
hadoopConf: Configuration): Array[List[(String, (LoadMetadataDetails, ExecutionErrors))]] = {
val segmentUpdateParallelism = CarbonProperties.getInstance().getParallelismForSegmentUpdate
val updateRdd = dataFrame.get.rdd
// return directly if no rows to update
val noRowsToUpdate = updateRdd.isEmpty()
if (noRowsToUpdate) {
Array[List[(String, (LoadMetadataDetails, ExecutionErrors))]]()
} else {
// splitting as (key, value) i.e., (segment, updatedRows)
val keyRDD = updateRdd.map(row =>
(row.get(row.size - 1).toString, Row(row.toSeq.slice(0, row.size - 1): _*)))
val loadMetadataDetails = SegmentStatusManager.readLoadMetadata(
carbonTable.getMetadataPath)
.filter(lmd => lmd.getSegmentStatus.equals(SegmentStatus.LOAD_PARTIAL_SUCCESS) ||
lmd.getSegmentStatus.equals(SegmentStatus.SUCCESS))
val segments = loadMetadataDetails.map(f => new Segment(f.getLoadName, f.getSegmentFile))
val segmentIdIndex = segments.map(_.getSegmentNo).zipWithIndex.toMap
val segmentId2maxTaskNo = segments.map { seg =>
(seg.getSegmentNo,
CarbonUpdateUtil.getLatestTaskIdForSegment(seg, carbonLoadModel.getTablePath))
}.toMap
class SegmentPartitioner(segIdIndex: Map[String, Int], parallelism: Int)
extends org.apache.spark.Partitioner {
override def numPartitions: Int = segmentIdIndex.size * parallelism
override def getPartition(key: Any): Int = {
val segId = key.asInstanceOf[String]
segmentIdIndex(segId) * parallelism + Random.nextInt(parallelism)
}
}
val partitionByRdd = keyRDD.partitionBy(
new SegmentPartitioner(segmentIdIndex, segmentUpdateParallelism))
// because partitionId=segmentIdIndex*parallelism+RandomPart and RandomPart<parallelism,
// so segmentIdIndex=partitionId/parallelism, this has been verified.
val conf = SparkSQLUtil.broadCastHadoopConf(sqlContext.sparkSession.sparkContext, hadoopConf)
partitionByRdd.map(_._2).mapPartitions { partition =>
ThreadLocalSessionInfo.setConfigurationToCurrentThread(conf.value.value)
val partitionId = TaskContext.getPartitionId()
val segIdIndex = partitionId / segmentUpdateParallelism
val randomPart = partitionId - segIdIndex * segmentUpdateParallelism
val segId = segments(segIdIndex)
val newTaskNo = segmentId2maxTaskNo(segId.getSegmentNo) + randomPart + 1
List(triggerDataLoadForSegment(
carbonLoadModel,
updateModel,
segId.getSegmentNo,
newTaskNo,
partition).toList).toIterator
}.collect()
}
}
/**
* TODO: move it to a separate update command
*/
private def triggerDataLoadForSegment(
carbonLoadModel: CarbonLoadModel,
updateModel: Option[UpdateTableModel],
key: String,
taskNo: Long,
iter: Iterator[Row]): Iterator[(String, (LoadMetadataDetails, ExecutionErrors))] = {
val rddResult = new updateResultImpl()
val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
val resultIter = new Iterator[(String, (LoadMetadataDetails, ExecutionErrors))] {
val loadMetadataDetails = new LoadMetadataDetails
val executionErrors = ExecutionErrors(FailureCauses.NONE, "")
var uniqueLoadStatusId = ""
try {
val segId = key
val index = taskNo
uniqueLoadStatusId = carbonLoadModel.getTableName +
CarbonCommonConstants.UNDERSCORE +
(index + "_0")
loadMetadataDetails.setLoadName(segId)
loadMetadataDetails.setSegmentStatus(SegmentStatus.LOAD_FAILURE)
carbonLoadModel.setSegmentId(segId)
carbonLoadModel.setTaskNo(String.valueOf(index))
carbonLoadModel.setFactTimeStamp(updateModel.get.updatedTimeStamp)
loadMetadataDetails.setSegmentStatus(SegmentStatus.SUCCESS)
UpdateDataLoad.DataLoadForUpdate(segId,
index,
iter,
carbonLoadModel,
loadMetadataDetails)
} catch {
case e: NoRetryException =>
loadMetadataDetails
.setSegmentStatus(SegmentStatus.LOAD_PARTIAL_SUCCESS)
executionErrors.failureCauses = FailureCauses.BAD_RECORDS
executionErrors.errorMsg = e.getMessage
LOGGER.info("Bad Record Found")
case e: Exception =>
LOGGER.info("DataLoad failure")
LOGGER.error(e)
throw e
}
var finished = false
override def hasNext: Boolean = !finished
override def next(): (String, (LoadMetadataDetails, ExecutionErrors)) = {
finished = true
rddResult
.getKey(uniqueLoadStatusId,
(loadMetadataDetails, executionErrors))
}
}
resultIter
}
/**
* Trigger compaction after data load
*/
def handleSegmentMerging(
sqlContext: SQLContext,
carbonLoadModel: CarbonLoadModel,
carbonTable: CarbonTable,
compactedSegments: java.util.List[String],
operationContext: OperationContext): Unit = {
LOGGER.info(s"compaction need status is" +
s" ${ CarbonDataMergerUtil.checkIfAutoLoadMergingRequired(carbonTable) }")
if (CarbonDataMergerUtil.checkIfAutoLoadMergingRequired(carbonTable)) {
val compactionSize = 0
val isCompactionTriggerByDDl = false
val compactionModel = CompactionModel(
compactionSize,
CompactionType.MINOR,
carbonTable,
isCompactionTriggerByDDl,
CarbonFilters.getCurrentPartitions(sqlContext.sparkSession,
TableIdentifier(carbonTable.getTableName,
Some(carbonTable.getDatabaseName))), None)
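      // pick a local working directory for intermediate compaction files:
      // prefer one of Spark's configured local dirs, falling back to java.io.tmpdir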
var storeLocation = ""
val configuredStore = Util.getConfiguredLocalDirs(SparkEnv.get.conf)
if (null != configuredStore && configuredStore.nonEmpty) {
storeLocation = configuredStore(Random.nextInt(configuredStore.length))
}
if (storeLocation == null) {
storeLocation = System.getProperty("java.io.tmpdir")
}
storeLocation = storeLocation + "/carbonstore/" + System.nanoTime()
val isConcurrentCompactionAllowed = CarbonProperties.getInstance().getProperty(
CarbonCommonConstants.ENABLE_CONCURRENT_COMPACTION,
CarbonCommonConstants.DEFAULT_ENABLE_CONCURRENT_COMPACTION
).equalsIgnoreCase("true")
if (!isConcurrentCompactionAllowed) {
handleCompactionForSystemLocking(sqlContext,
carbonLoadModel,
storeLocation,
CompactionType.MINOR,
carbonTable,
compactedSegments,
compactionModel,
operationContext
)
} else {
val lock = CarbonLockFactory.getCarbonLockObj(
carbonTable.getAbsoluteTableIdentifier,
LockUsage.COMPACTION_LOCK)
val updateLock = CarbonLockFactory.getCarbonLockObj(carbonTable
.getAbsoluteTableIdentifier, LockUsage.UPDATE_LOCK)
try {
if (updateLock.lockWithRetries(3, 3)) {
if (lock.lockWithRetries()) {
LOGGER.info("Acquired the compaction lock.")
startCompactionThreads(sqlContext,
carbonLoadModel,
storeLocation,
compactionModel,
lock,
compactedSegments,
operationContext
)
} else {
LOGGER.error("Not able to acquire the compaction lock for table " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName}")
}
} else {
throw new ConcurrentOperationException(carbonTable, "update", "compaction")
}
} catch {
case e: Exception =>
LOGGER.error(s"Exception in start compaction thread.", e)
lock.unlock()
throw e
} finally {
updateLock.unlock()
}
}
}
}
/**
* Update table status file after data loading
* @param status status collected from each task
* @param carbonLoadModel load model used for loading
* @param newEntryLoadStatus segment status to set in the metadata
* @param overwriteTable true the operation is overwrite
* @param segmentFileName segment file name
* @param uuid uuid for the table status file name
* @return whether operation success and
* the segment metadata that written into the segment status file
*/
private def updateTableStatus(
status: Array[(String, (LoadMetadataDetails, ExecutionErrors))],
carbonLoadModel: CarbonLoadModel,
newEntryLoadStatus: SegmentStatus,
overwriteTable: Boolean,
segmentFileName: String,
uuid: String = ""): (Boolean, LoadMetadataDetails) = {
val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
val metadataDetails = if (status != null && status.size > 0 && status(0) != null) {
status(0)._2._1
} else {
new LoadMetadataDetails
}
metadataDetails.setSegmentFile(segmentFileName)
CarbonLoaderUtil.populateNewLoadMetaEntry(
metadataDetails,
newEntryLoadStatus,
carbonLoadModel.getFactTimeStamp,
true)
CarbonLoaderUtil
.addDataIndexSizeIntoMetaEntry(metadataDetails, carbonLoadModel.getSegmentId, carbonTable)
if (!carbonLoadModel.isCarbonTransactionalTable && overwriteTable) {
CarbonLoaderUtil.deleteNonTransactionalTableForInsertOverwrite(carbonLoadModel)
}
val done = CarbonLoaderUtil.recordNewLoadMetadata(metadataDetails, carbonLoadModel, false,
overwriteTable, uuid)
if (!done) {
val errorMessage = s"Dataload failed due to failure in table status updation for" +
s" ${carbonLoadModel.getTableName}"
LOGGER.error(errorMessage)
throw new Exception(errorMessage)
} else {
DataMapStatusManager.disableAllLazyDataMaps(carbonTable)
if (overwriteTable) {
val allDataMapSchemas = DataMapStoreManager.getInstance
.getDataMapSchemasOfTable(carbonTable).asScala
.filter(dataMapSchema => null != dataMapSchema.getRelationIdentifier &&
!dataMapSchema.isIndexDataMap).asJava
if (!allDataMapSchemas.isEmpty) {
DataMapStatusManager.truncateDataMap(allDataMapSchemas)
}
}
}
(done, metadataDetails)
}
/**
* Execute load process to load from input dataframe
*
* @param sqlContext sql context
* @param dataFrame optional dataframe for insert
* @param scanResultRDD optional internal row rdd for direct insert
* @param carbonLoadModel load model
* @return Return an array that contains all of the elements in NewDataFrameLoaderRDD.
*/
private def loadDataFrame(
sqlContext: SQLContext,
dataFrame: Option[DataFrame],
scanResultRDD: Option[RDD[InternalRow]],
carbonLoadModel: CarbonLoadModel
): Array[(String, (LoadMetadataDetails, ExecutionErrors))] = {
try {
val rdd = if (dataFrame.isDefined) {
dataFrame.get.rdd
} else {
// For internal row, no need of converter and re-arrange step,
carbonLoadModel.setLoadWithoutConverterWithoutReArrangeStep(true)
scanResultRDD.get
}
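      // number of distinct hosts holding partitions of the input, used to decide
      // how many executors are needed for the load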
val nodeNumOfData = rdd.partitions.flatMap[String, Array[String]] { p =>
DataLoadPartitionCoalescer.getPreferredLocs(rdd, p).map(_.host)
}.distinct.length
val nodes = DistributionUtil.ensureExecutorsByNumberAndGetNodeList(
nodeNumOfData,
sqlContext.sparkContext)
val newRdd =
if (dataFrame.isDefined) {
new DataLoadCoalescedRDD[Row](
sqlContext.sparkSession, dataFrame.get.rdd, nodes.toArray.distinct)
} else {
new DataLoadCoalescedRDD[InternalRow](
sqlContext.sparkSession,
scanResultRDD.get,
nodes.toArray.distinct)
}
new NewDataFrameLoaderRDD(
sqlContext.sparkSession,
new DataLoadResultImpl(),
carbonLoadModel,
newRdd
).collect()
} catch {
case ex: Exception =>
LOGGER.error("load data frame failed", ex)
throw ex
}
}
/**
* Execute load process to load from input file path specified in `carbonLoadModel`
*/
private def loadDataFile(
sqlContext: SQLContext,
carbonLoadModel: CarbonLoadModel,
hadoopConf: Configuration
): Array[(String, (LoadMetadataDetails, ExecutionErrors))] = {
/*
     * when the data load is handled by node partition:
     * 1) clone the hadoop configuration, and set the file path in the configuration
     * 2) use org.apache.hadoop.mapreduce.lib.input.TextInputFormat to get the splits and size info
     * 3) use CarbonLoaderUtil.nodeBlockMapping to get the mapping of nodes to blocks,
     *    for locally writing carbondata files (one file per block) on the nodes
     * 4) use NewCarbonDataLoadRDD to load the data and write the carbondata files
*/
    // FileUtils will skip files which are not csv, and return all file paths separated by ','
val filePaths = carbonLoadModel.getFactFilePath
hadoopConf.set(FileInputFormat.INPUT_DIR, filePaths)
hadoopConf.set(FileInputFormat.INPUT_DIR_RECURSIVE, "true")
hadoopConf.set("io.compression.codecs",
"""org.apache.hadoop.io.compress.GzipCodec,
org.apache.hadoop.io.compress.DefaultCodec,
org.apache.hadoop.io.compress.BZip2Codec""".stripMargin)
CommonUtil.configSplitMaxSize(sqlContext.sparkContext, filePaths, hadoopConf)
val jobConf = new JobConf(hadoopConf)
SparkHadoopUtil.get.addCredentials(jobConf)
val inputFormat = new org.apache.hadoop.mapreduce.lib.input.TextInputFormat
val jobContext = new Job(jobConf)
val rawSplits = inputFormat.getSplits(jobContext).toArray
val blockList = rawSplits.map { inputSplit =>
val fileSplit = inputSplit.asInstanceOf[FileSplit]
new TableBlockInfo(fileSplit.getPath.toString,
fileSplit.getStart, "1",
fileSplit.getLocations, fileSplit.getLength, ColumnarFormatVersion.V3, null
).asInstanceOf[Distributable]
}
// group blocks to nodes, tasks
val startTime = System.currentTimeMillis
val activeNodes = DistributionUtil
.ensureExecutorsAndGetNodeList(blockList, sqlContext.sparkContext)
val skewedDataOptimization = CarbonProperties.getInstance()
.isLoadSkewedDataOptimizationEnabled()
    // get the minimum data size each node should load, as specified by the user in the table DDL
val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
var loadMinSize = carbonLoadModel.getLoadMinSize()
if (loadMinSize.equalsIgnoreCase(CarbonCommonConstants.CARBON_LOAD_MIN_SIZE_INMB_DEFAULT)) {
loadMinSize = carbonTable.getTableInfo.getFactTable.getTableProperties.asScala
.getOrElse(CarbonCommonConstants.CARBON_LOAD_MIN_SIZE_INMB,
CarbonCommonConstants.CARBON_LOAD_MIN_SIZE_INMB_DEFAULT)
}
val blockAssignStrategy = if (!loadMinSize.equalsIgnoreCase(
CarbonCommonConstants.CARBON_LOAD_MIN_SIZE_INMB_DEFAULT)) {
CarbonLoaderUtil.BlockAssignmentStrategy.NODE_MIN_SIZE_FIRST
} else if (skewedDataOptimization) {
CarbonLoaderUtil.BlockAssignmentStrategy.BLOCK_SIZE_FIRST
} else {
CarbonLoaderUtil.BlockAssignmentStrategy.BLOCK_NUM_FIRST
}
LOGGER.info(s"Allocating block to nodes using strategy: $blockAssignStrategy")
val nodeBlockMapping = CarbonLoaderUtil.nodeBlockMapping(blockList.toSeq.asJava, -1,
activeNodes.toList.asJava, blockAssignStrategy, loadMinSize).asScala.toSeq
val timeElapsed: Long = System.currentTimeMillis - startTime
LOGGER.info("Total Time taken in block allocation: " + timeElapsed)
LOGGER.info(s"Total no of blocks: ${ blockList.length }, " +
s"No.of Nodes: ${nodeBlockMapping.size}")
var str = ""
nodeBlockMapping.foreach { entry =>
val tableBlock = entry._2
val totalSize = tableBlock.asScala.map(_.asInstanceOf[TableBlockInfo].getBlockLength).sum
str = str + "#Node: " + entry._1 + ", no.of.blocks: " + tableBlock.size() +
f", totalsize.of.blocks: ${totalSize * 0.1 * 10 / 1024 /1024}%.2fMB"
tableBlock.asScala.foreach(tableBlockInfo =>
if (!tableBlockInfo.getLocations.exists(hostentry =>
hostentry.equalsIgnoreCase(entry._1)
)) {
str = str + " , mismatch locations: " + tableBlockInfo.getLocations
.foldLeft("")((a, b) => a + "," + b)
}
)
str = str + "\\n"
}
LOGGER.info(str)
val blocksGroupBy: Array[(String, Array[BlockDetails])] = nodeBlockMapping.map { entry =>
val blockDetailsList =
entry._2.asScala.map(distributable => {
val tableBlock = distributable.asInstanceOf[TableBlockInfo]
new BlockDetails(new Path(tableBlock.getFilePath),
tableBlock.getBlockOffset, tableBlock.getBlockLength, tableBlock.getLocations
)
}).toArray
(entry._1, blockDetailsList)
}.toArray
new NewCarbonDataLoadRDD(
sqlContext.sparkSession,
new DataLoadResultImpl(),
carbonLoadModel,
blocksGroupBy
).collect()
}
}
| jackylk/incubator-carbondata | integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala | Scala | apache-2.0 | 50,827 |
package com.arcusys.valamis.gradebook.service.impl
import com.arcusys.learn.liferay.LiferayClasses._
import com.arcusys.valamis.gradebook.model.LessonAverageGrade
import com.arcusys.valamis.gradebook.service.{StatisticBuilder, LessonGradeService, CourseLessonsResultService}
import com.arcusys.valamis.lesson.model.Lesson
import com.arcusys.valamis.lesson.service._
import scala.slick.driver._
import scala.slick.jdbc._
abstract class CourseLessonsResultServiceImpl(val db: JdbcBackend#DatabaseDef,
val driver: JdbcProfile)
extends CourseLessonsResultService {
def lessonGradeService: LessonGradeService
def memberService: LessonMembersService
def statisticBuilder: StatisticBuilder
override def getLessonsAverageGrade(lessons: Seq[Lesson], users: Seq[LUser]): Seq[LessonAverageGrade] = {
lazy val lessonUsers = memberService.getLessonsUsers(lessons, users)
lessons.map {lesson =>
if (users.isEmpty) {
LessonAverageGrade(
lesson,
Seq(),
0F
)
}
else {
val users = lessonUsers.filter(_.lesson == lesson).map(_.user)
val grade = lessonGradeService.getLessonAverageGrades(lesson, users)
LessonAverageGrade(
lesson,
users,
grade
)
}
}
}
}
| igor-borisov/valamis | valamis-gradebook/src/main/scala/com/arcusys/valamis/gradebook/service/impl/CourseLessonsResultServiceImpl.scala | Scala | gpl-3.0 | 1,341 |
package io.ddf.flink.etl
import io.ddf.DDF
import io.ddf.etl.IHandleMissingData.{NAChecking, Axis}
import io.ddf.exception.DDFException
import io.ddf.flink.BaseSpec
import io.ddf.types.AggregateTypes.AggregateFunction
import org.apache.flink.api.scala.DataSet
import scala.collection.JavaConversions._
class MissingDataHandlerSpec extends BaseSpec {
val missingData = loadAirlineNADDF()
it should "drop all rows with NA values" in {
val result = missingData.dropNA()
result.getNumRows should be(9)
}
it should "keep all the rows" in {
val result = missingData.getMissingDataHandler.dropNA(Axis.ROW, NAChecking.ALL, 0, null)
result.getNumRows should be(31)
}
it should "keep all the rows when drop threshold is high" in {
val result = missingData.getMissingDataHandler.dropNA(Axis.ROW, NAChecking.ALL, 10, null)
result.getNumRows should be(31)
}
it should "throw an exception when drop threshold > columns" in {
intercept[DDFException] {
missingData.getMissingDataHandler.dropNA(Axis.ROW, NAChecking.ANY, 31, null)
}
}
it should "drop all columns with NA values" in {
val result = missingData.dropNA(Axis.COLUMN)
result.getNumColumns should be(22)
}
it should "drop all columns with NA values with load table" in {
val missingData = loadAirlineNADDF()
val result = missingData.dropNA(Axis.COLUMN)
result.getNumColumns should be(22)
}
it should "keep all the columns" in {
val result = missingData.getMissingDataHandler.dropNA(Axis.COLUMN, NAChecking.ALL, 0, null)
result.getNumColumns should be(29)
}
it should "keep most(24) columns when drop threshold is high(20)" in {
val result = missingData.getMissingDataHandler.dropNA(Axis.COLUMN, NAChecking.ALL, 20, null)
result.getNumColumns should be(24)
}
it should "throw an exception when drop threshold > rows" in {
intercept[DDFException] {
missingData.getMissingDataHandler.dropNA(Axis.COLUMN, NAChecking.ANY, 40, null)
}
}
it should "fill by value" in {
val ddf = loadDDF()
val ddf1: DDF = ddf.VIEWS.project(List("V1", "V29"))
val filledDDF: DDF = ddf1.fillNA("0")
val annualDelay = filledDDF.aggregate("V1, sum(V29)").get("2008")(0)
annualDelay should be(282.0 +- 0.1)
}
it should "fill by dictionary" in {
val ddf = loadDDF()
val ddf1: DDF = ddf.VIEWS.project(List("V1", "V28", "V29"))
val dict: Map[String, String] = Map("V1" -> "2000", "V28" -> "0", "V29" -> "1")
val filledDDF = ddf1.getMissingDataHandler.fillNA(null, null, 0, null, dict, null)
val annualDelay = filledDDF.aggregate("V1, sum(V29)").get("2008")(0)
annualDelay should be(282.0 +- 0.1)
}
it should "fill by aggregate function" in {
val ddf = loadDDF()
val ddf1: DDF = ddf.VIEWS.project(List("V1", "V28", "V29"))
val result = ddf1.getMissingDataHandler.fillNA(null, null, 0, AggregateFunction.MEAN, null, null)
result should not be (null)
}
}
| ddf-project/ddf-flink | flink/src/test/scala/io/ddf/flink/etl/MissingDataHandlerSpec.scala | Scala | apache-2.0 | 2,978 |
/*
* Copyright (c) 2014 Snowplow Analytics Ltd.
* All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache
* License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied.
*
* See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.storage.kinesis.elasticsearch
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.json4s.JsonDSL._
// Scala
import scala.util.matching.Regex
import scala.annotation.tailrec
/**
* Converts unstructured events and custom contexts to a format which the Elasticsearch
* mapper can understand
*/
object Shredder {
private val schemaPattern = """.+:([a-zA-Z0-9_\\.]+)/([a-zA-Z0-9_]+)/[^/]+/(.*)""".r
/**
* Create an Elasticsearch field name from a schema
*
* "iglu:com.acme/PascalCase/jsonschema/13-0-0" -> "context_com_acme_pascal_case_13"
*
* @param prefix "context" or "unstruct_event"
* @param schema Schema field from an incoming JSON
* @return Elasticsearch field name
*/
// TODO: move this to shared storage/shredding utils
// See https://github.com/snowplow/snowplow/issues/1189
def fixSchema(prefix: String, schema: String): ValidationNel[String, String] = {
schema match {
case schemaPattern(organization, name, schemaVer) => {
// Split the vendor's reversed domain name using underscores rather than dots
val snakeCaseOrganization = organization.replaceAll("""\\.""", "_").toLowerCase
// Change the name from PascalCase to snake_case if necessary
val snakeCaseName = name.replaceAll("([^A-Z_])([A-Z])", "$1_$2").toLowerCase
// Extract the schemaver version's model
val model = schemaVer.split("-")(0)
s"${prefix}_${snakeCaseOrganization}_${snakeCaseName}_${model}".successNel
}
case _ => "Schema %s does not conform to regular expression %s".format(schema, schemaPattern.toString).failNel
}
}
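  // Illustrative usage sketch (not part of the original source): based on the examples
  // in the doc comments of this file, and assuming the schema URIs match `schemaPattern`,
  // the two call forms used below are expected to produce:
  //
  //   fixSchema("contexts", "iglu:com.acme/PascalCase/jsonschema/13-0-0")
  //     // -> success: "contexts_com_acme_pascal_case_13"
  //   fixSchema("unstruct_event", "iglu:com.snowplowanalytics.snowplow/link_click/jsonschema/1-0-1")
  //     // -> success: "unstruct_event_com_snowplowanalytics_snowplow_link_click_1"
  //
  // A schema string that does not match the pattern fails with a message accumulated
  // in the returned ValidationNel.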
/**
* Convert a contexts JSON to an Elasticsearch-compatible JObject
* For example, the JSON
*
* {
* "schema": "iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-0",
* "data": [
* {
* "schema": "iglu:com.acme/unduplicated/jsonschema/1-0-0",
* "data": {
* "unique": true
* }
* },
* {
* "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0",
* "data": {
* "value": 1
* }
* },
* {
* "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0",
* "data": {
* "value": 2
* }
* }
* ]
* }
*
* would become
*
* {
   *    "contexts_com_acme_duplicated_1": [{"value": 1}, {"value": 2}],
   *    "contexts_com_acme_unduplicated_1": [{"unique": true}]
* }
*
* @param contexts Contexts JSON
* @return Contexts JSON in an Elasticsearch-compatible format
*/
def parseContexts(contexts: String): ValidationNel[String, JObject] = {
/**
* Validates and pairs up the schema and data fields without grouping the same schemas together
*
* For example, the JSON
*
* {
* "schema": "iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-0",
* "data": [
* {
* "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0",
* "data": {
* "value": 1
* }
* },
* {
* "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0",
* "data": {
* "value": 2
* }
* }
* ]
* }
*
* would become
*
* [
* {"contexts_com_acme_duplicated_1": {"value": 1}},
* {"contexts_com_acme_duplicated_1": {"value": 2}}
* ]
*
* @param contextJsons List of inner custom context JSONs
* @param accumulator Custom contexts which have already been parsed
* @return List of validated tuples containing a fixed schema string and the original data JObject
*/
@tailrec def innerParseContexts(contextJsons: List[JValue], accumulator: List[ValidationNel[String, (String, JValue)]]):
List[ValidationNel[String, (String, JValue)]] = {
contextJsons match {
case Nil => accumulator
case head :: tail => {
val context = head
val innerData = context \\ "data" match {
case JNothing => "Could not extract inner data field from custom context".failNel // TODO: decide whether to enforce object type of data
case d => d.successNel
}
val fixedSchema: ValidationNel[String, String] = context \\ "schema" match {
case JString(schema) => fixSchema("contexts", schema)
case _ => "Context JSON did not contain a stringly typed schema field".failNel
}
val schemaDataPair = (fixedSchema |@| innerData) {_ -> _}
innerParseContexts(tail, schemaDataPair :: accumulator)
}
}
}
val json = parse(contexts)
val data = json \\ "data"
data match {
case JArray(Nil) => "Custom contexts data array is empty".failNel
case JArray(ls) => {
val innerContexts: ValidationNel[String, List[(String, JValue)]] = innerParseContexts(ls, Nil).sequenceU
// Group contexts with the same schema together
innerContexts.map(_.groupBy(_._1).map(pair => (pair._1, pair._2.map(_._2))))
}
case _ => "Could not extract contexts data field as an array".failNel
}
}
/**
* Convert an unstructured event JSON to an Elasticsearch-compatible JObject
* For example, the JSON
*
* {
* "schema": "iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
* "data": {
* "schema": "iglu:com.snowplowanalytics.snowplow/link_click/jsonschema/1-0-1",
* "data": {
* "key": "value"
* }
* }
* }
*
* would become
*
* {
* "unstruct_event_com_snowplowanalytics_snowplow_link_click_1": {"key": "value"}
* }
*
* @param unstruct Unstructured event JSON
* @return Unstructured event JSON in an Elasticsearch-compatible format
*/
def parseUnstruct(unstruct: String): ValidationNel[String, JObject] = {
val json = parse(unstruct)
val data = json \\ "data"
val schema = data \\ "schema"
val innerData = data \\ "data" match {
case JNothing => "Could not extract inner data field from unstructured event".failNel // TODO: decide whether to enforce object type of data
case d => d.successNel
}
val fixedSchema = schema match {
case JString(s) => fixSchema("unstruct_event", s)
case _ => "Unstructured event JSON did not contain a stringly typed schema field".failNel
}
(fixedSchema |@| innerData) {_ -> _}
}
}
| mdavid/lessig-bigdata | lib/snowplow/4-storage/kinesis-elasticsearch-sink/src/main/scala/com.snowplowanalytics.snowplow.storage.kinesis/elasticsearch/Shredder.scala | Scala | mit | 7,342 |
package sgl
/** Provides the Window abstraction, essentially a screen.
*
 * A window is essentially the same as a screen as far as we are concerned for
 * games developed with SGL.
*
* The Window exports the available width/height in physical pixels, as well
* as density information of the pixels (how big/small they are). The
* density information is fairly relevant with mobile games, because these
* tend to have extremely dense screens, which means that pixels are tiny
* and text/images can appear very small on such screen versus how they would
* look on a classic desktop monitor.
*
 * It's not just about the visual appearance of the objects; it also matters
 * for touch-based input: when the player needs to touch/select objects
 * directly on the screen, it's important that they are big enough to be
 * touched accurately.
*/
trait WindowProvider {
/*
* TODO:
* The Window properties could change dynamically (think resize of the window
* in a desktop app, or a browser), so we are using def, but it would be good
* to provide some listener API for the game to be notified when there is a
* change to the screen properties.
*/
/** Window implements the [[AbstractWindow]] interface. */
type Window <: AbstractWindow
/** The abstract API for the global Window object.
*
* Each platform backend will provide a concrete implementation of
* this AbstractWindow, and will set the type Window to the
* concrete implementation.
*/
abstract class AbstractWindow {
/*
* Pixels are fascinating small and complex objects. Turns
* out that they are not the same physical size everywhere and
* they do not even have to be square. Because of that, when
* drawing object, we need to be aware of the true physical size
   * of pixels. This is especially relevant for touch-based input,
   * where the controls need to have a size roughly matching
   * people's fingers in order to be usable. This is less relevant
   * on desktop, because input is usually keyboard or mouse, although
* under very high pixel density, objects will appear rather small
* and text hard to read.
*
* To expose these settings to the game, we first provide width and height
* of the game window in the screen native pixels (which could be of any
* physical size). In general, when we mention pixel, we mean the native pixel
* from the available screen. Next, we provide the horizontal and vertical
* pixel-per-inch (xppi and yppi), as these could differ when the pixels
* are not actually square. Pixel-per-inch is the standard term for
   * the pixel density on a screen, but it is commonly referred to as
* dpi instead of ppi, but ppi seems to be the technically more correct
* term. The ppi value is computed by taking the diagonal number of pixels
* in the screen and dividing it by the diagonal size of the screen. With
* square pixels, ppi, xppi, and yppi should all be equivalent. Using
* the ppi is the most convenient way to scale all values.
*
   * In practice, screens have an upper limit on ppi, and the actual
   * visual ppi depends on the chosen resolution in the OS settings. All
   * the settings the Window exports are runtime information about the
* current state of the screen.
*/
/** The width of the window, in pixels.
*
* This is the size in the physical pixels of the platform, which
* are technically the smallest unit that can display a color, and
* thus the lowest-level control we can possibly have.
*/
def width: Int
/** The height of the window, in pixels.
*
* This is the size in the physical pixels of the platform, which
* are technically the smallest unit that can display a color, and
* thus the lowest-level control we can possibly have.
*/
def height: Int
/** The horizontal number of pixels per inch.
*
* This refers to the physical size of the pixels of the screen on which
* the Window is rendered. This is typically a property of the screen
* and will not change with different window size or resolution.
*
* The larger this value is, the smaller the pixels are (to the human
     * eye), and potentially the more of them you should use for drawing
* some objects (or not, depending on the game style). Note that if
* you have more pixels to draw the same physical size object, you
* naturally get a crisper image.
*/
def xppi: Float
/** The vertical number of pixels per inch.
*
* See [[xppi]] for more details.
*/
def yppi: Float
    /** The diagonal number of pixels per inch.
*
* See [[xppi]] and [[yppi]] for more details.
*
* If you want to perform your own runtime scaling, use [[logicalPpi]]
* instead.
**/
def ppi: Float
/** The screen ppi (dpi) used for scaling device-independent pixels.
*
* This is the screen ppi (physical pixel per inch) but potentially
* rounded to a more convenient value (instead of a real ppi of 178.7, we
* would round it to 160, the standard number of pixel per inch for the
* mdpi density).
*
* This value is used by SGL whenever it loads resources that need to be
* scaled to fit the device ppi. The Window.ppi method is meant to be
* exact, but it not actually used by SGL for any scaling. The reason for
* that is that it's common for devices/platforms to export an
* approximation of the true ppi (DisplayMetrics.densityDpi in Android,
* window.devicePixelRatio in the HTML dom). These platforms do that
* because it's easier to test and ensure consistency when the pixel
* density has only a few well-defined values, and it's generally good
     * enough (one probably does not need higher precision than that).
*
* The rounding is not guaranteed, it depends on the platform, and
* theoretically it could be as precise than the true ppi. The SGL scaling
* code should still work with a non-standard value (say 222.37).
*
* If you need to do your own custom scaling (say for rectangles you are
* drawing in the canvas that should match your sprites, which are
* themselves scaled by SGL when loaded), this is the value you should
* use. The way you use it, is you multiply your device-independent pixel
* by the ratio of logicalPpi over 160f, so essentially
* dp*logicalPpi/160f. The dp you would use is the size of your bitmaps
* in mdpi, so if your base bitmaps are 32x32 in mdpi, and you need to
* draw a 32x32 rectangle, you compute 32*logicalPpi/160f to get the
* scaled rectangle widthxheight in the screen ppi.
*/
def logicalPpi: Float
/*
     * Because of the varying ppi, we use the notion of DIP, or
* device-independent pixels (which Android supports as well). These
* pixels are a way to ensure that all the objects are displayed
* with roughly the same size in all possible screen configurations.
* This is again especially important in touch-based games. This is
* not mandatory to use, and a game can decide to use regular pixels
* or make up their own abstraction on top of what the Window object
     * provides, which are the low level settings. But using DIP on Android
* has proven to be convenient so we essentially export a similar
* abstraction here, also matching the size of a DIP to the size defined
* in Android.
*
     * A DIP is defined to be of such a size that we can have a ppi of 160. In
* other words, we can put 160 DIP in one inch. This is also the way
* Android defines a DIP. The goal of the conversion functions is to
* be able to express everything in DIP and get the equivalent number
* of screen pixels in order to be the same physical dimension. This
* conversion uses the actual ppi.
*/
// TODO: these conversion functions are not great, because they end up
// being used across the entire game codebase. It would probably
// be better to do the mapping from dp to px using the Viewport,
// in a single location. Once I figure out how this is feasible
// cleanly, we should remove these to discourage people from using
    //          such a pattern.
/** Convert an amount of DIP to the same amount of pixel. */
def dp2px(x: Int): Int = (x*Window.logicalPpi/160f).toInt
/** Convert an amount of DIP to the same amount of pixel. */
def dp2px(x: Float): Float = x*Window.logicalPpi/160f
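    // Illustrative only (not part of the original source): a worked example of the
    // dp-to-pixel formula above, assuming a hypothetical screen with logicalPpi == 320
    // (twice the 160 ppi baseline of a DIP):
    //
    //   dp2px(32)  // == (32 * 320f / 160f).toInt == 64 pixels
    //   dp2px(48f) // == 48f * 320f / 160f == 96f pixels
    //
    // so a 32x32 DIP touch control covers 64x64 physical pixels on that screen,
    // keeping roughly the same physical size as on a 160 ppi screen.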
}
  /** The unique window hosting the game.
*
* Each game app has a single Window object, which is automatically
   * initialized by the framework. The window is not necessarily the whole
* screen, it can be a small frame within the larger screen. The framework
* does not provide access to anything outside the window object.
*
* Window implements the [[AbstractWindow]] interface.
*/
val Window: Window
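  // Illustrative only (not part of the original source): a minimal sketch of how a game
  // could read the window properties at startup, e.g. to size a touch control. The
  // 48-DIP target size is just an example value, not something mandated by SGL.
  //
  //   val aspectRatio = Window.width.toFloat / Window.height
  //   val buttonSidePx = Window.dp2px(48) // 48 DIP scaled to this screen's density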
//TODO: How about only providing a callback to get a pointer to the window object,
// and invoking this callback whenever the window properties are updated?
// This will eventually be necessary, as windows can be resized and we
// want to tell the game about it and not have it poll that info. Maybe
// it's enough to just have a notification callback, and provide the Window
// object available all the time.
}
| regb/scala-game-library | core/src/main/scala/sgl/WindowProvider.scala | Scala | mit | 9,634 |
package gatlin
import akka.actor.ActorRef
import akka.pattern.ask
import akka.persistence.pg.perf.Messages.Alter
import akka.persistence.pg.perf.PerfActor
import akka.util.Timeout
import com.typesafe.config.Config
import gatlin.Predef._
import io.gatling.core.Predef._
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
abstract class SingleActorPerfSimulation(override val config: Config) extends AbstractPersistenceSimulation(config)
{
var actor: ActorRef = _
override def warmup() = {
actor = system.actorOf(PerfActor.props)
implicit val timeout = Timeout(2 seconds)
Await.result(actor ? Alter("warmup"), 10 seconds)
()
}
val scn = scenario("single persistent actor").during(30 seconds) {
feed(feeder)
.exec { session => session.set("actor", actor) }
.exec { request(AlterMessage("${text}")) }
}
setUp(scn.inject(atOnceUsers(10)))
}
| kwark/akka-persistence-postgresql | modules/benchmark/src/it/scala/gatlin/SingleActorPerfSimulation.scala | Scala | mit | 935 |
import leon.lang._
import leon.collection._
import leon.annotation._
object Tree {
sealed abstract class Tree
case object Empty extends Tree
case class Node(left: Tree, value: BigInt, right: Tree) extends Tree
sealed abstract class Dir
case object Left extends Dir
case object Right extends Dir
def lookup(t: Tree, path : List[Dir]): Option[Tree] = {
(t,path) match {
case (_,Nil()) => Some[Tree](t)
case (Empty,_) => None[Tree]()
case (Node(left,_,_), Cons(Left,rest)) => lookup(left,rest)
case (Node(_,_,right), Cons(Right,rest)) => lookup(right,rest)
}
}
def cons[A](x: A, lst: List[A]): List[A] = Cons[A](x,lst)
def find(t: Tree, subtree: Tree): List[List[Dir]] = ({
if (t==subtree)
List(Nil[Dir])
else {
t match {
case Empty => Nil[List[Dir]]
case Node(left,_,right) => {
find(left,subtree).map(cons(Left,_)) ++
find(right,subtree).map(cons(Right,_))
}
}
}
} : List[List[Dir]]).ensuring((res:List[List[Dir]]) => res.forall((path:List[Dir]) => true))
}
| epfl-lara/leon | testcases/verification/higher-order/valid/SubtreeSearch.scala | Scala | gpl-3.0 | 1,098 |
package com.jejking.rprng.png
import akka.util.{ByteString, ByteStringBuilder}
import java.util.zip.Deflater
import scala.annotation.tailrec
/**
* Essentially copied from the deprecated Akka Http Deflate Compressor.
* @param compressionLevel
*/
class DeflateHelper (compressionLevel: Int) {
require(compressionLevel >= 0 && compressionLevel <= 9, "Compression level needs to be between 0 and 9")
import DeflateHelper._
def this() = this(DeflateHelper.DefaultCompressionLevel)
protected lazy val deflater = new Deflater(compressionLevel, false)
final def compressAndFlush(input: ByteString): ByteString = {
val buffer = newTempBuffer(input.size)
compressWithBuffer(input, buffer) ++ flushWithBuffer(buffer)
}
final def compressAndFinish(input: ByteString): ByteString = {
val buffer = newTempBuffer(input.size)
compressWithBuffer(input, buffer) ++ finishWithBuffer(buffer)
}
protected def compressWithBuffer(input: ByteString, buffer: Array[Byte]): ByteString = {
require(deflater.needsInput())
deflater.setInput(input.toArray)
drainDeflater(deflater, buffer)
}
protected def flushWithBuffer(buffer: Array[Byte]): ByteString = {
val written = deflater.deflate(buffer, 0, buffer.length, Deflater.SYNC_FLUSH)
ByteString.fromArray(buffer, 0, written)
}
protected def finishWithBuffer(buffer: Array[Byte]): ByteString = {
deflater.finish()
val res = drainDeflater(deflater, buffer)
deflater.end()
res
}
private def newTempBuffer(size: Int = 65536): Array[Byte] = {
new Array[Byte](math.max(size, MinBufferSize))
}
}
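// Illustrative usage sketch (not part of the original source), assuming the payload
// arrives as two hypothetical ByteString chunks. compressAndFlush keeps the deflater
// open for more input, while compressAndFinish must be the last call on a given
// DeflateHelper instance because it ends the underlying Deflater.
//
//   val helper = new DeflateHelper()                            // default level 6
//   val part1  = helper.compressAndFlush(ByteString("chunk 1"))
//   val part2  = helper.compressAndFinish(ByteString("chunk 2"))
//   val compressed = part1 ++ part2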
object DeflateHelper {
val MinBufferSize = 1024
val DefaultCompressionLevel = 6
@tailrec
def drainDeflater(deflater: Deflater, buffer: Array[Byte], result: ByteStringBuilder = new ByteStringBuilder()): ByteString = {
val len = deflater.deflate(buffer)
if (len > 0) {
result ++= ByteString.fromArray(buffer, 0, len)
drainDeflater(deflater, buffer, result)
} else {
require(deflater.needsInput())
result.result()
}
}
}
| jejking/rprng | src/main/scala/com/jejking/rprng/png/DeflateHelper.scala | Scala | apache-2.0 | 2,084 |
package ee.cone.c4gate
import ee.cone.c4actor.IdGenUtil
import ee.cone.c4proto._
trait KeyGenerator {
def idGenUtil: IdGenUtil
def genPK[P <: Product](model: P, adapter: ProtoAdapter[Product] with HasId): String =
idGenUtil.srcIdFromSerialized(adapter.id,ToByteString(adapter.encode(model)))
}
| conecenter/c4proto | base_lib/src/main/scala/ee/cone/c4gate/OrigKeyGenerator.scala | Scala | apache-2.0 | 305 |
package maker.project
import com.typesafe.zinc.Compiler
import java.io.{ File, FileWriter, Writer }
import maker.MakerProps
import org.apache.commons.io.FileUtils
class EnsimeGenerator(props: MakerProps) {
def generateModules(root: File, name: String, modules: List[Module]): Unit = {
val writer = new FileWriter(new File(root, ".ensime"))
try {
writer.append(";; Generated by Maker\\n\\n")
writer.append("(\\n")
writer.append(" :scala-version \\"" + props.ProjectScalaVersion.stringValue + "\\"\\n")
writer.append(" :java-flags (\\"-Xmx8g\\" \\"-XX:+UseConcMarkSweepGC\\" \\"-Xss2m\\")\\n") // hack
writer.append(" :java-home \\"" + props.JavaHome().getAbsolutePath + "\\"\\n")
writer.append(" :root-dir \\"" + root.getAbsolutePath + "\\"\\n")
writer.append(" :name \\"" + name + "\\"\\n")
// Scala library is shared between all modules
writer.append(" :compile-deps (\\n")
writer.append(" \\"" + props.ProjectScalaLibraryJar().getAbsolutePath + "\\"\\n")
writer.append(" )\\n")
// Java and Scala sources are shared between all modules
writer.append(" :reference-source-roots (\\n")
writer.append(" \\"" + new File(props.JavaHome(), "src.zip").getAbsolutePath + "\\"\\n")
writer.append(" \\"" + props.ProjectScalaLibrarySourceJar().getAbsolutePath + "\\"\\n")
writer.append(" )\\n")
writer.append(" :subprojects (\\n")
modules.foreach(appendModule(writer, _))
writer.append(
""" )
|)
|""".stripMargin)
} finally writer.close()
}
private def appendModule(writer: Writer, module: Module): Unit = {
writer.append(" (\\n")
writer.append(" :name \\"" + module.name + "\\"\\n")
writer.append(" :module-name \\"" + module.name + "\\"\\n")
writer.append(" :depends-on-modules (\\n")
module.immediateUpstreamModules.foreach { dep =>
writer.append(" \\"" + dep.name + "\\"\\n")
}
writer.append(" )\\n")
writer.append(" :compile-deps (\\n")
def appendDeps(m: Module): Unit = {
// hack: avoid duplicates already pulled in by upstream
def got(m: Module): List[String] =
m.managedJars.map(_.getName).toList ::: m.immediateUpstreamModules.flatMap(got)
val existing = m.immediateUpstreamModules.flatMap(got)
m.managedJars.filterNot { jar =>
existing.contains(jar.getName)
}.foreach { dep =>
writer.append(" \\"" + dep.getAbsolutePath + "\\"\\n")
}
}
appendDeps(module)
writer.append(" )\\n")
writer.append(" :reference-source-roots (\\n")
def appendDepRefSrcs(m: Module): Unit = {
if (!m.managedLibSourceDir.exists) return
def archives(m: Module): List[String] = {
val filenames = m.managedLibSourceDir.list
if (filenames == null) Nil
else filenames.toList.filter{ f =>
f.endsWith(".jar") || f.endsWith(".zip")
}
}
// hack: avoid duplicates already pulled in by upstream
def got(m: Module): List[String] =
archives(m) ::: m.immediateUpstreamModules.flatMap(got)
val existing = m.immediateUpstreamModules.flatMap(got)
archives(m) filterNot { jar =>
existing.contains(jar)
} foreach { dep =>
val from = new File(m.managedLibSourceDir, dep)
writer.append(" \\"" + from.getAbsolutePath + "\\"\\n")
}
}
appendDepRefSrcs(module)
writer.append(" )\\n")
writer.append(" :source-roots (\\n")
writer.append(" " + module.sourceDirs.map(_.getAbsolutePath).mkString("\\"","\\" \\"","\\"") + "\\n")
writer.append(" " + module.testSourceDirs.map(_.getAbsolutePath).mkString("\\"","\\" \\"","\\"") + "\\n")
writer.append(" )\\n")
writer.append(" :target \\"" + module.outputDir.getAbsolutePath + "\\"\\n")
writer.append(" :test-target \\"" + module.testOutputFile.getAbsolutePath + "\\"\\n")
writer.append(" )\\n")
}
}
| syl20bnr/maker | maker/src/maker/project/EnsimeGenerator.scala | Scala | bsd-2-clause | 4,007 |
/**
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.crdt.convergent
import akka.remote.testkit.MultiNodeConfig
import akka.crdt._
import akka.remote.testkit.MultiNodeSpec
import akka.remote.testconductor.RoleName
import akka.actor._
import akka.cluster._
import scala.util._
import scala.concurrent.duration._
import com.typesafe.config.ConfigFactory
import play.api.libs.json.Json._
import play.api.libs.json._
import scala.concurrent.Await
object GSetClusterSpecConfig extends MultiNodeConfig {
val node1 = role("node1")
val node2 = role("node2")
val node3 = role("node3")
commonConfig(ConfigFactory.parseString("""
akka.crdt.convergent.leveldb.destroy-on-shutdown = on
akka.actor.provider = akka.cluster.ClusterActorRefProvider
akka.cluster.auto-join = off
akka.cluster.auto-down = on
akka.loggers = ["akka.testkit.TestEventListener"]
akka.loglevel = INFO
akka.remote.log-remote-lifecycle-events = off
"""))
}
class GSetClusterSpecMultiJvmNode1 extends GSetClusterSpec
class GSetClusterSpecMultiJvmNode2 extends GSetClusterSpec
class GSetClusterSpecMultiJvmNode3 extends GSetClusterSpec
class GSetClusterSpec extends MultiNodeSpec(GSetClusterSpecConfig) with STMultiNodeSpec {
import GSetClusterSpecConfig._
implicit def roleNameToAddress(role: RoleName): Address = testConductor.getAddressFor(role).await
implicit val sys: ActorSystem = system
def initialParticipants = roles.size
"A ConvergentReplicatedDataTypeDatabase" must {
"Make sure that a GSet, used by multiple nodes, eventually converge to a consistent value" in {
val cluster = Cluster(system)
val db = ConvergentReplicatedDataTypeDatabase(system)
implicit val ec = system.dispatcher
val duration = 10 seconds
runOn(node1) { cluster join node1 }
runOn(node2) { cluster join node1 }
runOn(node3) { cluster join node1 }
Thread.sleep(5000)
// create CRDT on node1
runOn(node1) {
db.create[GSet]("users").size must be(0)
}
enterBarrier("stored g-set on node1")
// find CRDT by id on the other nodes
runOn(node2, node3) {
awaitAssert(Await.result(db.findById[GSet]("users"), duration)) // wait until it does not throw exception
}
enterBarrier("g-set exists on all nodes")
val coltrane = """{"username":"john","password":"coltrane"}"""
val rollins = """{"username":"sonny","password":"rollins"}"""
val parker = """{"username":"charlie","password":"parker"}"""
// let each node update the set
runOn(node1) {
db.findById[GSet]("users") map (_ + parse(coltrane)) foreach (db.update(_))
}
runOn(node2) {
db.findById[GSet]("users") map (_ + parse(rollins)) foreach (db.update(_))
}
runOn(node3) {
db.findById[GSet]("users") map (_ + parse(parker)) foreach (db.update(_))
db.findById[GSet]("users") map (_ + parse(rollins)) foreach (db.update(_)) // try to add the same element concurrently
}
enterBarrier("updated-set-on-all-nodes")
// make sure each node sees the converged set with all the users
runOn(node1, node2, node3) {
awaitCond(Await.result(db.findById[GSet]("users"), duration).value.size == 3, 10 seconds)
db.findById[GSet]("users") foreach { set =>
set.id must be("users")
set.dataType must be("g-set")
val usersAsStrings = set.value.map(stringify(_))
usersAsStrings.contains(coltrane) must be(true)
usersAsStrings.contains(rollins) must be(true)
usersAsStrings.contains(parker) must be(true)
}
}
enterBarrier("verified-set-on-all-nodes")
db.shutdown()
enterBarrier("after-shutdown")
}
}
}
| jboner/akka-crdt | src/multi-jvm/scala/akka/crdt/convergent/GSetClusterSpec.scala | Scala | apache-2.0 | 3,801 |
package com.getjenny.starchat.entities.persistents
import org.elasticsearch.action.get.GetResponse
import org.elasticsearch.action.search.SearchResponse
import org.elasticsearch.search.aggregations.bucket.nested.ParsedNested
import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms
import sun.reflect.generics.reflectiveObjects.NotImplementedException
import scala.collection.JavaConverters._
import scala.collection.immutable.Map
case class WordFrequenciesInQuery(frequencies: Map[String, Double])
object WordFrequenciesInQueryEntityManager extends ReadEntityManager[WordFrequenciesInQuery] {
override def fromSearchResponse(response: SearchResponse): List[WordFrequenciesInQuery] = {
val parsedNested: ParsedNested = response.getAggregations.get("queries")
val nQueries: Double = parsedNested.getDocCount
val parsedStringTerms: ParsedStringTerms = parsedNested.getAggregations.get("queries_children")
val result = if (nQueries > 0) {
parsedStringTerms.getBuckets.asScala.map {
bucket => bucket.getKeyAsString -> bucket.getDocCount / nQueries
}.toMap
}
else {
Map[String, Double]()
}
List(WordFrequenciesInQuery(result))
}
override def fromGetResponse(response: List[GetResponse]): List[WordFrequenciesInQuery] = {
throw new NotImplementedException()
}
} | GetJenny/starchat | src/main/scala/com/getjenny/starchat/entities/persistents/WordFrequenciesInQuery.scala | Scala | gpl-2.0 | 1,348 |
import synthesis.Definitions._
object BarbaraBrokeIt {
def main(args: Array[String]): Unit = {
println("Give me a lower bound [0-100]: ")
val lower: Int = Console.readInt
println("Give me an upper bound [0-100]: ")
val upper: Int = Console.readInt
try {
val (x,y,g) = choose(
(x:Int,y:Int,g:Int) => (
18*g == 3*x + 2*y &&
x > 0 && y > 0 &&
lower <= g && g <= upper
));
println("x: " + x)
println("y: " + y)
println("G: " + g)
} catch {
case UnsatisfiableConstraint() => println("Sorry, no solution")
}
}
}
| epfl-lara/comfusy | src/examples/BarbaraBrokeIt.scala | Scala | bsd-2-clause | 608 |
package edu.rit.csh.scaladb.serialization.binary
class ByteArrayOutput(size: Int) extends BinaryOutput {
private val buffer = new Array[Byte](size)
private var index = 0
def output: Array[Byte] = buffer
override def write(b: Array[Byte], off: Int, len: Int): Unit = {
System.arraycopy(b, off, output, index, len)
index += len
}
override def write(i: Int): Unit = {
buffer(index) = i.toByte
index += 1
}
override def toString: String = s"ByteArrayOutput of size: $size, current position: $index"
} | JDrit/RaftService | serialization/src/main/scala/edu/rit/csh/scaladb/serialization/binary/ByteArrayOutput.scala | Scala | apache-2.0 | 534 |
package biz.meetmatch.logging
import org.apache.spark.scheduler._
import scala.collection.mutable
class BusinessSparkListener extends SparkListener {
private val jobGroups = mutable.HashMap[Long, String]()
override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
val jobGroup = Option(jobStart.properties.getProperty("spark.jobGroup.id")).getOrElse("undefined")
val jobDescription = Option(jobStart.properties.getProperty("spark.job.description")).getOrElse("undefined")
val executionId = jobStart.properties.getProperty("spark.sql.execution.id")
new BusinessLogger(jobGroup).jobStarted(jobStart.jobId, jobDescription, jobStart.stageInfos.size, Option(executionId))
synchronized {
jobGroups(jobStart.jobId) = jobGroup
}
}
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
val result = jobEnd.jobResult match {
case JobSucceeded => "SUCCESS"
case _ => "FAILURE"
}
jobGroups.get(jobEnd.jobId).foreach(jobGroup => new BusinessLogger(jobGroup).jobStopped(jobEnd.jobId, result))
}
} | tolomaus/languagedetector | src/main/scala/biz/meetmatch/logging/BusinessSparkListener.scala | Scala | mit | 1,069 |
package controllers
import play.api._
import play.api.mvc._
import play.api.libs.iteratee.Done
import play.api.libs.json._
import play.api.http.HeaderNames
import play.api.data.Form
import play.api.data.Forms._
import play.modules.reactivemongo.MongoController
import play.modules.reactivemongo.json.collection.JSONCollection
import scala.concurrent.Future
import models.Blob
object Vault extends Controller with MongoController {
import play.api.Play.current
import play.api.libs.concurrent.Execution.Implicits.defaultContext
private def vault: JSONCollection = db.collection[JSONCollection]("vault")
private def log: JSONCollection = db.collection[JSONCollection]("log")
  private val enableLogging = current.configuration.getBoolean("vault.log").getOrElse(false)
private val allowOrigin = current.configuration.getString("vault.allow_origin").getOrElse("*")
def retrieve(key: String) = Allowed {
Action.async { implicit request =>
vault.find(Json.obj("_id" -> key)).one[JsValue].map {
case Some(js) =>
val blob = (js \\ "blob").as[Blob]
Logger.debug(request.method + " " + key + " " + blob)
logAccess(key)
Ok(blob.toBase64)
case None => Ok("")
}
}
}
private val blobForm = Form(single("blob" -> of[Blob]))
def update(key: String) = Allowed {
Action.async { implicit request =>
blobForm.bindFromRequest.fold(
_ => Future.successful(BadRequest),
blob => {
Logger.debug(request.method + " " + key + " " + blob)
val timestamp = System.currentTimeMillis()
val js = Json.obj("_id" -> key, "blob" -> blob, "lastaccess" -> timestamp)
vault.save(js).map { _ =>
logAccess(key)
NoContent
}
})
}
}
private def origin()(implicit request: RequestHeader) = request.headers.get(HeaderNames.ORIGIN)
private def logAccess(key: String)(implicit request: Request[AnyContent]) {
val access = Json.obj(
"timestamp" -> System.currentTimeMillis(),
"ip" -> request.remoteAddress,
"origin" -> origin)
vault.update(
Json.obj("_id" -> key),
Json.obj("$set" -> Json.obj("lastaccess" -> access)))
current.configuration.getBoolean("vault.log").foreach { enabled =>
if (enabled) {
val js = Json.obj(
"key" -> key,
"action" -> request.method,
"access" -> access)
log.insert(js)
}
}
}
private def Allowed(action: => EssentialAction) = EssentialAction { implicit request =>
if (allowOrigin == "*" || origin == Some(allowOrigin))
action(request).map(_.withHeaders(HeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN -> allowOrigin))
else
Done(Forbidden.withHeaders(HeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN -> "*"))
}
} | alexdupre/ripple-blobvault | app/controllers/Vault.scala | Scala | bsd-2-clause | 2,830 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.job.yarn
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.hadoop.fs.{FileStatus, Path, FileSystem}
import org.apache.hadoop.yarn.api.records.ApplicationId
import org.apache.hadoop.yarn.client.api.YarnClient
import org.apache.samza.SamzaException
import org.apache.samza.config.{MapConfig, JobConfig, Config, YarnConfig}
import org.mockito.Mockito._
import org.mockito.Matchers.any
import org.scalatest.FunSuite
import org.scalatest.mock.MockitoSugar
class TestClientHelper extends FunSuite {
import MockitoSugar._
val hadoopConfig = mock[Configuration]
val clientHelper = new ClientHelper(hadoopConfig) {
override def createYarnClient() = {
mock[YarnClient]
}
}
test("test validateJobConfig") {
import collection.JavaConverters._
var config = new MapConfig()
intercept[SamzaException] {
clientHelper.validateJobConfig(config)
}
config = new MapConfig(Map(JobConfig.JOB_SECURITY_MANAGER_FACTORY -> "some value").asJava)
clientHelper.validateJobConfig(config)
}
test("test prepareJobConfig") {
val jobContext = new JobContext
jobContext.setAppStagingDir(new Path("/user/temp/.samzaStaging/app_123"))
clientHelper.jobContext = jobContext
val ret = clientHelper.getSecurityYarnConfig
assert(ret.size == 2)
assert(ret.get(YarnConfig.YARN_JOB_STAGING_DIRECTORY) == Some("/user/temp/.samzaStaging/app_123"))
assert(ret.get(YarnConfig.YARN_CREDENTIALS_FILE) == Some("/user/temp/.samzaStaging/app_123/credentials"))
}
test("test setupAMLocalResources") {
val applicationId = mock[ApplicationId]
when(applicationId.toString).thenReturn("application_123")
val jobContext = new JobContext
jobContext.setAppId(applicationId)
clientHelper.jobContext = jobContext
val mockFs = mock[FileSystem]
val fileStatus = new FileStatus(0, false, 0, 0, System.currentTimeMillis(), null)
when(mockFs.getHomeDirectory).thenReturn(new Path("/user/test"))
when(mockFs.getFileStatus(any[Path])).thenReturn(fileStatus)
when(mockFs.mkdirs(any[Path])).thenReturn(true)
doNothing().when(mockFs).copyFromLocalFile(any[Path], any[Path])
doNothing().when(mockFs).setPermission(any[Path], any[FsPermission])
val ret = clientHelper.setupAMLocalResources(mockFs, Some("some.principal"), Some("some.keytab"))
assert(ret.size == 1)
assert(ret.contains("some.keytab"))
}
}
| InnovaCo/samza | samza-yarn/src/test/scala/org/apache/samza/job/yarn/TestClientHelper.scala | Scala | apache-2.0 | 3,296 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.File
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import java.util.UUID
import scala.util.Random
import org.scalatest.Matchers._
import org.apache.spark.SparkException
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.plans.logical.{Filter, OneRowRelation, Project, Union}
import org.apache.spark.sql.execution.{FilterExec, QueryExecution}
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ReusedExchangeExec, ShuffleExchange}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{ExamplePoint, ExamplePointUDT, SharedSQLContext}
import org.apache.spark.sql.test.SQLTestData.TestData2
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
class DataFrameSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("analysis error should be eagerly reported") {
intercept[Exception] { testData.select('nonExistentName) }
intercept[Exception] {
testData.groupBy('key).agg(Map("nonExistentName" -> "sum"))
}
intercept[Exception] {
testData.groupBy("nonExistentName").agg(Map("key" -> "sum"))
}
intercept[Exception] {
testData.groupBy($"abcd").agg(Map("key" -> "sum"))
}
}
test("dataframe toString") {
assert(testData.toString === "[key: int, value: string]")
assert(testData("key").toString === "key")
assert($"test".toString === "test")
}
test("rename nested groupby") {
val df = Seq((1, (1, 1))).toDF()
checkAnswer(
df.groupBy("_1").agg(sum("_2._1")).toDF("key", "total"),
Row(1, 1) :: Nil)
}
test("access complex data") {
assert(complexData.filter(complexData("a").getItem(0) === 2).count() == 1)
assert(complexData.filter(complexData("m").getItem("1") === 1).count() == 1)
assert(complexData.filter(complexData("s").getField("key") === 1).count() == 1)
}
test("table scan") {
checkAnswer(
testData,
testData.collect().toSeq)
}
test("union all") {
val unionDF = testData.union(testData).union(testData)
.union(testData).union(testData)
// Before optimizer, Union should be combined.
assert(unionDF.queryExecution.analyzed.collect {
case j: Union if j.children.size == 5 => j }.size === 1)
checkAnswer(
unionDF.agg(avg('key), max('key), min('key), sum('key)),
Row(50.5, 100, 1, 25250) :: Nil
)
}
test("union should union DataFrames with UDTs (SPARK-13410)") {
val rowRDD1 = sparkContext.parallelize(Seq(Row(1, new ExamplePoint(1.0, 2.0))))
val schema1 = StructType(Array(StructField("label", IntegerType, false),
StructField("point", new ExamplePointUDT(), false)))
val rowRDD2 = sparkContext.parallelize(Seq(Row(2, new ExamplePoint(3.0, 4.0))))
val schema2 = StructType(Array(StructField("label", IntegerType, false),
StructField("point", new ExamplePointUDT(), false)))
val df1 = spark.createDataFrame(rowRDD1, schema1)
val df2 = spark.createDataFrame(rowRDD2, schema2)
checkAnswer(
df1.union(df2).orderBy("label"),
Seq(Row(1, new ExamplePoint(1.0, 2.0)), Row(2, new ExamplePoint(3.0, 4.0)))
)
}
test("empty data frame") {
assert(spark.emptyDataFrame.columns.toSeq === Seq.empty[String])
assert(spark.emptyDataFrame.count() === 0)
}
test("head and take") {
assert(testData.take(2) === testData.collect().take(2))
assert(testData.head(2) === testData.collect().take(2))
assert(testData.head(2).head.schema === testData.schema)
}
test("dataframe alias") {
val df = Seq(Tuple1(1)).toDF("c").as("t")
val dfAlias = df.alias("t2")
df.col("t.c")
dfAlias.col("t2.c")
}
test("simple explode") {
val df = Seq(Tuple1("a b c"), Tuple1("d e")).toDF("words")
checkAnswer(
df.explode("words", "word") { word: String => word.split(" ").toSeq }.select('word),
Row("a") :: Row("b") :: Row("c") :: Row("d") ::Row("e") :: Nil
)
}
test("explode") {
val df = Seq((1, "a b c"), (2, "a b"), (3, "a")).toDF("number", "letters")
val df2 =
df.explode('letters) {
case Row(letters: String) => letters.split(" ").map(Tuple1(_)).toSeq
}
checkAnswer(
df2
.select('_1 as 'letter, 'number)
.groupBy('letter)
.agg(countDistinct('number)),
Row("a", 3) :: Row("b", 2) :: Row("c", 1) :: Nil
)
}
test("Star Expansion - CreateStruct and CreateArray") {
val structDf = testData2.select("a", "b").as("record")
// CreateStruct and CreateArray in aggregateExpressions
assert(structDf.groupBy($"a").agg(min(struct($"record.*"))).first() == Row(3, Row(3, 1)))
assert(structDf.groupBy($"a").agg(min(array($"record.*"))).first() == Row(3, Seq(3, 1)))
// CreateStruct and CreateArray in project list (unresolved alias)
assert(structDf.select(struct($"record.*")).first() == Row(Row(1, 1)))
assert(structDf.select(array($"record.*")).first().getAs[Seq[Int]](0) === Seq(1, 1))
// CreateStruct and CreateArray in project list (alias)
assert(structDf.select(struct($"record.*").as("a")).first() == Row(Row(1, 1)))
assert(structDf.select(array($"record.*").as("a")).first().getAs[Seq[Int]](0) === Seq(1, 1))
}
test("Star Expansion - hash") {
val structDf = testData2.select("a", "b").as("record")
checkAnswer(
structDf.groupBy($"a", $"b").agg(min(hash($"a", $"*"))),
structDf.groupBy($"a", $"b").agg(min(hash($"a", $"a", $"b"))))
checkAnswer(
structDf.groupBy($"a", $"b").agg(hash($"a", $"*")),
structDf.groupBy($"a", $"b").agg(hash($"a", $"a", $"b")))
checkAnswer(
structDf.select(hash($"*")),
structDf.select(hash($"record.*")))
checkAnswer(
structDf.select(hash($"a", $"*")),
structDf.select(hash($"a", $"record.*")))
}
test("Star Expansion - explode should fail with a meaningful message if it takes a star") {
val df = Seq(("1", "1,2"), ("2", "4"), ("3", "7,8,9")).toDF("prefix", "csv")
val e = intercept[AnalysisException] {
df.explode($"*") { case Row(prefix: String, csv: String) =>
csv.split(",").map(v => Tuple1(prefix + ":" + v)).toSeq
}.queryExecution.assertAnalyzed()
}
assert(e.getMessage.contains("Invalid usage of '*' in explode/json_tuple/UDTF"))
checkAnswer(
df.explode('prefix, 'csv) { case Row(prefix: String, csv: String) =>
csv.split(",").map(v => Tuple1(prefix + ":" + v)).toSeq
},
Row("1", "1,2", "1:1") ::
Row("1", "1,2", "1:2") ::
Row("2", "4", "2:4") ::
Row("3", "7,8,9", "3:7") ::
Row("3", "7,8,9", "3:8") ::
Row("3", "7,8,9", "3:9") :: Nil)
}
test("Star Expansion - explode alias and star") {
val df = Seq((Array("a"), 1)).toDF("a", "b")
checkAnswer(
df.select(explode($"a").as("a"), $"*"),
Row("a", Seq("a"), 1) :: Nil)
}
test("sort after generate with join=true") {
val df = Seq((Array("a"), 1)).toDF("a", "b")
checkAnswer(
df.select($"*", explode($"a").as("c")).sortWithinPartitions("b", "c"),
Row(Seq("a"), 1, "a") :: Nil)
}
test("selectExpr") {
checkAnswer(
testData.selectExpr("abs(key)", "value"),
testData.collect().map(row => Row(math.abs(row.getInt(0)), row.getString(1))).toSeq)
}
test("selectExpr with alias") {
checkAnswer(
testData.selectExpr("key as k").select("k"),
testData.select("key").collect().toSeq)
}
test("selectExpr with udtf") {
val df = Seq((Map("1" -> 1), 1)).toDF("a", "b")
checkAnswer(
df.selectExpr("explode(a)"),
Row("1", 1) :: Nil)
}
test("filterExpr") {
val res = testData.collect().filter(_.getInt(0) > 90).toSeq
checkAnswer(testData.filter("key > 90"), res)
checkAnswer(testData.filter("key > 9.0e1"), res)
checkAnswer(testData.filter("key > .9e+2"), res)
checkAnswer(testData.filter("key > 0.9e+2"), res)
checkAnswer(testData.filter("key > 900e-1"), res)
checkAnswer(testData.filter("key > 900.0E-1"), res)
checkAnswer(testData.filter("key > 9.e+1"), res)
}
test("filterExpr using where") {
checkAnswer(
testData.where("key > 50"),
testData.collect().filter(_.getInt(0) > 50).toSeq)
}
test("repartition") {
intercept[IllegalArgumentException] {
testData.select('key).repartition(0)
}
checkAnswer(
testData.select('key).repartition(10).select('key),
testData.select('key).collect().toSeq)
}
test("coalesce") {
intercept[IllegalArgumentException] {
testData.select('key).coalesce(0)
}
assert(testData.select('key).coalesce(1).rdd.partitions.size === 1)
checkAnswer(
testData.select('key).coalesce(1).select('key),
testData.select('key).collect().toSeq)
}
test("convert $\"attribute name\" into unresolved attribute") {
checkAnswer(
testData.where($"key" === lit(1)).select($"value"),
Row("1"))
}
test("convert Scala Symbol 'attrname into unresolved attribute") {
checkAnswer(
testData.where('key === lit(1)).select('value),
Row("1"))
}
test("select *") {
checkAnswer(
testData.select($"*"),
testData.collect().toSeq)
}
test("simple select") {
checkAnswer(
testData.where('key === lit(1)).select('value),
Row("1"))
}
test("select with functions") {
checkAnswer(
testData.select(sum('value), avg('value), count(lit(1))),
Row(5050.0, 50.5, 100))
checkAnswer(
testData2.select('a + 'b, 'a < 'b),
Seq(
Row(2, false),
Row(3, true),
Row(3, false),
Row(4, false),
Row(4, false),
Row(5, false)))
checkAnswer(
testData2.select(sumDistinct('a)),
Row(6))
}
test("sorting with null ordering") {
val data = Seq[java.lang.Integer](2, 1, null).toDF("key")
checkAnswer(data.orderBy('key.asc), Row(null) :: Row(1) :: Row(2) :: Nil)
checkAnswer(data.orderBy(asc("key")), Row(null) :: Row(1) :: Row(2) :: Nil)
checkAnswer(data.orderBy('key.asc_nulls_first), Row(null) :: Row(1) :: Row(2) :: Nil)
checkAnswer(data.orderBy(asc_nulls_first("key")), Row(null) :: Row(1) :: Row(2) :: Nil)
checkAnswer(data.orderBy('key.asc_nulls_last), Row(1) :: Row(2) :: Row(null) :: Nil)
checkAnswer(data.orderBy(asc_nulls_last("key")), Row(1) :: Row(2) :: Row(null) :: Nil)
checkAnswer(data.orderBy('key.desc), Row(2) :: Row(1) :: Row(null) :: Nil)
checkAnswer(data.orderBy(desc("key")), Row(2) :: Row(1) :: Row(null) :: Nil)
checkAnswer(data.orderBy('key.desc_nulls_first), Row(null) :: Row(2) :: Row(1) :: Nil)
checkAnswer(data.orderBy(desc_nulls_first("key")), Row(null) :: Row(2) :: Row(1) :: Nil)
checkAnswer(data.orderBy('key.desc_nulls_last), Row(2) :: Row(1) :: Row(null) :: Nil)
checkAnswer(data.orderBy(desc_nulls_last("key")), Row(2) :: Row(1) :: Row(null) :: Nil)
}
test("global sorting") {
checkAnswer(
testData2.orderBy('a.asc, 'b.asc),
Seq(Row(1, 1), Row(1, 2), Row(2, 1), Row(2, 2), Row(3, 1), Row(3, 2)))
checkAnswer(
testData2.orderBy(asc("a"), desc("b")),
Seq(Row(1, 2), Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1)))
checkAnswer(
testData2.orderBy('a.asc, 'b.desc),
Seq(Row(1, 2), Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1)))
checkAnswer(
testData2.orderBy('a.desc, 'b.desc),
Seq(Row(3, 2), Row(3, 1), Row(2, 2), Row(2, 1), Row(1, 2), Row(1, 1)))
checkAnswer(
testData2.orderBy('a.desc, 'b.asc),
Seq(Row(3, 1), Row(3, 2), Row(2, 1), Row(2, 2), Row(1, 1), Row(1, 2)))
checkAnswer(
arrayData.toDF().orderBy('data.getItem(0).asc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(0)).toSeq)
checkAnswer(
arrayData.toDF().orderBy('data.getItem(0).desc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(0)).reverse.toSeq)
checkAnswer(
arrayData.toDF().orderBy('data.getItem(1).asc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(1)).toSeq)
checkAnswer(
arrayData.toDF().orderBy('data.getItem(1).desc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(1)).reverse.toSeq)
}
test("limit") {
checkAnswer(
testData.limit(10),
testData.take(10).toSeq)
checkAnswer(
arrayData.toDF().limit(1),
arrayData.take(1).map(r => Row.fromSeq(r.productIterator.toSeq)))
checkAnswer(
mapData.toDF().limit(1),
mapData.take(1).map(r => Row.fromSeq(r.productIterator.toSeq)))
// SPARK-12340: overstep the bounds of Int in SparkPlan.executeTake
checkAnswer(
spark.range(2).toDF().limit(2147483638),
Row(0) :: Row(1) :: Nil
)
}
test("except") {
checkAnswer(
lowerCaseData.except(upperCaseData),
Row(1, "a") ::
Row(2, "b") ::
Row(3, "c") ::
Row(4, "d") :: Nil)
checkAnswer(lowerCaseData.except(lowerCaseData), Nil)
checkAnswer(upperCaseData.except(upperCaseData), Nil)
// check null equality
checkAnswer(
nullInts.except(nullInts.filter("0 = 1")),
nullInts)
checkAnswer(
nullInts.except(nullInts),
Nil)
// check if values are de-duplicated
checkAnswer(
allNulls.except(allNulls.filter("0 = 1")),
Row(null) :: Nil)
checkAnswer(
allNulls.except(allNulls),
Nil)
// check if values are de-duplicated
val df = Seq(("id1", 1), ("id1", 1), ("id", 1), ("id1", 2)).toDF("id", "value")
checkAnswer(
df.except(df.filter("0 = 1")),
Row("id1", 1) ::
Row("id", 1) ::
Row("id1", 2) :: Nil)
// check if the empty set on the left side works
checkAnswer(
allNulls.filter("0 = 1").except(allNulls),
Nil)
}
test("except distinct - SQL compliance") {
val df_left = Seq(1, 2, 2, 3, 3, 4).toDF("id")
val df_right = Seq(1, 3).toDF("id")
checkAnswer(
df_left.except(df_right),
Row(2) :: Row(4) :: Nil
)
}
test("except - nullability") {
val nonNullableInts = Seq(Tuple1(11), Tuple1(3)).toDF()
assert(nonNullableInts.schema.forall(!_.nullable))
val df1 = nonNullableInts.except(nullInts)
checkAnswer(df1, Row(11) :: Nil)
assert(df1.schema.forall(!_.nullable))
val df2 = nullInts.except(nonNullableInts)
checkAnswer(df2, Row(1) :: Row(2) :: Row(null) :: Nil)
assert(df2.schema.forall(_.nullable))
val df3 = nullInts.except(nullInts)
checkAnswer(df3, Nil)
assert(df3.schema.forall(_.nullable))
val df4 = nonNullableInts.except(nonNullableInts)
checkAnswer(df4, Nil)
assert(df4.schema.forall(!_.nullable))
}
test("intersect") {
checkAnswer(
lowerCaseData.intersect(lowerCaseData),
Row(1, "a") ::
Row(2, "b") ::
Row(3, "c") ::
Row(4, "d") :: Nil)
checkAnswer(lowerCaseData.intersect(upperCaseData), Nil)
// check null equality
checkAnswer(
nullInts.intersect(nullInts),
Row(1) ::
Row(2) ::
Row(3) ::
Row(null) :: Nil)
// check if values are de-duplicated
checkAnswer(
allNulls.intersect(allNulls),
Row(null) :: Nil)
// check if values are de-duplicated
val df = Seq(("id1", 1), ("id1", 1), ("id", 1), ("id1", 2)).toDF("id", "value")
checkAnswer(
df.intersect(df),
Row("id1", 1) ::
Row("id", 1) ::
Row("id1", 2) :: Nil)
}
test("intersect - nullability") {
val nonNullableInts = Seq(Tuple1(1), Tuple1(3)).toDF()
assert(nonNullableInts.schema.forall(!_.nullable))
val df1 = nonNullableInts.intersect(nullInts)
checkAnswer(df1, Row(1) :: Row(3) :: Nil)
assert(df1.schema.forall(!_.nullable))
val df2 = nullInts.intersect(nonNullableInts)
checkAnswer(df2, Row(1) :: Row(3) :: Nil)
assert(df2.schema.forall(!_.nullable))
val df3 = nullInts.intersect(nullInts)
checkAnswer(df3, Row(1) :: Row(2) :: Row(3) :: Row(null) :: Nil)
assert(df3.schema.forall(_.nullable))
val df4 = nonNullableInts.intersect(nonNullableInts)
checkAnswer(df4, Row(1) :: Row(3) :: Nil)
assert(df4.schema.forall(!_.nullable))
}
test("udf") {
val foo = udf((a: Int, b: String) => a.toString + b)
checkAnswer(
// SELECT *, foo(key, value) FROM testData
testData.select($"*", foo('key, 'value)).limit(3),
Row(1, "1", "11") :: Row(2, "2", "22") :: Row(3, "3", "33") :: Nil
)
}
test("callUDF without Hive Support") {
val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
df.sparkSession.udf.register("simpleUDF", (v: Int) => v * v)
checkAnswer(
df.select($"id", callUDF("simpleUDF", $"value")),
Row("id1", 1) :: Row("id2", 16) :: Row("id3", 25) :: Nil)
}
test("withColumn") {
val df = testData.toDF().withColumn("newCol", col("key") + 1)
checkAnswer(
df,
testData.collect().map { case Row(key: Int, value: String) =>
Row(key, value, key + 1)
}.toSeq)
assert(df.schema.map(_.name) === Seq("key", "value", "newCol"))
}
test("replace column using withColumn") {
val df2 = sparkContext.parallelize(Array(1, 2, 3)).toDF("x")
val df3 = df2.withColumn("x", df2("x") + 1)
checkAnswer(
df3.select("x"),
Row(2) :: Row(3) :: Row(4) :: Nil)
}
test("drop column using drop") {
val df = testData.drop("key")
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
test("drop columns using drop") {
val src = Seq((0, 2, 3)).toDF("a", "b", "c")
val df = src.drop("a", "b")
checkAnswer(df, Row(3))
assert(df.schema.map(_.name) === Seq("c"))
}
test("drop unknown column (no-op)") {
val df = testData.drop("random")
checkAnswer(
df,
testData.collect().toSeq)
assert(df.schema.map(_.name) === Seq("key", "value"))
}
test("drop column using drop with column reference") {
val col = testData("key")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
test("drop unknown column (no-op) with column reference") {
val col = Column("random")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().toSeq)
assert(df.schema.map(_.name) === Seq("key", "value"))
}
test("drop unknown column with same name with column reference") {
val col = Column("key")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
test("drop column after join with duplicate columns using column reference") {
val newSalary = salary.withColumnRenamed("personId", "id")
val col = newSalary("id")
// this join will result in duplicate "id" columns
val joinedDf = person.join(newSalary,
person("id") === newSalary("id"), "inner")
// remove only the "id" column that was associated with newSalary
val df = joinedDf.drop(col)
checkAnswer(
df,
joinedDf.collect().map {
case Row(id: Int, name: String, age: Int, idToDrop: Int, salary: Double) =>
Row(id, name, age, salary)
}.toSeq)
assert(df.schema.map(_.name) === Seq("id", "name", "age", "salary"))
assert(df("id") == person("id"))
}
test("drop top level columns that contains dot") {
val df1 = Seq((1, 2)).toDF("a.b", "a.c")
checkAnswer(df1.drop("a.b"), Row(2))
// Creates data set: {"a.b": 1, "a": {"b": 3}}
val df2 = Seq((1)).toDF("a.b").withColumn("a", struct(lit(3) as "b"))
    // Unlike select(), drop() parses the column name "a.b" literally, without interpreting "."
checkAnswer(df2.drop("a.b").select("a.b"), Row(3))
    // "`" is treated as a normal character here, with no interpretation: no column is literally
    // named "`a.b`", so this drop is a no-op and both columns remain.
assert(df2.drop("`a.b`").columns.size == 2)
}
  test("drop(name: String) searches and drops all top level columns that match the name") {
val df1 = Seq((1, 2)).toDF("a", "b")
val df2 = Seq((3, 4)).toDF("a", "b")
checkAnswer(df1.crossJoin(df2), Row(1, 2, 3, 4))
// Finds and drops all columns that match the name (case insensitive).
checkAnswer(df1.crossJoin(df2).drop("A"), Row(2, 4))
}
test("withColumnRenamed") {
val df = testData.toDF().withColumn("newCol", col("key") + 1)
.withColumnRenamed("value", "valueRenamed")
checkAnswer(
df,
testData.collect().map { case Row(key: Int, value: String) =>
Row(key, value, key + 1)
}.toSeq)
assert(df.schema.map(_.name) === Seq("key", "valueRenamed", "newCol"))
}
test("describe") {
val describeTestData = Seq(
("Bob", 16, 176),
("Alice", 32, 164),
("David", 60, 192),
("Amy", 24, 180)).toDF("name", "age", "height")
val describeResult = Seq(
Row("count", "4", "4", "4"),
Row("mean", null, "33.0", "178.0"),
Row("stddev", null, "19.148542155126762", "11.547005383792516"),
Row("min", "Alice", "16", "164"),
Row("max", "David", "60", "192"))
val emptyDescribeResult = Seq(
Row("count", "0", "0", "0"),
Row("mean", null, null, null),
Row("stddev", null, null, null),
Row("min", null, null, null),
Row("max", null, null, null))
def getSchemaAsSeq(df: DataFrame): Seq[String] = df.schema.map(_.name)
val describeTwoCols = describeTestData.describe("name", "age", "height")
assert(getSchemaAsSeq(describeTwoCols) === Seq("summary", "name", "age", "height"))
checkAnswer(describeTwoCols, describeResult)
    // All aggregate values should have been cast to string
describeTwoCols.collect().foreach { row =>
assert(row.get(2).isInstanceOf[String], "expected string but found " + row.get(2).getClass)
assert(row.get(3).isInstanceOf[String], "expected string but found " + row.get(3).getClass)
}
val describeAllCols = describeTestData.describe()
assert(getSchemaAsSeq(describeAllCols) === Seq("summary", "name", "age", "height"))
checkAnswer(describeAllCols, describeResult)
val describeOneCol = describeTestData.describe("age")
assert(getSchemaAsSeq(describeOneCol) === Seq("summary", "age"))
checkAnswer(describeOneCol, describeResult.map { case Row(s, _, d, _) => Row(s, d)} )
val describeNoCol = describeTestData.select("name").describe()
assert(getSchemaAsSeq(describeNoCol) === Seq("summary", "name"))
checkAnswer(describeNoCol, describeResult.map { case Row(s, n, _, _) => Row(s, n)} )
val emptyDescription = describeTestData.limit(0).describe()
assert(getSchemaAsSeq(emptyDescription) === Seq("summary", "name", "age", "height"))
checkAnswer(emptyDescription, emptyDescribeResult)
}
test("apply on query results (SPARK-5462)") {
val df = testData.sparkSession.sql("select key from testData")
checkAnswer(df.select(df("key")), testData.select('key).collect().toSeq)
}
test("inputFiles") {
withTempDir { dir =>
val df = Seq((1, 22)).toDF("a", "b")
val parquetDir = new File(dir, "parquet").getCanonicalPath
df.write.parquet(parquetDir)
val parquetDF = spark.read.parquet(parquetDir)
assert(parquetDF.inputFiles.nonEmpty)
val jsonDir = new File(dir, "json").getCanonicalPath
df.write.json(jsonDir)
val jsonDF = spark.read.json(jsonDir)
      assert(jsonDF.inputFiles.nonEmpty)
val unioned = jsonDF.union(parquetDF).inputFiles.sorted
val allFiles = (jsonDF.inputFiles ++ parquetDF.inputFiles).distinct.sorted
assert(unioned === allFiles)
}
}
ignore("show") {
    // This test case is intentionally ignored, but is kept to make sure it compiles correctly
testData.select($"*").show()
testData.select($"*").show(1000)
}
test("showString: truncate = [0, 20]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = """+---------------------+
||value |
|+---------------------+
||1 |
||111111111111111111111|
|+---------------------+
|""".stripMargin
assert(df.showString(10, truncate = 0) === expectedAnswerForFalse)
val expectedAnswerForTrue = """+--------------------+
|| value|
|+--------------------+
|| 1|
||11111111111111111...|
|+--------------------+
|""".stripMargin
assert(df.showString(10, truncate = 20) === expectedAnswerForTrue)
}
test("showString: truncate = [3, 17]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = """+-----+
||value|
|+-----+
|| 1|
|| 111|
|+-----+
|""".stripMargin
assert(df.showString(10, truncate = 3) === expectedAnswerForFalse)
val expectedAnswerForTrue = """+-----------------+
|| value|
|+-----------------+
|| 1|
||11111111111111...|
|+-----------------+
|""".stripMargin
assert(df.showString(10, truncate = 17) === expectedAnswerForTrue)
}
test("showString(negative)") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|only showing top 0 rows
|""".stripMargin
assert(testData.select($"*").showString(-1) === expectedAnswer)
}
test("showString(0)") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|only showing top 0 rows
|""".stripMargin
assert(testData.select($"*").showString(0) === expectedAnswer)
}
test("showString: array") {
val df = Seq(
(Array(1, 2, 3), Array(1, 2, 3)),
(Array(2, 3, 4), Array(2, 3, 4))
).toDF()
val expectedAnswer = """+---------+---------+
|| _1| _2|
|+---------+---------+
||[1, 2, 3]|[1, 2, 3]|
||[2, 3, 4]|[2, 3, 4]|
|+---------+---------+
|""".stripMargin
assert(df.showString(10) === expectedAnswer)
}
test("showString: binary") {
val df = Seq(
("12".getBytes(StandardCharsets.UTF_8), "ABC.".getBytes(StandardCharsets.UTF_8)),
("34".getBytes(StandardCharsets.UTF_8), "12346".getBytes(StandardCharsets.UTF_8))
).toDF()
val expectedAnswer = """+-------+----------------+
|| _1| _2|
|+-------+----------------+
||[31 32]| [41 42 43 2E]|
||[33 34]|[31 32 33 34 36]|
|+-------+----------------+
|""".stripMargin
assert(df.showString(10) === expectedAnswer)
}
test("showString: minimum column width") {
val df = Seq(
(1, 1),
(2, 2)
).toDF()
val expectedAnswer = """+---+---+
|| _1| _2|
|+---+---+
|| 1| 1|
|| 2| 2|
|+---+---+
|""".stripMargin
assert(df.showString(10) === expectedAnswer)
}
test("SPARK-7319 showString") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|""".stripMargin
assert(testData.select($"*").showString(1) === expectedAnswer)
}
test("SPARK-7327 show with empty dataFrame") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|""".stripMargin
assert(testData.select($"*").filter($"key" < 0).showString(1) === expectedAnswer)
}
test("createDataFrame(RDD[Row], StructType) should convert UDTs (SPARK-6672)") {
val rowRDD = sparkContext.parallelize(Seq(Row(new ExamplePoint(1.0, 2.0))))
val schema = StructType(Array(StructField("point", new ExamplePointUDT(), false)))
val df = spark.createDataFrame(rowRDD, schema)
df.rdd.collect()
}
test("SPARK-6899: type should match when using codegen") {
checkAnswer(decimalData.agg(avg('a)), Row(new java.math.BigDecimal(2.0)))
}
test("SPARK-7133: Implement struct, array, and map field accessor") {
assert(complexData.filter(complexData("a")(0) === 2).count() == 1)
assert(complexData.filter(complexData("m")("1") === 1).count() == 1)
assert(complexData.filter(complexData("s")("key") === 1).count() == 1)
assert(complexData.filter(complexData("m")(complexData("s")("value")) === 1).count() == 1)
assert(complexData.filter(complexData("a")(complexData("s")("key")) === 1).count() == 1)
}
test("SPARK-7551: support backticks for DataFrame attribute resolution") {
val df = spark.read.json(sparkContext.makeRDD(
"""{"a.b": {"c": {"d..e": {"f": 1}}}}""" :: Nil))
checkAnswer(
df.select(df("`a.b`.c.`d..e`.`f`")),
Row(1)
)
val df2 = spark.read.json(sparkContext.makeRDD(
"""{"a b": {"c": {"d e": {"f": 1}}}}""" :: Nil))
checkAnswer(
df2.select(df2("`a b`.c.d e.f")),
Row(1)
)
def checkError(testFun: => Unit): Unit = {
val e = intercept[org.apache.spark.sql.AnalysisException] {
testFun
}
assert(e.getMessage.contains("syntax error in attribute name:"))
}
checkError(df("`abc.`c`"))
checkError(df("`abc`..d"))
checkError(df("`a`.b."))
checkError(df("`a.b`.c.`d"))
}
test("SPARK-7324 dropDuplicates") {
val testData = sparkContext.parallelize(
(2, 1, 2) :: (1, 1, 1) ::
(1, 2, 1) :: (2, 1, 2) ::
(2, 2, 2) :: (2, 2, 1) ::
(2, 1, 1) :: (1, 1, 2) ::
(1, 2, 2) :: (1, 2, 1) :: Nil).toDF("key", "value1", "value2")
checkAnswer(
testData.dropDuplicates(),
Seq(Row(2, 1, 2), Row(1, 1, 1), Row(1, 2, 1),
Row(2, 2, 2), Row(2, 1, 1), Row(2, 2, 1),
Row(1, 1, 2), Row(1, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("key", "value1")),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("value1", "value2")),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("key")),
Seq(Row(2, 1, 2), Row(1, 1, 1)))
checkAnswer(
testData.dropDuplicates(Seq("value1")),
Seq(Row(2, 1, 2), Row(1, 2, 1)))
checkAnswer(
testData.dropDuplicates(Seq("value2")),
Seq(Row(2, 1, 2), Row(1, 1, 1)))
checkAnswer(
testData.dropDuplicates("key", "value1"),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
}
test("SPARK-7150 range api") {
    // numSlices is greater than the length
val res1 = spark.range(0, 10, 1, 15).select("id")
assert(res1.count == 10)
assert(res1.agg(sum("id")).as("sumid").collect() === Seq(Row(45)))
val res2 = spark.range(3, 15, 3, 2).select("id")
assert(res2.count == 4)
assert(res2.agg(sum("id")).as("sumid").collect() === Seq(Row(30)))
val res3 = spark.range(1, -2).select("id")
assert(res3.count == 0)
// start is positive, end is negative, step is negative
val res4 = spark.range(1, -2, -2, 6).select("id")
assert(res4.count == 2)
assert(res4.agg(sum("id")).as("sumid").collect() === Seq(Row(0)))
// start, end, step are negative
val res5 = spark.range(-3, -8, -2, 1).select("id")
assert(res5.count == 3)
assert(res5.agg(sum("id")).as("sumid").collect() === Seq(Row(-15)))
// start, end are negative, step is positive
val res6 = spark.range(-8, -4, 2, 1).select("id")
assert(res6.count == 2)
assert(res6.agg(sum("id")).as("sumid").collect() === Seq(Row(-14)))
val res7 = spark.range(-10, -9, -20, 1).select("id")
assert(res7.count == 0)
val res8 = spark.range(Long.MinValue, Long.MaxValue, Long.MaxValue, 100).select("id")
assert(res8.count == 3)
assert(res8.agg(sum("id")).as("sumid").collect() === Seq(Row(-3)))
val res9 = spark.range(Long.MaxValue, Long.MinValue, Long.MinValue, 100).select("id")
assert(res9.count == 2)
assert(res9.agg(sum("id")).as("sumid").collect() === Seq(Row(Long.MaxValue - 1)))
// only end provided as argument
val res10 = spark.range(10).select("id")
assert(res10.count == 10)
assert(res10.agg(sum("id")).as("sumid").collect() === Seq(Row(45)))
val res11 = spark.range(-1).select("id")
assert(res11.count == 0)
// using the default slice number
val res12 = spark.range(3, 15, 3).select("id")
assert(res12.count == 4)
assert(res12.agg(sum("id")).as("sumid").collect() === Seq(Row(30)))
}
test("SPARK-8621: support empty string column name") {
val df = Seq(Tuple1(1)).toDF("").as("t")
// We should allow empty string as column name
df.col("")
df.col("t.``")
}
test("SPARK-8797: sort by float column containing NaN should not crash") {
val inputData = Seq.fill(10)(Tuple1(Float.NaN)) ++ (1 to 1000).map(x => Tuple1(x.toFloat))
val df = Random.shuffle(inputData).toDF("a")
df.orderBy("a").collect()
}
test("SPARK-8797: sort by double column containing NaN should not crash") {
val inputData = Seq.fill(10)(Tuple1(Double.NaN)) ++ (1 to 1000).map(x => Tuple1(x.toDouble))
val df = Random.shuffle(inputData).toDF("a")
df.orderBy("a").collect()
}
test("NaN is greater than all other non-NaN numeric values") {
val maxDouble = Seq(Double.NaN, Double.PositiveInfinity, Double.MaxValue)
.map(Tuple1.apply).toDF("a").selectExpr("max(a)").first()
assert(java.lang.Double.isNaN(maxDouble.getDouble(0)))
val maxFloat = Seq(Float.NaN, Float.PositiveInfinity, Float.MaxValue)
.map(Tuple1.apply).toDF("a").selectExpr("max(a)").first()
assert(java.lang.Float.isNaN(maxFloat.getFloat(0)))
}
test("SPARK-8072: Better Exception for Duplicate Columns") {
// only one duplicate column present
val e = intercept[org.apache.spark.sql.AnalysisException] {
Seq((1, 2, 3), (2, 3, 4), (3, 4, 5)).toDF("column1", "column2", "column1")
.write.format("parquet").save("temp")
}
assert(e.getMessage.contains("Duplicate column(s)"))
assert(e.getMessage.contains("column1"))
assert(!e.getMessage.contains("column2"))
// multiple duplicate columns present
val f = intercept[org.apache.spark.sql.AnalysisException] {
Seq((1, 2, 3, 4, 5), (2, 3, 4, 5, 6), (3, 4, 5, 6, 7))
.toDF("column1", "column2", "column3", "column1", "column3")
.write.format("json").save("temp")
}
assert(f.getMessage.contains("Duplicate column(s)"))
assert(f.getMessage.contains("column1"))
assert(f.getMessage.contains("column3"))
assert(!f.getMessage.contains("column2"))
}
test("SPARK-6941: Better error message for inserting into RDD-based Table") {
withTempDir { dir =>
val tempParquetFile = new File(dir, "tmp_parquet")
val tempJsonFile = new File(dir, "tmp_json")
val df = Seq(Tuple1(1)).toDF()
val insertion = Seq(Tuple1(2)).toDF("col")
// pass case: parquet table (HadoopFsRelation)
df.write.mode(SaveMode.Overwrite).parquet(tempParquetFile.getCanonicalPath)
val pdf = spark.read.parquet(tempParquetFile.getCanonicalPath)
pdf.createOrReplaceTempView("parquet_base")
insertion.write.insertInto("parquet_base")
// pass case: json table (InsertableRelation)
df.write.mode(SaveMode.Overwrite).json(tempJsonFile.getCanonicalPath)
val jdf = spark.read.json(tempJsonFile.getCanonicalPath)
jdf.createOrReplaceTempView("json_base")
insertion.write.mode(SaveMode.Overwrite).insertInto("json_base")
// error cases: insert into an RDD
df.createOrReplaceTempView("rdd_base")
val e1 = intercept[AnalysisException] {
insertion.write.insertInto("rdd_base")
}
assert(e1.getMessage.contains("Inserting into an RDD-based table is not allowed."))
// error case: insert into a logical plan that is not a LeafNode
val indirectDS = pdf.select("_1").filter($"_1" > 5)
indirectDS.createOrReplaceTempView("indirect_ds")
val e2 = intercept[AnalysisException] {
insertion.write.insertInto("indirect_ds")
}
assert(e2.getMessage.contains("Inserting into an RDD-based table is not allowed."))
// error case: insert into an OneRowRelation
Dataset.ofRows(spark, OneRowRelation).createOrReplaceTempView("one_row")
val e3 = intercept[AnalysisException] {
insertion.write.insertInto("one_row")
}
assert(e3.getMessage.contains("Inserting into an RDD-based table is not allowed."))
}
}
test("SPARK-8608: call `show` on local DataFrame with random columns should return same value") {
val df = testData.select(rand(33))
assert(df.showString(5) == df.showString(5))
// We will reuse the same Expression object for LocalRelation.
val df1 = (1 to 10).map(Tuple1.apply).toDF().select(rand(33))
assert(df1.showString(5) == df1.showString(5))
}
test("SPARK-8609: local DataFrame with random columns should return same value after sort") {
checkAnswer(testData.sort(rand(33)), testData.sort(rand(33)))
// We will reuse the same Expression object for LocalRelation.
val df = (1 to 10).map(Tuple1.apply).toDF()
checkAnswer(df.sort(rand(33)), df.sort(rand(33)))
}
test("SPARK-9083: sort with non-deterministic expressions") {
import org.apache.spark.util.random.XORShiftRandom
val seed = 33
val df = (1 to 100).map(Tuple1.apply).toDF("i")
val random = new XORShiftRandom(seed)
val expected = (1 to 100).map(_ -> random.nextDouble()).sortBy(_._2).map(_._1)
val actual = df.sort(rand(seed)).collect().map(_.getInt(0))
assert(expected === actual)
}
test("Sorting columns are not in Filter and Project") {
checkAnswer(
upperCaseData.filter('N > 1).select('N).filter('N < 6).orderBy('L.asc),
Row(2) :: Row(3) :: Row(4) :: Row(5) :: Nil)
}
test("SPARK-9323: DataFrame.orderBy should support nested column name") {
val df = spark.read.json(sparkContext.makeRDD(
"""{"a": {"b": 1}}""" :: Nil))
checkAnswer(df.orderBy("a.b"), Row(Row(1)))
}
test("SPARK-9950: correctly analyze grouping/aggregating on struct fields") {
val df = Seq(("x", (1, 1)), ("y", (2, 2))).toDF("a", "b")
checkAnswer(df.groupBy("b._1").agg(sum("b._2")), Row(1, 1) :: Row(2, 2) :: Nil)
}
test("SPARK-10093: Avoid transformations on executors") {
val df = Seq((1, 1)).toDF("a", "b")
df.where($"a" === 1)
.select($"a", $"b", struct($"b"))
.orderBy("a")
.select(struct($"b"))
.collect()
}
test("SPARK-10185: Read multiple Hadoop Filesystem paths and paths with a comma in it") {
withTempDir { dir =>
val df1 = Seq((1, 22)).toDF("a", "b")
val dir1 = new File(dir, "dir,1").getCanonicalPath
df1.write.format("json").save(dir1)
val df2 = Seq((2, 23)).toDF("a", "b")
val dir2 = new File(dir, "dir2").getCanonicalPath
df2.write.format("json").save(dir2)
checkAnswer(spark.read.format("json").load(dir1, dir2),
Row(1, 22) :: Row(2, 23) :: Nil)
checkAnswer(spark.read.format("json").load(dir1),
Row(1, 22) :: Nil)
}
}
test("Alias uses internally generated names 'aggOrder' and 'havingCondition'") {
val df = Seq(1 -> 2).toDF("i", "j")
val query1 = df.groupBy('i)
.agg(max('j).as("aggOrder"))
.orderBy(sum('j))
checkAnswer(query1, Row(1, 2))
    // In the plan, there are two attributes having the same name, 'havingCondition'.
    // One is a user-provided alias name; the other is an internally generated one.
val query2 = df.groupBy('i)
.agg(max('j).as("havingCondition"))
.where(sum('j) > 0)
.orderBy('havingCondition.asc)
checkAnswer(query2, Row(1, 2))
}
test("SPARK-10316: respect non-deterministic expressions in PhysicalOperation") {
val input = spark.read.json(spark.sparkContext.makeRDD(
(1 to 10).map(i => s"""{"id": $i}""")))
val df = input.select($"id", rand(0).as('r))
df.as("a").join(df.filter($"r" < 0.5).as("b"), $"a.id" === $"b.id").collect().foreach { row =>
assert(row.getDouble(1) - row.getDouble(3) === 0.0 +- 0.001)
}
}
test("SPARK-10539: Project should not be pushed down through Intersect or Except") {
val df1 = (1 to 100).map(Tuple1.apply).toDF("i")
val df2 = (1 to 30).map(Tuple1.apply).toDF("i")
val intersect = df1.intersect(df2)
val except = df1.except(df2)
assert(intersect.count() === 30)
assert(except.count() === 70)
}
test("SPARK-10740: handle nondeterministic expressions correctly for set operations") {
val df1 = (1 to 20).map(Tuple1.apply).toDF("i")
val df2 = (1 to 10).map(Tuple1.apply).toDF("i")
    // When generating the expected results here, we need to follow the implementation of
    // the Rand expression.
def expected(df: DataFrame): Seq[Row] = {
df.rdd.collectPartitions().zipWithIndex.flatMap {
case (data, index) =>
val rng = new org.apache.spark.util.random.XORShiftRandom(7 + index)
data.filter(_.getInt(0) < rng.nextDouble() * 10)
}
}
val union = df1.union(df2)
checkAnswer(
union.filter('i < rand(7) * 10),
expected(union)
)
checkAnswer(
union.select(rand(7)),
union.rdd.collectPartitions().zipWithIndex.flatMap {
case (data, index) =>
val rng = new org.apache.spark.util.random.XORShiftRandom(7 + index)
data.map(_ => rng.nextDouble()).map(i => Row(i))
}
)
val intersect = df1.intersect(df2)
checkAnswer(
intersect.filter('i < rand(7) * 10),
expected(intersect)
)
val except = df1.except(df2)
checkAnswer(
except.filter('i < rand(7) * 10),
expected(except)
)
}
  test("SPARK-10743: keep the name of the expression if possible when doing a cast") {
val df = (1 to 10).map(Tuple1.apply).toDF("i").as("src")
assert(df.select($"src.i".cast(StringType)).columns.head === "i")
assert(df.select($"src.i".cast(StringType).cast(IntegerType)).columns.head === "i")
}
test("SPARK-11301: fix case sensitivity for filter on partitioned columns") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTempPath { path =>
Seq(2012 -> "a").toDF("year", "val").write.partitionBy("year").parquet(path.getAbsolutePath)
val df = spark.read.parquet(path.getAbsolutePath)
checkAnswer(df.filter($"yEAr" > 2000).select($"val"), Row("a"))
}
}
}
/**
* Verifies that there is no Exchange between the Aggregations for `df`
*/
private def verifyNonExchangingAgg(df: DataFrame) = {
var atFirstAgg: Boolean = false
df.queryExecution.executedPlan.foreach {
case agg: HashAggregateExec =>
atFirstAgg = !atFirstAgg
case _ =>
if (atFirstAgg) {
fail("Should not have operators between the two aggregations")
}
}
}
/**
* Verifies that there is an Exchange between the Aggregations for `df`
*/
private def verifyExchangingAgg(df: DataFrame) = {
var atFirstAgg: Boolean = false
df.queryExecution.executedPlan.foreach {
case agg: HashAggregateExec =>
if (atFirstAgg) {
fail("Should not have back to back Aggregates")
}
atFirstAgg = true
case e: ShuffleExchange => atFirstAgg = false
case _ =>
}
}
test("distributeBy and localSort") {
val original = testData.repartition(1)
assert(original.rdd.partitions.length == 1)
val df = original.repartition(5, $"key")
assert(df.rdd.partitions.length == 5)
checkAnswer(original.select(), df.select())
val df2 = original.repartition(10, $"key")
assert(df2.rdd.partitions.length == 10)
checkAnswer(original.select(), df2.select())
// Group by the column we are distributed by. This should generate a plan with no exchange
// between the aggregates
val df3 = testData.repartition($"key").groupBy("key").count()
verifyNonExchangingAgg(df3)
verifyNonExchangingAgg(testData.repartition($"key", $"value")
.groupBy("key", "value").count())
    // Grouping by just the first distributeBy expr requires an exchange.
verifyExchangingAgg(testData.repartition($"key", $"value")
.groupBy("key").count())
val data = spark.sparkContext.parallelize(
(1 to 100).map(i => TestData2(i % 10, i))).toDF()
// Distribute and order by.
val df4 = data.repartition($"a").sortWithinPartitions($"b".desc)
// Walk each partition and verify that it is sorted descending and does not contain all
// the values.
df4.rdd.foreachPartition { p =>
// Skip empty partition
if (p.hasNext) {
var previousValue: Int = -1
var allSequential: Boolean = true
p.foreach { r =>
val v: Int = r.getInt(1)
if (previousValue != -1) {
if (previousValue < v) throw new SparkException("Partition is not ordered.")
if (v + 1 != previousValue) allSequential = false
}
previousValue = v
}
if (allSequential) throw new SparkException("Partition should not be globally ordered")
}
}
// Distribute and order by with multiple order bys
val df5 = data.repartition(2, $"a").sortWithinPartitions($"b".asc, $"a".asc)
// Walk each partition and verify that it is sorted ascending
df5.rdd.foreachPartition { p =>
var previousValue: Int = -1
var allSequential: Boolean = true
p.foreach { r =>
val v: Int = r.getInt(1)
if (previousValue != -1) {
if (previousValue > v) throw new SparkException("Partition is not ordered.")
if (v - 1 != previousValue) allSequential = false
}
previousValue = v
}
if (allSequential) throw new SparkException("Partition should not be all sequential")
}
// Distribute into one partition and order by. This partition should contain all the values.
val df6 = data.repartition(1, $"a").sortWithinPartitions("b")
// Walk each partition and verify that it is sorted ascending and not globally sorted.
df6.rdd.foreachPartition { p =>
var previousValue: Int = -1
var allSequential: Boolean = true
p.foreach { r =>
val v: Int = r.getInt(1)
if (previousValue != -1) {
if (previousValue > v) throw new SparkException("Partition is not ordered.")
if (v - 1 != previousValue) allSequential = false
}
previousValue = v
}
if (!allSequential) throw new SparkException("Partition should contain all sequential values")
}
}
test("fix case sensitivity of partition by") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTempPath { path =>
val p = path.getAbsolutePath
Seq(2012 -> "a").toDF("year", "val").write.partitionBy("yEAr").parquet(p)
checkAnswer(spark.read.parquet(p).select("YeaR"), Row(2012))
}
}
}
  // This is a regression test for a bug triggered when making a new instance of LogicalRDD.
test("SPARK-11633: LogicalRDD throws TreeNode Exception: Failed to Copy Node") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val rdd = sparkContext.makeRDD(Seq(Row(1, 3), Row(2, 1)))
val df = spark.createDataFrame(
rdd,
new StructType().add("f1", IntegerType).add("f2", IntegerType),
needsConversion = false).select($"F1", $"f2".as("f2"))
val df1 = df.as("a")
val df2 = df.as("b")
checkAnswer(df1.join(df2, $"a.f2" === $"b.f2"), Row(1, 3, 1, 3) :: Row(2, 1, 2, 1) :: Nil)
}
}
test("SPARK-10656: completely support special chars") {
val df = Seq(1 -> "a").toDF("i_$.a", "d^'a.")
checkAnswer(df.select(df("*")), Row(1, "a"))
checkAnswer(df.withColumnRenamed("d^'a.", "a"), Row(1, "a"))
}
test("SPARK-11725: correctly handle null inputs for ScalaUDF") {
val df = sparkContext.parallelize(Seq(
new java.lang.Integer(22) -> "John",
null.asInstanceOf[java.lang.Integer] -> "Lucy")).toDF("age", "name")
// passing null into the UDF that could handle it
val boxedUDF = udf[java.lang.Integer, java.lang.Integer] {
(i: java.lang.Integer) => if (i == null) -10 else null
}
checkAnswer(df.select(boxedUDF($"age")), Row(null) :: Row(-10) :: Nil)
spark.udf.register("boxedUDF",
(i: java.lang.Integer) => (if (i == null) -10 else null): java.lang.Integer)
checkAnswer(sql("select boxedUDF(null), boxedUDF(-1)"), Row(-10, null) :: Nil)
val primitiveUDF = udf((i: Int) => i * 2)
checkAnswer(df.select(primitiveUDF($"age")), Row(44) :: Row(null) :: Nil)
}
test("SPARK-12398 truncated toString") {
val df1 = Seq((1L, "row1")).toDF("id", "name")
assert(df1.toString() === "[id: bigint, name: string]")
val df2 = Seq((1L, "c2", false)).toDF("c1", "c2", "c3")
assert(df2.toString === "[c1: bigint, c2: string ... 1 more field]")
val df3 = Seq((1L, "c2", false, 10)).toDF("c1", "c2", "c3", "c4")
assert(df3.toString === "[c1: bigint, c2: string ... 2 more fields]")
val df4 = Seq((1L, Tuple2(1L, "val"))).toDF("c1", "c2")
assert(df4.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string>]")
val df5 = Seq((1L, Tuple2(1L, "val"), 20.0)).toDF("c1", "c2", "c3")
assert(df5.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string> ... 1 more field]")
val df6 = Seq((1L, Tuple2(1L, "val"), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(df6.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string> ... 2 more fields]")
val df7 = Seq((1L, Tuple3(1L, "val", 2), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(
df7.toString ===
"[c1: bigint, c2: struct<_1: bigint, _2: string ... 1 more field> ... 2 more fields]")
val df8 = Seq((1L, Tuple7(1L, "val", 2, 3, 4, 5, 6), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(
df8.toString ===
"[c1: bigint, c2: struct<_1: bigint, _2: string ... 5 more fields> ... 2 more fields]")
val df9 =
Seq((1L, Tuple4(1L, Tuple4(1L, 2L, 3L, 4L), 2L, 3L), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(
df9.toString ===
"[c1: bigint, c2: struct<_1: bigint," +
" _2: struct<_1: bigint," +
" _2: bigint ... 2 more fields> ... 2 more fields> ... 2 more fields]")
}
test("reuse exchange") {
withSQLConf("spark.sql.autoBroadcastJoinThreshold" -> "2") {
val df = spark.range(100).toDF()
val join = df.join(df, "id")
val plan = join.queryExecution.executedPlan
checkAnswer(join, df)
assert(
join.queryExecution.executedPlan.collect { case e: ShuffleExchange => true }.size === 1)
assert(
join.queryExecution.executedPlan.collect { case e: ReusedExchangeExec => true }.size === 1)
val broadcasted = broadcast(join)
val join2 = join.join(broadcasted, "id").join(broadcasted, "id")
checkAnswer(join2, df)
assert(
join2.queryExecution.executedPlan.collect { case e: ShuffleExchange => true }.size === 1)
assert(
join2.queryExecution.executedPlan
.collect { case e: BroadcastExchangeExec => true }.size === 1)
assert(
join2.queryExecution.executedPlan.collect { case e: ReusedExchangeExec => true }.size === 4)
}
}
test("sameResult() on aggregate") {
val df = spark.range(100)
val agg1 = df.groupBy().count()
val agg2 = df.groupBy().count()
    // two aggregates with different ExprIds within them should have the same result
assert(agg1.queryExecution.executedPlan.sameResult(agg2.queryExecution.executedPlan))
val agg3 = df.groupBy().sum()
assert(!agg1.queryExecution.executedPlan.sameResult(agg3.queryExecution.executedPlan))
val df2 = spark.range(101)
val agg4 = df2.groupBy().count()
assert(!agg1.queryExecution.executedPlan.sameResult(agg4.queryExecution.executedPlan))
}
test("SPARK-12512: support `.` in column name for withColumn()") {
val df = Seq("a" -> "b").toDF("col.a", "col.b")
checkAnswer(df.select(df("*")), Row("a", "b"))
checkAnswer(df.withColumn("col.a", lit("c")), Row("c", "b"))
checkAnswer(df.withColumn("col.c", lit("c")), Row("a", "b", "c"))
}
test("SPARK-12841: cast in filter") {
checkAnswer(
Seq(1 -> "a").toDF("i", "j").filter($"i".cast(StringType) === "1"),
Row(1, "a"))
}
test("SPARK-12982: Add table name validation in temp table registration") {
val df = Seq("foo", "bar").map(Tuple1.apply).toDF("col")
// invalid table name test as below
intercept[AnalysisException](df.createOrReplaceTempView("t~"))
// valid table name test as below
df.createOrReplaceTempView("table1")
// another invalid table name test as below
intercept[AnalysisException](df.createOrReplaceTempView("#$@sum"))
// another invalid table name test as below
intercept[AnalysisException](df.createOrReplaceTempView("table!#"))
}
test("assertAnalyzed shouldn't replace original stack trace") {
val e = intercept[AnalysisException] {
spark.range(1).select('id as 'a, 'id as 'b).groupBy('a).agg('b)
}
assert(e.getStackTrace.head.getClassName != classOf[QueryExecution].getName)
}
  test("SPARK-13774: Check error message for a non-existent path without globbed paths") {
val uuid = UUID.randomUUID().toString
val baseDir = Utils.createTempDir()
try {
val e = intercept[AnalysisException] {
spark.read.format("csv").load(
new File(baseDir, "file").getAbsolutePath,
new File(baseDir, "file2").getAbsolutePath,
new File(uuid, "file3").getAbsolutePath,
uuid).rdd
}
assert(e.getMessage.startsWith("Path does not exist"))
    } finally {
      Utils.deleteRecursively(baseDir)
    }
}
  test("SPARK-13774: Check error message for non-existent globbed paths") {
// Non-existent initial path component:
val nonExistentBasePath = "/" + UUID.randomUUID().toString
assert(!new File(nonExistentBasePath).exists())
val e = intercept[AnalysisException] {
spark.read.format("text").load(s"$nonExistentBasePath/*")
}
assert(e.getMessage.startsWith("Path does not exist"))
// Existent initial path component, but no matching files:
val baseDir = Utils.createTempDir()
val childDir = Utils.createTempDir(baseDir.getAbsolutePath)
assert(childDir.exists())
try {
val e1 = intercept[AnalysisException] {
spark.read.json(s"${baseDir.getAbsolutePath}/*/*-xyz.json").rdd
}
assert(e1.getMessage.startsWith("Path does not exist"))
} finally {
Utils.deleteRecursively(baseDir)
}
}
test("SPARK-15230: distinct() does not handle column name with dot properly") {
val df = Seq(1, 1, 2).toDF("column.with.dot")
checkAnswer(df.distinct(), Row(1) :: Row(2) :: Nil)
}
test("SPARK-16181: outer join with isNull filter") {
val left = Seq("x").toDF("col")
val right = Seq("y").toDF("col").withColumn("new", lit(true))
val joined = left.join(right, left("col") === right("col"), "left_outer")
checkAnswer(joined, Row("x", null, null))
checkAnswer(joined.filter($"new".isNull), Row("x", null, null))
}
test("SPARK-16664: persist with more than 200 columns") {
val size = 201L
val rdd = sparkContext.makeRDD(Seq(Row.fromSeq(Seq.range(0, size))))
val schemas = List.range(0, size).map(a => StructField("name" + a, LongType, true))
val df = spark.createDataFrame(rdd, StructType(schemas), false)
assert(df.persist.take(1).apply(0).toSeq(100).asInstanceOf[Long] == 100)
}
test("SPARK-17409: Do Not Optimize Query in CTAS (Data source tables) More Than Once") {
withTable("bar") {
withTempView("foo") {
withSQLConf(SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> "json") {
sql("select 0 as id").createOrReplaceTempView("foo")
val df = sql("select * from foo group by id")
// If we optimize the query in CTAS more than once, the following saveAsTable will fail
// with the error: `GROUP BY position 0 is not in select list (valid range is [1, 1])`
df.write.mode("overwrite").saveAsTable("bar")
checkAnswer(spark.table("bar"), Row(0) :: Nil)
val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier("bar"))
assert(tableMetadata.provider == Some("json"),
"the expected table is a data source table using json")
}
}
}
}
test("copy results for sampling with replacement") {
val df = Seq((1, 0), (2, 0), (3, 0)).toDF("a", "b")
val sampleDf = df.sample(true, 2.00)
val d = sampleDf.withColumn("c", monotonically_increasing_id).select($"c").collect
assert(d.size == d.distinct.size)
}
test("SPARK-17625: data source table in InMemoryCatalog should guarantee output consistency") {
val tableName = "tbl"
withTable(tableName) {
spark.range(10).select('id as 'i, 'id as 'j).write.saveAsTable(tableName)
val relation = spark.sessionState.catalog.lookupRelation(TableIdentifier(tableName))
val expr = relation.resolve("i")
val qe = spark.sessionState.executePlan(Project(Seq(expr), relation))
qe.assertAnalyzed()
}
}
private def verifyNullabilityInFilterExec(
df: DataFrame,
expr: String,
expectedNonNullableColumns: Seq[String]): Unit = {
val dfWithFilter = df.where(s"isnotnull($expr)").selectExpr(expr)
    // In the logical plan, all the output columns of the input dataframe are nullable
dfWithFilter.queryExecution.optimizedPlan.collect {
case e: Filter => assert(e.output.forall(_.nullable))
}
dfWithFilter.queryExecution.executedPlan.collect {
// When the child expression in isnotnull is null-intolerant (i.e. any null input will
// result in null output), the involved columns are converted to not nullable;
// otherwise, no change should be made.
case e: FilterExec =>
assert(e.output.forall { o =>
if (expectedNonNullableColumns.contains(o.name)) !o.nullable else o.nullable
})
}
}
test("SPARK-17957: no change on nullability in FilterExec output") {
val df = sparkContext.parallelize(Seq(
null.asInstanceOf[java.lang.Integer] -> new java.lang.Integer(3),
new java.lang.Integer(1) -> null.asInstanceOf[java.lang.Integer],
new java.lang.Integer(2) -> new java.lang.Integer(4))).toDF()
verifyNullabilityInFilterExec(df,
expr = "Rand()", expectedNonNullableColumns = Seq.empty[String])
verifyNullabilityInFilterExec(df,
expr = "coalesce(_1, _2)", expectedNonNullableColumns = Seq.empty[String])
verifyNullabilityInFilterExec(df,
expr = "coalesce(_1, 0) + Rand()", expectedNonNullableColumns = Seq.empty[String])
verifyNullabilityInFilterExec(df,
expr = "cast(coalesce(cast(coalesce(_1, _2) as double), 0.0) as int)",
expectedNonNullableColumns = Seq.empty[String])
}
test("SPARK-17957: set nullability to false in FilterExec output") {
val df = sparkContext.parallelize(Seq(
null.asInstanceOf[java.lang.Integer] -> new java.lang.Integer(3),
new java.lang.Integer(1) -> null.asInstanceOf[java.lang.Integer],
new java.lang.Integer(2) -> new java.lang.Integer(4))).toDF()
verifyNullabilityInFilterExec(df,
expr = "_1 + _2 * 3", expectedNonNullableColumns = Seq("_1", "_2"))
verifyNullabilityInFilterExec(df,
expr = "_1 + _2", expectedNonNullableColumns = Seq("_1", "_2"))
verifyNullabilityInFilterExec(df,
expr = "_1", expectedNonNullableColumns = Seq("_1"))
    // `constructIsNotNullConstraints` infers IsNotNull(_2) from IsNotNull(_2 + Rand()).
    // Thus, we are able to set the nullability of _2 to false.
    // If IsNotNull(_2) were not provided by `constructIsNotNullConstraints`, the
    // implementation of isNullIntolerant in `FilterExec` would need an update to do more
    // advanced inference.
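    // For example, informally: in "_2 + Rand()" the addition returns null whenever _2 is
    // null, so a filter on isnotnull(_2 + Rand()) implies isnotnull(_2), and _2 can be
    // marked non-nullable in the FilterExec output even though Rand() itself is
    // non-deterministic.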
verifyNullabilityInFilterExec(df,
expr = "_2 + Rand()", expectedNonNullableColumns = Seq("_2"))
verifyNullabilityInFilterExec(df,
expr = "_2 * 3 + coalesce(_1, 0)", expectedNonNullableColumns = Seq("_2"))
verifyNullabilityInFilterExec(df,
expr = "cast((_1 + _2) as boolean)", expectedNonNullableColumns = Seq("_1", "_2"))
}
test("SPARK-17897: Fixed IsNotNull Constraint Inference Rule") {
val data = Seq[java.lang.Integer](1, null).toDF("key")
checkAnswer(data.filter(!$"key".isNotNull), Row(null))
checkAnswer(data.filter(!(- $"key").isNotNull), Row(null))
}
test("SPARK-17957: outer join + na.fill") {
val df1 = Seq((1, 2), (2, 3)).toDF("a", "b")
val df2 = Seq((2, 5), (3, 4)).toDF("a", "c")
val joinedDf = df1.join(df2, Seq("a"), "outer").na.fill(0)
val df3 = Seq((3, 1)).toDF("a", "d")
checkAnswer(joinedDf.join(df3, "a"), Row(3, 0, 4, 1))
}
test("SPARK-17123: Performing set operations that combine non-scala native types") {
val dates = Seq(
(new Date(0), BigDecimal.valueOf(1), new Timestamp(2)),
(new Date(3), BigDecimal.valueOf(4), new Timestamp(5))
).toDF("date", "timestamp", "decimal")
val widenTypedRows = Seq(
(new Timestamp(2), 10.5D, "string")
).toDF("date", "timestamp", "decimal")
dates.union(widenTypedRows).collect()
dates.except(widenTypedRows).collect()
dates.intersect(widenTypedRows).collect()
}
test("SPARK-18070 binary operator should not consider nullability when comparing input types") {
val rows = Seq(Row(Seq(1), Seq(1)))
val schema = new StructType()
.add("array1", ArrayType(IntegerType))
.add("array2", ArrayType(IntegerType, containsNull = false))
val df = spark.createDataFrame(spark.sparkContext.makeRDD(rows), schema)
assert(df.filter($"array1" === $"array2").count() == 1)
}
}
| ZxlAaron/mypros | sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala | Scala | apache-2.0 | 64,296 |
/**
* Copyright (c) 2007-2011 Eric Torreborre <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of
* the Software. Neither the name of specs nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
* TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package org.specs.specification
import org.specs._
import org.specs.io.mock._
class sharedSpec extends SpecificationWithJUnit {
"The specification with shared subexamples" should {
"have not any pending results" in {
(new MockJavaSpecification).reportSpecs.messages must not containMatch("PENDING")
}
"have one ko nested example" in {
(new MockJavaSpecification).reportSpecs.messages must containMatch("x must be ko")
}
"have another ko example at a second level" in {
(new MockJavaSpecification).reportSpecs.messages must containMatch("x must also be ko")
}
"have examples appear once only with a val definition" in {
(new MockJavaSpecification2).reportSpecs.messages.filter(_ contains "must be ok") must have size(1)
}
"have no pending results when there are no nested examples" in {
(new MockJavaSpecification3).reportSpecs.messages must not containMatch("PENDING")
}
}
}
class SharedExamples extends Specification {
def shared = "The Scala language" should {
"a nested example" >> {
"must be ok" >> {
true must beTrue
}
"must be ko" >> {
false must beTrue
}
}
"another nested example" >> {
"must also be ok" >> {
true must beTrue
}
"must also be ko" >> {
false must beTrue
}
}
}
shared
}
class JavaSpecification extends Specification {
"The Java language" should {
behave like (new SharedExamples).shared
}
}
class SharedExamples2 extends Specification {
val shared = "The Scala language" should {
"a nested example" >> {
"must be ok" >> {
true must beTrue
}
}
"another nested example" >> {
"must also be ok" >> {
true must beTrue
}
"must also be ko" >> {
false must beTrue
}
}
}
}
class MockJavaSpecification extends Specification with MockOutput {
"The Java language" should {
behave like (new SharedExamples).shared
}
}
class MockJavaSpecification2 extends Specification with MockOutput {
"The Java language" should {
behave like (new SharedExamples2).shared
}
}
class JavaSpecification2 extends Specification {
"The Java language" should {
behave like (new SharedExamples2).shared
}
}
class SharedExamples3 extends Specification {
val shared = "The Scala language" should {
"an example" >> {
true must beTrue
}
"another example" >> {
false must beTrue
}
}
}
class MockJavaSpecification3 extends Specification with MockOutput {
"The Java language" should {
behave like (new SharedExamples3).shared
}
}
class JavaSpecification3 extends Specification {
"The Java language" should {
behave like (new SharedExamples3).shared
}
}
class JavaSpecification4 extends Specification {
val shared = "The Scala language" should {
"an example" >> {
true must beTrue
}
}
"The Java language" should {
behave like shared
}
}
| yyuu/specs | src/test/scala/org/specs/specification/sharedSpec.scala | Scala | mit | 4,444 |
///////////////////////////////////////////////////////////////////////////////
// GridRanker.scala
//
// Copyright (C) 2010-2014 Ben Wing, The University of Texas at Austin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////////////
package opennlp.textgrounder
package gridlocate
import scala.util.Random
import collection.mutable
import math._
import util.print.errprint
import util.debug._
import util.error._
import util.verbose._
import langmodel._
import learning._
import learning.vowpalwabbit._
/*
This file implements the various rankers used for inference of the
location of a document in a grid -- i.e. returning a ranking of the
suitability of the cells of the grid for a given document.
*/
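/*
A minimal usage sketch (hypothetical driver code; `ranker` and `doc` stand for an
already-constructed GridRanker[Co] and GridDoc[Co], which are not defined in this file):

  val ranked = ranker.evaluate(doc, None, false) // ranked (cell, score) pairs, best first
  val (bestCell, bestScore) = ranked.head
*/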
object GridRanker {
private val next_ranker_no = new java.util.concurrent.atomic.AtomicInteger
}
/**
* A ranker for ranking cells in a grid as possible matches for a given
* document (aka "grid-locating a document").
*
* @tparam Co Type of document's identifying coordinate (e.g. a lat/long tuple,
* a year, etc.), which tends to determine the grid structure.
* @param ranker_name Name of the ranker, for output purposes
* @param grid Grid containing the cells over which this ranker operates
*/
abstract class GridRanker[Co](
val ranker_name: String,
val grid: Grid[Co]
) extends Ranker[GridDoc[Co], GridCell[Co]] {
/** Unique identifier for each ranker */
val ranker_no = GridRanker.next_ranker_no.incrementAndGet
/** Optional initialization stage passing one or more times over the
* test data. */
def initialize(
get_docstats: () => Iterator[DocStatus[(RawDoc, GridDoc[Co])]]
) { }
override def toString = {
"%s(#%s)" format (getClass.getSimpleName, ranker_no)
}
}
/**
* A ranker for ranking cells in a grid as possible matches for a given
* document (aka "grid-locating a document").
*
* @tparam Co Type of document's identifying coordinate (e.g. a lat/long tuple,
* a year, etc.), which tends to determine the grid structure.
* @param ranker_name Name of the ranker, for output purposes
* @param grid Grid containing the cells over which this ranker operates
*/
class InterpolatingGridRanker[Co](
val fg: GridRanker[Co],
val bg: GridRanker[Co],
val interp_factor: Double
// FIXME: Is it OK for this to be fg.grid specifically?
) extends GridRanker[Co]("interpolating", fg.grid) {
/** Optional initialization stage passing one or more times over the
* test data. */
override def initialize(
get_docstats: () => Iterator[DocStatus[(RawDoc, GridDoc[Co])]]
) {
fg.initialize(get_docstats)
bg.initialize(get_docstats)
}
def imp_evaluate(item: GridDoc[Co], correct: Option[GridCell[Co]],
include_correct: Boolean) = {
// We match up the cells by going through the cells in the background
// ranker, and for each cell's centroid, looking up the containing cell
// in the foreground ranker for this centroid (if any), then
// interpolating the scores.
val cells_scores_fg = fg.evaluate(item, correct, include_correct)
val cells_scores_bg = bg.evaluate(item, correct, include_correct)
if (cells_scores_fg.size == 0) cells_scores_bg
else if (cells_scores_bg.size == 0) cells_scores_fg
else {
val scores_fg_map = cells_scores_fg.toMap
// FIXME! This is written with a uniform grid in mind. Doubtful it
// will work well with K-d trees, where there are no holes in the
// grid.
// FIXME! We need to do something better when a cell is found in the
// foreground but not the background; or we need to interpolate
// fg with fg + bg rather than fg with bg.
// FIXME! We are doing the equivalent of Jelinek smoothing.
// Implement Dirichlet and PGT smoothing.
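      // Concretely, for each background cell that has a matching foreground cell, the
      // mixture computed below is
      //   score = scorefg * (1 - interp_factor) + scorebg * interp_factor
      // so interp_factor = 0 uses only the foreground ranker and interp_factor = 1 only
      // the background ranker.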
cells_scores_bg.map { case (cellbg, scorebg) =>
val cellfg = fg.grid.find_best_cell_for_coord(cellbg.get_centroid,
create_non_recorded = false)
        if (cellfg.isEmpty) (cellbg, scorebg) else {
val scorefg = scores_fg_map(cellfg.get)
(cellbg, scorefg * (1 - interp_factor) + scorebg * interp_factor)
}
}.toSeq.sortWith(_._2 > _._2)
}
}
}
/**
* A grid ranker that does not use reranking.
*
* @tparam Co Type of document's identifying coordinate (e.g. a lat/long tuple,
* a year, etc.), which tends to determine the grid structure.
* @param ranker_name Name of the ranker, for output purposes
* @param grid Grid containing the cells over which this ranker operates
*/
abstract class SimpleGridRanker[Co](
ranker_name: String,
grid: Grid[Co]
) extends GridRanker[Co](ranker_name, grid) {
/**
* For a given test document, return an Iterable of tuples, each listing
* a particular cell on the Earth and a score of some sort. The correct
* cell is as given, and if `include_correct` is specified, must be
* included in the list. Higher scores are better. The results should
* be in sorted order, with better cells earlier.
*/
def return_ranked_cells(doc: GridDoc[Co], correct: Option[GridCell[Co]],
include_correct: Boolean):
Iterable[(GridCell[Co], Double)]
def imp_evaluate(item: GridDoc[Co], correct: Option[GridCell[Co]],
include_correct: Boolean) =
return_ranked_cells(item, correct, include_correct)
.filter { case (cell, score) => grid.cell_fits_restriction(cell) }
}
/**
* Object encapsulating a GridLocate data instance to be used by the
* classifier that is either used for ranking directly or underlies the
* reranker. This corresponds to a document in the training corpus.
* This is used in place of just using an aggregate feature vector directly
* because the cost perceptron cost function needs to retrieve the document
* and correct cell while training in order to compute the distance between
* them, which is used to compute the cost.
*/
abstract class GridRankerInst[Co] extends DataInstance {
def doc: GridDoc[Co]
def agg: AggregateFeatureVector
final def feature_vector = agg
/**
* Return the candidate cell at the given label index.
*/
def get_cell(index: LabelIndex): GridCell[Co]
def pretty_print_labeled(prefix: String, correct: LabelIndex) {
errprint(s"For instance $prefix with query doc $doc:")
for ((fv, index) <- agg.fv.zipWithIndex) {
val cell = get_cell(index)
errprint(s" $prefix-${index + 1}: %s: $cell: $fv",
if (index == correct) "CORRECT" else "WRONG")
}
}
}
/**
* Object encapsulating a GridLocate data instance to be used by the
* classifier that is used directly for ranking the cells.
*/
case class GridRankingClassifierInst[Co](
doc: GridDoc[Co],
agg: AggregateFeatureVector,
featvec_factory: CandidateFeatVecFactory[Co]
) extends GridRankerInst[Co] {
def get_cell(index: LabelIndex) = featvec_factory.index_to_cell(index)
}
/**
* Class that implements a very simple baseline ranker -- pick a random
* cell.
*/
class RandomGridRanker[Co](
ranker_name: String,
grid: Grid[Co]
) extends SimpleGridRanker[Co](ranker_name, grid) {
def return_ranked_cells(doc: GridDoc[Co], correct: Option[GridCell[Co]],
include_correct: Boolean) = {
val cells = grid.iter_nonempty_cells_including(correct, include_correct)
val shuffled = (new Random()).shuffle(cells)
(for (cell <- shuffled) yield (cell, 0.0))
}
}
/**
* Class that implements a simple baseline ranker -- pick the "most
* popular" cell (the one either with the largest number of documents, or
* the highest salience, if `salience` is true).
*/
class MostPopularGridRanker[Co] (
ranker_name: String,
grid: Grid[Co],
salience: Boolean
) extends SimpleGridRanker[Co](ranker_name, grid) {
def return_ranked_cells(doc: GridDoc[Co], correct: Option[GridCell[Co]],
include_correct: Boolean) = {
val cells = grid.iter_nonempty_cells_including(correct, include_correct)
(for (cell <- cells) yield {
val rank = if (salience) cell.salience else cell.num_docs
(cell, rank.toDouble)
}).toIndexedSeq sortWith (_._2 > _._2)
}
}
/**
* Abstract class that implements a ranker for grid location that
* involves directly comparing the document language model against each cell
* in turn and computing a score.
*/
abstract class PointwiseScoreGridRanker[Co](
ranker_name: String,
grid: Grid[Co]
) extends SimpleGridRanker[Co](ranker_name, grid) {
/**
* Function to return the score of a document language model against a
* cell.
*/
def score_cell(doc: GridDoc[Co], cell: GridCell[Co]): Double
def get_candidates(correct: Option[GridCell[Co]], include_correct: Boolean) =
grid.iter_nonempty_cells_including(correct, include_correct)
/**
* Compare a language model (for a document, typically) against all
* cells. Return a sequence of tuples (cell, score) where 'cell'
* indicates the cell and 'score' the score.
*/
def return_ranked_cells_serially(doc: GridDoc[Co],
correct: Option[GridCell[Co]], include_correct: Boolean) = {
for (cell <- get_candidates(correct, include_correct)) yield {
if (debug("ranking")) {
errprint(
"Nonempty cell at indices %s = location %s, num_documents = %s",
cell.format_indices, cell.format_location,
cell.num_docs)
}
val score = score_cell(doc, cell)
assert(!score.isNaN, s"Saw NaN for score of cell $cell, doc $doc")
(cell, score)
}
}
/**
   * Compare a language model (for a document, typically) against all
   * cells, in parallel. Return a sequence of tuples (cell, score) where 'cell'
* indicates the cell and 'score' the score.
*/
def return_ranked_cells_parallel(doc: GridDoc[Co],
correct: Option[GridCell[Co]], include_correct: Boolean) = {
val cells = get_candidates(correct, include_correct)
cells.par.map(c => {
val score = score_cell(doc, c)
assert(!score.isNaN, s"Saw NaN for score of cell $c, doc $doc")
(c, score)
})
}
def return_ranked_cells(doc: GridDoc[Co], correct: Option[GridCell[Co]],
include_correct: Boolean) = {
val parallel = !grid.driver.params.no_parallel
val cell_buf = {
if (parallel)
return_ranked_cells_parallel(doc, correct, include_correct)
else
return_ranked_cells_serially(doc, correct, include_correct)
}
val retval = cell_buf.toIndexedSeq sortWith (_._2 > _._2)
/* If doing things parallel, this code applies for debugging
(serial has the debugging code embedded into it). */
if (parallel && debug("ranking")) {
for ((cell, score) <- retval)
errprint("Nonempty cell at indices %s = location %s, num_documents = %s, score = %s",
cell.format_indices, cell.format_location,
cell.num_docs, score)
}
retval
}
}
/**
* Class that implements a ranker for document geolocation by computing
* the KL-divergence between document and cell (approximately, how much
* the language models differ). Note that the KL-divergence as currently
* implemented uses the smoothed language models.
*
* @param partial If true (the default), only do "partial" KL-divergence.
* This only computes the divergence involving words in the document
* language model, rather than considering all words in the vocabulary.
* @param symmetric If true, do a symmetric KL-divergence by computing
* the divergence in both directions and averaging the two values.
* (Not by default; the comparison is fundamentally asymmetric in
* any case since it's comparing documents against cells.)
*/
class KLDivergenceGridRanker[Co](
ranker_name: String,
grid: Grid[Co],
partial: Boolean = true,
symmetric: Boolean = false
) extends PointwiseScoreGridRanker[Co](ranker_name, grid) {
var self_kl_cache: KLDivergenceCache = null
val slow = false
def call_kl_divergence(self: LangModel, other: LangModel) =
self.kl_divergence(other, partial = partial, cache = self_kl_cache)
def score_cell(doc: GridDoc[Co], cell: GridCell[Co]) = {
val lang_model = doc.grid_lm
val cell_lang_model = cell.grid_lm
var kldiv = call_kl_divergence(lang_model, cell_lang_model)
if (symmetric) {
val kldiv2 = cell_lang_model.kl_divergence(lang_model,
partial = partial)
kldiv = (kldiv + kldiv2) / 2.0
}
// Negate so that higher scores are better
-kldiv
}
override def return_ranked_cells(doc: GridDoc[Co],
correct: Option[GridCell[Co]], include_correct: Boolean) = {
val lang_model = doc.grid_lm
// This will be used by `score_cell` above.
self_kl_cache = lang_model.get_kl_divergence_cache()
val cells = super.return_ranked_cells(doc, correct, include_correct)
if (debug("kldiv") && lang_model.isInstanceOf[FastSlowKLDivergence]) {
val fast_slow_dist = lang_model.asInstanceOf[FastSlowKLDivergence]
// Print out the words that contribute most to the KL divergence, for
// the top-ranked cells
errprint("")
errprint("KL-divergence debugging info:")
for (((cell, _), i) <- cells.take(
GridLocateConstants.kldiv_num_contrib_cells) zipWithIndex) {
val (_, contribs) =
fast_slow_dist.slow_kl_divergence_debug(
cell.grid_lm, partial = partial,
return_contributing_words = true)
errprint(" At rank #%s, cell %s:", i + 1, cell)
errprint(" %30s %s", "Word", "KL-div contribution")
errprint(" %s", "-" * 50)
// sort by absolute value of second element of tuple, in reverse order
val grams =
(contribs.toIndexedSeq sortWith ((x, y) => abs(x._2) > abs(y._2))).
take(GridLocateConstants.kldiv_num_contrib_words)
for ((word, contribval) <- grams)
errprint(" %30s %s", word, contribval)
errprint("")
}
}
cells
}
}
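/* Illustrative sketch, not part of the original ranker: a minimal Map-based
 * version of the KL-divergence variants described in the scaladoc above.
 * "Partial" is taken to mean summing only over words on the document side;
 * the real implementation works on smoothed LangModels, which is omitted
 * here, and all names below are hypothetical. */
object KLDivergenceSketch {
  def kl(p: Map[String, Double], q: Map[String, Double]): Double =
    p.collect { case (w, pw) if pw > 0.0 && q.getOrElse(w, 0.0) > 0.0 =>
      pw * math.log(pw / q(w))
    }.sum

  // Symmetric variant: average of the two directed divergences.
  def symmetricKl(p: Map[String, Double], q: Map[String, Double]): Double =
    (kl(p, q) + kl(q, p)) / 2.0
}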
/**
* Class that implements a ranker for document geolocation by computing
* the cosine similarity between the language models of document and cell.
*
* @param smoothed If true, use the smoothed language models. (By default,
* use unsmoothed language models.)
* @param partial If true, only do "partial" cosine similarity.
* This only computes the similarity involving words in the document
* language model, rather than considering all words in the vocabulary.
*/
class CosineSimilarityGridRanker[Co](
ranker_name: String,
grid: Grid[Co],
smoothed: Boolean = false,
partial: Boolean = true
) extends PointwiseScoreGridRanker[Co](ranker_name, grid) {
def score_cell(doc: GridDoc[Co], cell: GridCell[Co]) = {
val cossim =
doc.grid_lm.cosine_similarity(cell.grid_lm,
partial = partial, smoothed = smoothed)
assert_>=(cossim, 0.0)
// Just in case of round-off problems
assert_<=(cossim, 1.002)
cossim
}
}
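/* Illustrative sketch, not part of the original ranker: cosine similarity over
 * sparse Map-based vectors, mirroring the scaladoc above. "Partial" is taken
 * to mean restricting the dot product to words present on the document side.
 * All names below are hypothetical and do not reflect the LangModel API. */
object CosineSimilaritySketch {
  def cosine(p: Map[String, Double], q: Map[String, Double]): Double = {
    val dot = p.map { case (w, pw) => pw * q.getOrElse(w, 0.0) }.sum
    val norm = math.sqrt(p.values.map(x => x * x).sum) *
      math.sqrt(q.values.map(x => x * x).sum)
    if (norm == 0.0) 0.0 else dot / norm
  }
}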
/**
* Class that implements a ranker for document geolocation that sums the
* unsmoothed probability (or frequency) values for the words in the
* document. Generally only useful when '--tf-idf' or similar is invoked.
*/
class SumFrequencyGridRanker[Co](
ranker_name: String,
grid: Grid[Co]
) extends PointwiseScoreGridRanker[Co](ranker_name, grid) {
def score_cell(doc: GridDoc[Co], cell: GridCell[Co]) = {
doc.grid_lm.sum_frequency(cell.grid_lm)
}
}
trait NaiveBayesFeature[Co]
{
/** Possible initialization step at beginning to do a pass over test data.
* Needed for NaiveBayesRoughRankerFeature when the wrapped ranker uses
* Vowpal Wabbit. */
def initialize(
get_docstats: () => Iterator[DocStatus[(RawDoc, GridDoc[Co])]]) {
}
def get_logprob(doc: GridDoc[Co], cell: GridCell[Co]): Double
}
class NaiveBayesTermsFeature[Co] extends NaiveBayesFeature[Co]
{
def get_logprob(doc: GridDoc[Co], cell: GridCell[Co]) =
cell.grid_lm.model_logprob(doc.grid_lm)
}
class NaiveBayesRoughRankerFeature[Co](
rough_ranker: PointwiseScoreGridRanker[Co]
) extends NaiveBayesFeature[Co]
{
// Needed for Vowpal Wabbit.
override def initialize(
get_docstats: () => Iterator[DocStatus[(RawDoc, GridDoc[Co])]]) {
rough_ranker.initialize(get_docstats)
}
def get_logprob(doc: GridDoc[Co], cell: GridCell[Co]) = {
val central = cell.get_central_point
val rough_cell = rough_ranker.grid.find_best_cell_for_coord(central,
create_non_recorded = true).get
// We don't need to take the log here. If the score is from Naive Bayes,
// we've already taken the log of the probability. If from maxent, we'd
// convert it to a probability by exponentiating and renormalizing, and
// the normalization factor will be the same for all cells so it shouldn't
// affect the ranking and can be ignored. Taking the log would then just
// cancel out the exponentiation, so neither needs to be done.
rough_ranker.score_cell(doc, rough_cell)
}
}
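/* Illustrative check, not part of the original code, of the reasoning in the
 * comment inside get_logprob above: exponentiating, renormalizing and taking
 * the log is a monotone transformation of the scores, so it cannot change
 * their ranking. The helper below is hypothetical. */
object RankInvarianceSketch {
  def logSoftmax(scores: Seq[Double]): Seq[Double] = {
    val z = scores.map(math.exp).sum
    scores.map(s => math.log(math.exp(s) / z))
  }
  // E.g. Seq(2.0, 0.5, -1.0) and logSoftmax(Seq(2.0, 0.5, -1.0)) sort their
  // indices into the same order.
}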
/** Use a Naive Bayes ranker for comparing document and cell. */
class NaiveBayesGridRanker[Co](
ranker_name: String,
grid: Grid[Co],
features: Iterable[NaiveBayesFeature[Co]]
) extends PointwiseScoreGridRanker[Co](ranker_name, grid) {
override def initialize(
get_docstats: () => Iterator[DocStatus[(RawDoc, GridDoc[Co])]]) {
for (f <- features)
f.initialize(get_docstats)
}
def score_cell(doc: GridDoc[Co], cell: GridCell[Co]) = {
val params = grid.driver.params
// Determine respective weightings
val (word_weight, prior_weight) = {
val bw = params.naive_bayes_prior_weight
(1.0 - bw, bw)
}
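    // E.g. (hypothetical value) naive_bayes_prior_weight = 0.1 would yield
    // word_weight = 0.9 and prior_weight = 0.1.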
val features_logprob = features.map(_.get_logprob(doc, cell)).sum
assert(!features_logprob.isNaN, s"features_logprob: Saw NaN for score of cell $cell, doc $doc")
// FIXME: Is the normalization necessary?
val prior_logprob = log(cell.prior_weighting / grid.total_prior_weighting)
assert(!prior_logprob.isNaN, s"prior_logprob: Saw NaN for score of cell $cell, doc $doc\\n" +
s"cell prior weighting ${cell.prior_weighting}, total prior weighting ${grid.total_prior_weighting}")
val logprob = (word_weight * features_logprob + prior_weight * prior_logprob)
logprob
}
}
/** Use a classifier (normally maxent) for comparing document and cell. */
abstract class ClassifierGridRanker[Co](
ranker_name: String,
grid: Grid[Co],
featvec_factory: CandidateFeatVecFactory[Co]
) extends PointwiseScoreGridRanker[Co](ranker_name, grid) {
// Only include the cells corresponding to those labels that the classifier
// knows, possibly plus the correct cell if required.
override def get_candidates(correct: Option[GridCell[Co]],
include_correct: Boolean) = {
val cands =
(0 until featvec_factory.featvec_factory.mapper.number_of_labels
).map { label => featvec_factory.index_to_cell(label) }
if (!include_correct || cands.find(_ == correct.get) != None)
cands
else
correct.get +: cands
}
/**
* Score a document by directly invoking the classifier, rather than
* by looking up a cache of scores, if such a cache exists.
*/
def score_doc_directly(doc: GridDoc[Co]): Iterable[(GridCell[Co], Double)]
}
/**
* A classifier where we can use the normal LinearClassifier mechanism,
* i.e. where we have the weights directly available and can cheaply score
* an individual cell of an individual document. */
class IndivClassifierGridRanker[Co](
ranker_name: String,
grid: Grid[Co],
classifier: LinearClassifier,
featvec_factory: CandidateFeatVecFactory[Co]
) extends ClassifierGridRanker[Co](ranker_name, grid, featvec_factory) {
def score_cell(doc: GridDoc[Co], cell: GridCell[Co]) = {
val fv = featvec_factory(doc, cell, 0, 0, is_training = false)
// We might be asked about a cell outside of the set of candidates,
// especially when we have a limited set of possible candidates.
// See comment in VowpalWabbitGridRanker for more info.
featvec_factory.lookup_cell_if(cell) match {
case Some(label) => classifier.score_label(fv, label)
case None => Double.NegativeInfinity
}
}
def score_doc_directly(doc: GridDoc[Co]) =
return_ranked_cells(doc, None, include_correct = false)
}
/** Use a Vowpal Wabbit classifier for comparing document and cell. */
class VowpalWabbitGridRanker[Co](
ranker_name: String,
grid: Grid[Co],
classifier: VowpalWabbitBatchClassifier,
featvec_factory: DocFeatVecFactory[Co],
cost_sensitive: Boolean
) extends ClassifierGridRanker[Co](ranker_name, grid, featvec_factory) {
var doc_scores: Map[String, Array[Double]] = _
override def initialize(
get_docstats: () => Iterator[DocStatus[(RawDoc, GridDoc[Co])]]
) {
val docs = grid.docfact.document_statuses_to_documents(get_docstats())
doc_scores = score_test_docs(docs).toMap
}
// Score the test documents in `docs` by calling Vowpal Wabbit. Return
// an Iterable over tuples of document title and array of log-probabilities,
// with one probability per possible label. Multi-label classifiers are
// implemented in Vowpal Wabbit as a set of one-against-all binary
// classifiers, and the probabilities as directly returned by Vowpal Wabbit
// reflect the likelihood that a given label is correct compared with the
// others. In general, these probabilities won't be normalized. If
// `normalize` is true, normalize the probabilities so that they add up
// to one (although remember that we return log-probabilities, which
// naturally will not sum to one). Normalizing the probabilities creates a
// proper probability distribution but eliminates information on the
// absolute amount of compatibility with a given label. (E.g., if none of
// the labels match the test document very well, but one matches much
// better than the others, it will have a very high probability, although
  // this may not mean much.)
//
// Experiments show that normalization works significantly better than not
// normalizing, so do it by default.
val normalize = !debug("vw-unnormalized")
//
// If `verbose` is true, output messages from Vowpal Wabbit as it runs.
def score_test_docs(docs: Iterator[GridDoc[Co]], verbose: Boolean = true
): Iterable[(String, Array[Double])] = {
val titles = mutable.Buffer[String]()
val verbosity = if (verbose) MsgNormal else MsgQuiet
val feature_file =
if (cost_sensitive) {
val labels_costs =
(0 until featvec_factory.featvec_factory.mapper.number_of_labels
).map { label => (label, 0.0) }
val training_data =
docs.map/*Metered(task)*/ { doc =>
val feats = featvec_factory.get_features(doc)
titles += doc.title
// It doesn't matter what we give as the correct costs here
(feats, labels_costs)
}
classifier.write_cost_sensitive_feature_file(training_data, verbosity)
} else {
val training_data =
docs.map/*Metered(task)*/ { doc =>
val feats = featvec_factory.get_features(doc)
titles += doc.title
// It doesn't matter what we give as the correct cell here
(feats, 0)
}
classifier.write_feature_file(training_data, verbosity)
}
val list_of_scores =
classifier(feature_file, verbosity).map { raw_label_scores =>
val label_scores =
if (cost_sensitive) {
// Raw scores for cost-sensitive appear to be costs, i.e.
// lower is better, so negate.
raw_label_scores.map { case (label, score) => (label, -score) }
} else {
// Convert to proper log-probs.
val indiv_probs = raw_label_scores map { case (label, score) =>
(label, 1/(1 + math.exp(-score)))
}
val norm_sum = if (normalize) indiv_probs.map(_._2).sum else 1.0
indiv_probs map { case (label, prob) =>
(label, math.log(prob/norm_sum))
}
}
val scores = label_scores.sortWith(_._1 < _._1).map(_._2)
assert_==(scores.size,
featvec_factory.featvec_factory.mapper.number_of_labels, "#labels",
s"For model ${classifier.model_filename}")
scores
}
assert_==(titles.size, list_of_scores.size, "#docs",
s"For model ${classifier.model_filename}")
titles zip list_of_scores
}
def score_cell(doc: GridDoc[Co], cell: GridCell[Co]) = {
val scores = doc_scores(doc.title)
// We might be asked about a cell outside of the set of candidates,
// especially when we have a limited set of possible candidates.
// Note that the set of labels that the classifier knows about may
// be less than the number in the 'candidates' param passed to
// create_classifier_ranker(), particularly in the non-cost-sensitive
// case, where the classifier only knows about labels corresponding
// to candidates with a non-zero number of training documents in them.
// (In the cost-sensitive case we have to supply costs for all labels
// for each training document, and we go ahead and convert all
// candidates to labels.)
featvec_factory.lookup_cell_if(cell) match {
case Some(label) => scores(label)
case None => Double.NegativeInfinity
}
}
/**
* Score a document by directly invoking the classifier (which requires
* spawning the VW app), rather than by looking up a cache of scores.
*/
def score_doc_directly(doc: GridDoc[Co]) = {
// Be quiet unless --verbose is given since we may be executing on
// large numbers of test docs.
val scores = score_test_docs(Iterator(doc),
verbose = grid.driver.params.verbose).head._2
val cands = get_candidates(None, include_correct = false)
assert_==(scores.size, cands.size, "#candidates")
cands zip scores
}
}
/** Use a hierarchical classifier for comparing document and
* cell. We work as follows:
*
* 1. Classify at the coarsest grid level, over all the cells in that grid.
* Take the top N for some beam size.
* 2. For each grid cell, classify among the subdividing cells at the next
* finer grid. This computes e.g. p(C2|C1) for cell C1 at the coarsest
* level and cell C2 at the next finer level. Compute e.g. p(C1,C2) =
* p(C1) * p(C2|C1); again take the top N.
* 3. Repeat till we reach the finest level.
*
* We need one classifier over all the non-empty cells at the coarsest level,
* then for each subsequent level except the finest, as many classifiers
* as there are non-empty cells on that level.
*
* @param ranker_name Identifying string, usually "classifier".
* @param grids List of successive hierarchical grids, from coarse to fine.
* @param coarse_ranker Ranker at coarsest level for all cells at
* that level.
* @param finer_rankers Sequence of maps, one per grid other than at the
* coarsest level, mapping a grid cell at a coarser level to a ranker
* over the subdivided cells of that cell at the next finer level.
* @param beam_size Number of top-ranked cells we keep from one level to
* the next.
* @param cost_sensitive Whether we are doing cost-sensitive classification.
*/
class HierarchicalClassifierGridRanker[Co](
ranker_name: String,
grids: Iterable[Grid[Co]],
coarse_ranker: ClassifierGridRanker[Co],
finer_rankers: Iterable[Map[GridCell[Co], ClassifierGridRanker[Co]]],
// training_docs_cells: Iterable[(GridDoc[Co], GridCell[Co])],
beam_size: Int
) extends SimpleGridRanker[Co](ranker_name, grids.last) {
val coarsest_grid = grids.head
// val finest_grid = grids.last
override def initialize(
get_docstats: () => Iterator[DocStatus[(RawDoc, GridDoc[Co])]]
) {
coarse_ranker.initialize(get_docstats)
}
def return_ranked_cells(doc: GridDoc[Co], correct: Option[GridCell[Co]],
include_correct: Boolean): Iterable[(GridCell[Co], Double)] = {
val do_gridrank =
debug("hier-gridrank") ||
debuglist_matches_alphanum("hier-gridrank", doc.title)
// First, we rank each cell at the coarsest level.
val raw_prev_scores =
for (cell <- coarsest_grid.iter_nonempty_cells) yield {
val score = coarse_ranker.score_cell(doc, cell)
(cell, score)
}
var prev_scores =
raw_prev_scores.toIndexedSeq.filter {
case (cell, score) => coarsest_grid.cell_fits_restriction(cell)
}.sortWith(_._2 > _._2)
if (do_gridrank)
coarsest_grid.output_ranking_data(s"${doc.title} (level 1)",
prev_scores, None, correct)
// Then, for each grid at the next finer level ...
for (((finer, rankers), level) <-
grids.tail zip finer_rankers zip Stream.from(2)) {
// First, reduce the cells at previous level that will be propagated to
// new level by the beam size
val beamed_prev_scores = prev_scores.take(beam_size)
// For each cell being considered ...
val new_scores = for (((old_cell, old_score), index) <-
beamed_prev_scores zip Stream.from(1)) yield {
// Find the corresponding ranker and run it
val ranker = rankers(old_cell)
val doc_ranked_scores =
ranker.score_doc_directly(doc).toIndexedSeq.filter {
case (cell, score) => finer.cell_fits_restriction(cell)
}.sortWith(_._2 > _._2)
if (do_gridrank) {
val docid = "%s (level %s, index %s, cell %s)" format (
doc.title, level, index, old_cell.format_location)
finer.output_ranking_data(docid, doc_ranked_scores, Some(old_cell),
correct)
}
// Fetch the top cell and corresponding log-probability
val (top_cell, top_score) = doc_ranked_scores.head
if (debug("hier-classifier")) {
errprint(s"Old cell: ${old_cell.format_coord(old_cell.get_central_point)} (old score $old_score)")
val mapper_doc_ranked_scores = doc_ranked_scores map {
case (cell, score) => (cell.format_coord(cell.get_central_point), score)
}
errprint(s"Doc ranked scores: $mapper_doc_ranked_scores")
errprint(s"Substituting: Top cell ${top_cell.format_coord(top_cell.get_central_point)}, top score $top_score, total score ${old_score + top_score}")
}
// Return top cell, accumulate log-probabilities across all levels
(top_cell, old_score + top_score)
}
// Send scored cells to next level, sorted so they can be reduced by
// the beam size
prev_scores = new_scores.sortWith(_._2 > _._2)
/*
The other way of doing hierarchical classification, constructing new
classifiers on the fly.
// Expand each cell to one or more cells at new level. Convert to set
// to remove duplicates, then back to indexed sequence.
val new_cells =
prev_cells.flatMap { finer.get_subdivided_cells(_) }.toSet.toIndexedSeq
val new_ranker = finer.driver.create_classifier_ranker(
ranker_name, finer, new_cells, training_docs_cells)
val new_scores =
if (finer == finest_grid)
new_ranker.evaluate(doc, correct, include_correct)
else
new_ranker.evaluate(doc, None, false)
prev_scores = new_scores.toIndexedSeq
*/
}
prev_scores
}
}
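/* Illustrative sketch, not part of the original class: the probability
 * combination described in the class comment above, on toy data. Scores are
 * log-probabilities, so p(C1, C2) = p(C1) * p(C2|C1) becomes an addition, and
 * only the top `beam` coarser cells are expanded. All names are hypothetical. */
object HierarchicalBeamSketch {
  def expand(
    coarse: Seq[(String, Double)],            // (coarse cell, log p(C1))
    finerOf: String => Seq[(String, Double)], // (finer cell, log p(C2|C1))
    beam: Int
  ): Seq[(String, Double)] =
    coarse.sortBy(-_._2).take(beam).flatMap { case (c1, lp1) =>
      finerOf(c1).map { case (c2, lp2) => (c2, lp1 + lp2) } // log p(C1, C2)
    }.sortBy(-_._2)
}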
class AverageCellProbabilityGridRanker[Co](
ranker_name: String,
grid: Grid[Co]
) extends SimpleGridRanker[Co](ranker_name, grid) {
val cfact = new CellDistFactory[Co]
def return_ranked_cells(doc: GridDoc[Co], correct: Option[GridCell[Co]],
include_correct: Boolean) = {
val ranking = cfact.get_cell_dist_for_lang_model(grid, doc.grid_lm).
get_ranked_cells(correct, include_correct)
// If there are no words in the document, the list of cells will
// be empty. In that case, just return the cells in an arbitrary order
// (the order they appear in the hash table). This is similar to
// returning randomly but should hopefully give the same results each
// time, so the ACP results don't have any randomness in them.
if (!ranking.isEmpty)
ranking
else
for (cell <- grid.iter_nonempty_cells_including(correct, include_correct))
yield (cell, 0.0)
}
}
/////////////////////////////////////////////////////////////////////////////
// Segmentation //
/////////////////////////////////////////////////////////////////////////////
// General idea: Keep track of best possible segmentations up to a maximum
// number of segments. Either do it using a maximum number of segmentations
// (e.g. 100 or 1000) or all within a given factor of the best score (the
// "beam width", e.g. 10^-4). Then given the existing best segmentations,
// we search for new segmentations with more segments by looking at all
// possible ways of segmenting each of the existing best segments, and
// finding the best score for each of these. This is a slow process -- for
// each segmentation, we have to iterate over all segments, and for each
// segment we have to look at all possible ways of splitting it, and for
// each split we have to look at all assignments of cells to the two
// new segments. It also seems that we're likely to consider the same
// segmentation multiple times.
//
// In the case of per-word cell dists, we can maybe speed things up by
// computing the non-normalized distributions over each paragraph and then
// summing them up as necessary.
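//
// The sketch below, which is not part of the original file, is one minimal way
// to realize the beam search described above. A segmentation of n paragraphs
// is a list of contiguous half-open spans, and `scoreSpan` is an assumed,
// user-supplied scorer for a single segment; each round splits every segment
// of every kept segmentation at every internal position, removes duplicate
// segmentations, and keeps the `beam` best results.
object SegmentationBeamSketch {
  type Seg = Vector[(Int, Int)] // half-open spans covering 0 until n

  def refine(segs: Seq[Seg], scoreSpan: (Int, Int) => Double, beam: Int): Seq[Seg] = {
    val candidates = for {
      seg <- segs
      (span, i) <- seg.zipWithIndex
      split <- (span._1 + 1) until span._2
    } yield seg.patch(i, Vector((span._1, split), (split, span._2)), 1)
    candidates.distinct
      .sortBy(seg => -seg.map { case (s, e) => scoreSpan(s, e) }.sum)
      .take(beam)
  }
}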
| utcompling/textgrounder | src/main/scala/opennlp/textgrounder/gridlocate/GridRanker.scala | Scala | apache-2.0 | 33,885 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v3
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtOptionalInteger}
import uk.gov.hmrc.ct.computations.HmrcAccountingPeriod
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
import uk.gov.hmrc.ct.ct600.v3.calculations.CorporationTaxCalculator
// was B53
case class B380(value: Option[Int]) extends CtBoxIdentifier("Financial Year FY2") with CtOptionalInteger
object B380 extends CorporationTaxCalculator with Calculated[B380, ComputationsBoxRetriever] {
override def calculate(fieldValueRetriever: ComputationsBoxRetriever): B380 =
financialYear2(
HmrcAccountingPeriod(fieldValueRetriever.retrieveCP1(),fieldValueRetriever.retrieveCP2())
)
}
| ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v3/B380.scala | Scala | apache-2.0 | 1,326 |
/* Copyright 2009-2011 Jay Conrod
*
* This file is part of Tungsten.
*
* Tungsten is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 2 of
* the License, or (at your option) any later version.
*
* Tungsten is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Tungsten. If not, see
* <http://www.gnu.org/licenses/>.
*/
package tungsten
import scala.collection.immutable.TreeMap
import org.junit.Test
import org.junit.Assert._
import java.io._
import Utilities._
import ModuleIO._
class ModuleIOTest // generate a classfile so Buildr doesn't rebuild this file unnecessarily
class ModuleIOReadTextTest {
@Test
def parseTest {
val program = "global unit @g"
val definitions = TreeMap(Symbol("g") -> Global("g", UnitType, None))
val expected = new Module(definitions=definitions)
assertEquals(Left(expected), parse(program, "<test>"))
}
}
class ModuleIOWriteBinaryCollectTest {
def makeWriter(program: String): BinaryModuleWriter = {
val module = readText(program)
val output = new DataOutputStream(new ByteArrayOutputStream)
new BinaryModuleWriter(module, output)
}
def testCollect[T](program: String,
getTable: BinaryModuleWriter => BinaryModuleWriter#Table[T],
value: T)
{
val writer = makeWriter(program)
writer.collect
val table = getTable(writer)
val index = table(value)
assertEquals(value, table.get(index))
}
@Test
def tableTest {
val writer = makeWriter("")
val table = writer.strings
table.add("test")
assertEquals(0, table("test"))
assertEquals("test", table.get(0))
}
@Test
def collectSymbolCollectsStrings {
val writer = makeWriter("")
val sym = symbolFromString("foo.bar")
writer.collectSymbol(sym)
assertEquals(sym.name(0), writer.strings.get(writer.strings(sym.name(0))))
}
@Test
def collectDefinitionNameStrings {
val program = "global unit @foo.bar"
testCollect(program, _.strings, "foo")
testCollect(program, _.strings, "bar")
}
@Test
def collectDefinitionNames {
val program = "global unit @foo.bar#32"
testCollect(program, _.symbols, symbolFromString("foo.bar#32"))
}
}
class ModuleIOWriteBinaryTest {
val module = new Module
val output = new ByteArrayOutputStream
val writer = new BinaryModuleWriter(module, new DataOutputStream(output))
def testOutput(expected: Any*) {
testOutput(expected.toList)
}
def testOutput(expected: List[Any]) {
val expectedOutput = new ByteArrayOutputStream
val stream = new DataOutputStream(expectedOutput)
expected.foreach {
case b: Byte => stream.writeByte(b)
case s: Short => stream.writeShort(s)
case i: Int => stream.writeInt(i)
case l: Long => stream.writeLong(l)
case f: Float => stream.writeFloat(f)
case d: Double => stream.writeDouble(d)
case s: String => stream.writeUTF(s)
}
val data = output.toByteArray
val expectedData = expectedOutput.toByteArray
assertArrayEquals(expectedData, data)
}
@Test
def testWriteOptionSome {
writer.writeOption(Some(12), writer.writeInt _)
testOutput(1.asInstanceOf[Byte], 12)
}
@Test
def testWriteOptionNone {
writer.writeOption(None, writer.writeInt _)
testOutput(0.asInstanceOf[Byte])
}
@Test
def testWriteList {
writer.writeList(List(1, 2, 3), writer.writeInt _)
testOutput(3, 1, 2, 3)
}
@Test
def testWriteString {
val s = "hello"
writer.writeString(s)
testOutput(s)
}
@Test
def testWriteSymbol {
writer.strings.add("foo")
writer.strings.add("bar")
writer.writeSymbol("foo.bar#32")
testOutput(2, writer.strings("foo"), writer.strings("bar"), 32)
}
}
class ModuleIOWriteTextTest {
val output = new StringWriter
val emptyModule = new Module(is64Bit=true)
val dummyWriter = new TextModuleWriter(emptyModule, output)
@Test
def values {
assertEquals("()", dummyWriter.localValue(UnitValue, None))
assertEquals("true", dummyWriter.localValue(BooleanValue(true), None))
assertEquals("false", dummyWriter.localValue(BooleanValue(false), None))
assertEquals("'c'", dummyWriter.localValue(CharValue('c'), None))
assertEquals("'\\\\000a'", dummyWriter.localValue(CharValue('\\n'), None))
assertEquals("\\"hello\\"", dummyWriter.localValue(StringValue("hello"), None))
assertEquals("\\"multi\\\\000aline\\"", dummyWriter.localValue(StringValue("multi\\nline"), None))
assertEquals("int32 12", dummyWriter.localValue(IntValue(12L, 32), None))
assertEquals("[1 x unit] {()}", dummyWriter.localValue(ArrayValue(UnitType, List(UnitValue)), None))
assertEquals("struct @A {()}", dummyWriter.localValue(StructValue("A", List(UnitValue)), None))
}
@Test
def types {
assertEquals("unit", dummyWriter.localType(UnitType, None))
assertEquals("boolean", dummyWriter.localType(BooleanType, None))
assertEquals("char", dummyWriter.localType(CharType, None))
assertEquals("string", dummyWriter.localType(StringType, None))
assertEquals("int32", dummyWriter.localType(IntType(32), None))
assertEquals("float32", dummyWriter.localType(FloatType(32), None))
assertEquals("unit*", dummyWriter.localType(PointerType(UnitType), None))
assertEquals("nulltype", dummyWriter.localType(NullType, None))
assertEquals("[2 x unit]", dummyWriter.localType(ArrayType(2L, UnitType), None))
assertEquals("struct @A", dummyWriter.localType(StructType("A"), None))
}
@Test
def localSymbol {
assertEquals("@a", dummyWriter.localSymbol("a", None).toString)
assertEquals("@b.c", dummyWriter.localSymbol("b.c", Some("a")).toString)
assertEquals("%b", dummyWriter.localSymbol("a.b", Some("a")).toString)
}
@Test
def localType {
val ty = StructType("B.A")
assertEquals("struct %A", dummyWriter.localType(ty, Some("B")))
}
@Test
def localValue {
val value = DefinedValue("b.a", StructType("A"))
assertEquals("struct @A %a", dummyWriter.localValue(value, Some("b")))
}
@Test
def writeChildren {
val children = List(1, 2, 3)
def writer(i: Int) = output.write(i.toString)
dummyWriter.writeChildren(children, writer, "(", ", ", ")")
assertEquals("(1, 2, 3)", output.toString)
}
}
| jayconrod/tungsten | core/src/test/scala/tungsten/ModuleIOTest.scala | Scala | gpl-2.0 | 6,685 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 Heiko Blobner
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package de.windelknecht.stup.utils.data.collection.bitSet
import de.windelknecht.stup.utils.tools.BitTwiddling
import scala.collection.immutable.BitSet
import scala.collection.immutable
object RichEnumBitValue {
/**
* Create new instance with this enum as underlying type
*
* @param enum this is the one
* @tparam E type info
* @return new instance
*/
def apply[E <: Enumeration](
enum: E
) = applyWithLen(enum, 0, BitTwiddling.getMostSignificantBit(enum.maxId).getOrElse(0) + 1)
/**
* Create new instance with this enum as underlying type (with len info as mask)
*
* @param enum this is the one
* @param shift shift the result
* @tparam E type info
* @return new instance
*/
def applyWithLen[E <: Enumeration](
enum: E,
shift: Int,
len: Int
) = applyWithMask(enum, shift, (1 << len) - 1)
/**
* Create new instance with this enum as underlying type (with given mask as mask)
*
* @param enum this is the one
* @param shift shift the result
* @param mask mask out these bits
* @tparam E type info
* @return new instance
*/
def applyWithMask[E <: Enumeration](
enum: E,
shift: Int,
mask: Int
) = new RichEnumBitValue[E](enum, _shift = shift, _mask = mask)
}
class RichEnumBitValue[E <: Enumeration](
_enum: E,
_shift: Int,
_mask: Int
) {
// fields
private var _value: Option[E#Value] = None
def mask = _mask
def shift = _shift
/**
   * This method is used to clear out all values.
*/
def clear() = _value = None
/**
* Returns true if the value is active
*/
def contains(value: E#Value): Boolean = _value.contains(value)
/**
* This method is used to clear out the given value.
*/
def -=(value: E#Value): this.type = {
if(contains(value))
_value = None
this
}
/**
* This method takes the enum and sets its value.
*/
def +=(value: E#Value): this.type = {
if(!maskOut(value))
_value = Some(value)
this
}
/**
* Add this value
*/
def add(value: E#Value) = this += value
/**
* Remove this value
*/
def remove(value: E#Value) = this -= value
/**
* Return our bit mask.
*/
def toBitMask: Array[Long] = toBitSet.toBitMask
/**
* Return our bit set.
*/
def toBitSet: immutable.BitSet = BitSet.fromBitMask(Array(shifted()))
/**
   * Returns true if the value should be masked out
*/
private def maskOut(value: E#Value) = (value.id & _mask) == 0
/**
   * Shift the stored enum id into its encoded long representation
*/
private def shifted(): Long = _value match {
case Some(x) => x.id << _shift
case None => 0
}
}
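/* Hypothetical usage sketch, not part of the original file, illustrating the
 * behaviour documented above: values whose id is removed by the mask are
 * silently ignored, and `toBitSet` exposes the shifted id as a bit mask. */
object RichEnumBitValueUsageSketch {
  object Color extends Enumeration { val Red, Green, Blue = Value }

  def demo(): immutable.BitSet = {
    val bits = RichEnumBitValue(Color) // shift = 0, mask derived from Color.maxId
    bits += Color.Green                // Green.id = 1, kept since (1 & mask) != 0
    bits += Color.Red                  // Red.id = 0, masked out, so ignored
    bits.toBitSet                      // encodes the long value 1, i.e. BitSet(0)
  }
}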
| windelknecht/stup-utils | src/main/scala/de/windelknecht/stup/utils/data/collection/bitSet/RichEnumBitValue.scala | Scala | mit | 3,785 |
package com.softwaremill.session
import org.json4s.JValue
import org.scalatest.{Matchers, FlatSpec}
class SessionManagerJwtEncoderTest extends FlatSpec with Matchers {
val defaultConfig = SessionConfig.default("1234567890123456789012345678901234567890123456789012345678901234567890")
val configMaxAge = defaultConfig.copy(sessionMaxAgeSeconds = Some(3600))
val configEncrypted = defaultConfig.copy(sessionEncryptData = true)
val configEncryptedMaxAge = configMaxAge.copy(sessionEncryptData = true)
case class TestData[T](
name: String,
data: T,
config: SessionConfig,
sessionSerializer: SessionSerializer[T, JValue]
)
import JValueSessionSerializer._
val tests = List(
TestData("string, default config", "username", defaultConfig, implicitly[SessionSerializer[String, JValue]]),
TestData("string, with max age", "username", configMaxAge, implicitly[SessionSerializer[String, JValue]]),
TestData("string, with encryption", "username", configEncrypted, implicitly[SessionSerializer[String, JValue]]),
TestData("string, with max age and encryption", "username", configEncryptedMaxAge, implicitly[SessionSerializer[String, JValue]]),
TestData("integer, default config", 12345, defaultConfig, implicitly[SessionSerializer[Int, JValue]]),
TestData("case class, default config", SessionData("john", 10), defaultConfig, JValueSessionSerializer.caseClass[SessionData]),
TestData("case class, with max age and encryption", SessionData("john", 20), configEncryptedMaxAge, JValueSessionSerializer.caseClass[SessionData])
)
tests.foreach { td =>
it should s"encode+decode for ${td.name}" in {
runTest(td)
}
}
def runTest[T](td: TestData[T]): Unit = {
implicit val ss = td.sessionSerializer
implicit val encoder = new JwtSessionEncoder[T]
val manager = new SessionManager(td.config).clientSessionManager
manager.decode(manager.encode(td.data)) should be (SessionResult.Decoded(td.data))
}
it should "encode correctly in the JWT format" in {
implicit val ss = JValueSessionSerializer.caseClass[SessionData]
implicit val encoder = new JwtSessionEncoder[SessionData]
val encoded = encoder.encode(SessionData("john", 30), 1447416197071L, defaultConfig)
println(s"Test on: http://jwt.io/#debugger:\\n$encoded")
encoded.count(_ == '.') should be (2)
}
it should "not decode an expired session" in {
implicit val ss = JValueSessionSerializer.caseClass[SessionData]
implicit val encoder = new JwtSessionEncoder[SessionData]
val managerHour1 = new SessionManager(configMaxAge) {
override def nowMillis = 1447416197071L
}.clientSessionManager
val managerHour3 = new SessionManager(configMaxAge) {
override def nowMillis = 1447416197071L + 1000L * 60 * 60 * 3
}.clientSessionManager
managerHour3.decode(managerHour1.encode(SessionData("john", 40))) should be (SessionResult.Expired)
}
it should "decode a token with 'Bearer' prefix" in {
implicit val ss = JValueSessionSerializer.caseClass[SessionData]
implicit val encoder = new JwtSessionEncoder[SessionData]
val manager = new SessionManager(defaultConfig).clientSessionManager
val data = SessionData("john", 50)
manager.decode("Bearer " + manager.encode(data)) should be (SessionResult.Decoded(data))
}
}
case class SessionData(userName: String, userId: Int)
| ilyai/akka-http-session | jwt/src/test/scala/com/softwaremill/session/SessionManagerJwtEncoderTest.scala | Scala | apache-2.0 | 3,393
/*
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.compiler.broadcast
import scala.reflect.ClassTag
import org.apache.spark.broadcast.Broadcast
class MockBroadcast[T: ClassTag](id: Int, value: T) extends Broadcast[T](id) {
override protected def getValue(): T = value
override protected def doUnpersist(blocking: Boolean): Unit = {}
override protected def doDestroy(blocking: Boolean): Unit = {}
}
| asakusafw/asakusafw-spark | compiler/src/test/scala/com/asakusafw/spark/compiler/broadcast/MockBroadcast.scala | Scala | apache-2.0 | 999 |
package ch.descabato.core.actors
import akka.actor.ActorSystem
import ch.descabato.core.config.BackupFolderConfiguration
import ch.descabato.core.util.FileManager
import scala.concurrent.ExecutionContext
class BackupContext(val config: BackupFolderConfiguration,
val actorSystem: ActorSystem,
val fileManager: FileManager,
implicit val executionContext: ExecutionContext,
val eventBus: MyEventBus) {
}
| Stivo/DeScaBaTo | core/src/main/scala/ch/descabato/core/actors/BackupContext.scala | Scala | gpl-3.0 | 487 |
package debop4s.web.scalatra.controller
import debop4s.web.scalatra.annotations.ServletPath
import debop4s.web.scalatra.scalate.ScalatraWebStack
import org.scalatra.{AsyncResult, FutureSupport}
import org.slf4j.LoggerFactory
import org.springframework.stereotype.Component
import scala.concurrent._
/**
* IndexServlet
* @author [email protected]
*/
@ServletPath("/")
@Component
class IndexServlet extends ScalatraWebStack with FutureSupport {
private val log = LoggerFactory.getLogger(getClass)
implicit override protected def executor: ExecutionContextExecutor = ExecutionContext.Implicits.global
before() {
contentType = "text/html"
log.debug(s"before...")
}
after() {
log.debug(s"after...")
}
get("/") {
<h1>Scalatra Web Application</h1>
}
get("/async") {
new AsyncResult() {
override val is = Future {<h1>Scalatra Async Response</h1>}
}
}
get("/index") {
new AsyncResult() {
override val is = Future {
ssp("index")
}
}
}
}
| debop/debop4s | debop4s-web-scalatra/src/test/scala/debop4s/web/scalatra/controller/IndexServlet.scala | Scala | apache-2.0 | 1,027 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.expressions.utils
import java.util.Collections
import org.apache.calcite.plan.hep.{HepPlanner, HepProgramBuilder}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.logical.LogicalCalc
import org.apache.calcite.rel.rules._
import org.apache.calcite.rex.RexNode
import org.apache.calcite.sql.`type`.SqlTypeName.VARCHAR
import org.apache.flink.api.common.TaskInfo
import org.apache.flink.api.common.functions.util.RuntimeUDFContext
import org.apache.flink.api.common.functions.{MapFunction, RichFunction, RichMapFunction}
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.table.api.bridge.java.internal.StreamTableEnvironmentImpl
import org.apache.flink.table.api.{EnvironmentSettings, TableConfig}
import org.apache.flink.table.data.RowData
import org.apache.flink.table.data.binary.BinaryRowData
import org.apache.flink.table.data.conversion.{DataStructureConverter, DataStructureConverters}
import org.apache.flink.table.data.util.DataFormatConverters
import org.apache.flink.table.data.util.DataFormatConverters.DataFormatConverter
import org.apache.flink.table.expressions.{Expression, ExpressionParser}
import org.apache.flink.table.functions.ScalarFunction
import org.apache.flink.table.planner.codegen.{CodeGeneratorContext, ExprCodeGenerator, FunctionCodeGenerator}
import org.apache.flink.table.planner.delegation.PlannerBase
import org.apache.flink.table.runtime.types.TypeInfoLogicalTypeConverter.fromTypeInfoToLogicalType
import org.apache.flink.table.types.AbstractDataType
import org.apache.flink.table.types.logical.{RowType, VarCharType}
import org.apache.flink.table.types.utils.TypeConversions
import org.apache.flink.types.Row
import org.junit.Assert.{assertEquals, fail}
import org.junit.rules.ExpectedException
import org.junit.{After, Before, Rule}
import scala.collection.mutable
import scala.collection.JavaConverters._
abstract class ExpressionTestBase {
val config = new TableConfig()
// (originalExpr, optimizedExpr, expectedResult)
private val testExprs = mutable.ArrayBuffer[(String, RexNode, String)]()
private val env = StreamExecutionEnvironment.createLocalEnvironment(4)
private val setting = EnvironmentSettings.newInstance().inStreamingMode().build()
// use impl class instead of interface class to avoid
// "Static methods in interface require -target:jvm-1.8"
private val tEnv = StreamTableEnvironmentImpl.create(env, setting, config)
.asInstanceOf[StreamTableEnvironmentImpl]
private val resolvedDataType = if (containsLegacyTypes) {
TypeConversions.fromLegacyInfoToDataType(typeInfo)
} else {
tEnv.getCatalogManager.getDataTypeFactory.createDataType(testDataType)
}
private val planner = tEnv.getPlanner.asInstanceOf[PlannerBase]
private val relBuilder = planner.getRelBuilder
private val calcitePlanner = planner.createFlinkPlanner
private val parser = planner.plannerContext.createCalciteParser()
// setup test utils
private val tableName = "testTable"
protected val nullable = "null"
protected val notNullable = "not null"
// used for accurate exception information checking.
val expectedException: ExpectedException = ExpectedException.none()
@Rule
def thrown: ExpectedException = expectedException
@Before
def prepare(): Unit = {
if (containsLegacyTypes) {
val ds = env.fromCollection(Collections.emptyList[Row](), typeInfo)
tEnv.createTemporaryView(tableName, ds)
functions.foreach(f => tEnv.registerFunction(f._1, f._2))
} else {
tEnv.createTemporaryView(tableName, tEnv.fromValues(resolvedDataType))
testSystemFunctions.asScala.foreach(e => tEnv.createTemporarySystemFunction(e._1, e._2))
}
// prepare RelBuilder
relBuilder.scan(tableName)
// reset test exprs
testExprs.clear()
}
@After
def evaluateExprs(): Unit = {
val ctx = CodeGeneratorContext(config)
val inputType = if (containsLegacyTypes) {
fromTypeInfoToLogicalType(typeInfo)
} else {
resolvedDataType.getLogicalType
}
val exprGenerator = new ExprCodeGenerator(ctx, nullableInput = false).bindInput(inputType)
// cast expressions to String
val stringTestExprs = testExprs.map(expr => relBuilder.cast(expr._2, VARCHAR))
// generate code
val resultType = RowType.of(Seq.fill(testExprs.size)(
new VarCharType(VarCharType.MAX_LENGTH)): _*)
val exprs = stringTestExprs.map(exprGenerator.generateExpression)
val genExpr = exprGenerator.generateResultExpression(exprs, resultType, classOf[BinaryRowData])
val bodyCode =
s"""
|${genExpr.code}
|return ${genExpr.resultTerm};
""".stripMargin
val genFunc = FunctionCodeGenerator.generateFunction[MapFunction[RowData, BinaryRowData]](
ctx,
"TestFunction",
classOf[MapFunction[RowData, BinaryRowData]],
bodyCode,
resultType,
inputType)
val mapper = genFunc.newInstance(getClass.getClassLoader)
val isRichFunction = mapper.isInstanceOf[RichFunction]
// call setRuntimeContext method and open method for RichFunction
if (isRichFunction) {
val richMapper = mapper.asInstanceOf[RichMapFunction[_, _]]
val t = new RuntimeUDFContext(
new TaskInfo("ExpressionTest", 1, 0, 1, 1),
null,
env.getConfig,
Collections.emptyMap(),
Collections.emptyMap(),
null)
richMapper.setRuntimeContext(t)
richMapper.open(new Configuration())
}
val testRow = if (containsLegacyTypes) {
val converter = DataFormatConverters
.getConverterForDataType(resolvedDataType)
.asInstanceOf[DataFormatConverter[RowData, Row]]
converter.toInternal(testData)
} else {
val converter = DataStructureConverters
.getConverter(resolvedDataType)
.asInstanceOf[DataStructureConverter[RowData, Row]]
converter.toInternalOrNull(testData)
}
val result = mapper.map(testRow)
// call close method for RichFunction
if (isRichFunction) {
mapper.asInstanceOf[RichMapFunction[_, _]].close()
}
// compare
testExprs
.zipWithIndex
.foreach {
case ((originalExpr, optimizedExpr, expected), index) =>
// adapt string result
val actual = if(!result.asInstanceOf[BinaryRowData].isNullAt(index)) {
result.asInstanceOf[BinaryRowData].getString(index).toString
} else {
null
}
val original = if (originalExpr == null) "" else s"for: [$originalExpr]"
assertEquals(
s"Wrong result $original optimized to: [$optimizedExpr]",
expected,
if (actual == null) "null" else actual)
}
}
private def addSqlTestExpr(sqlExpr: String, expected: String): Unit = {
// create RelNode from SQL expression
val parsed = parser.parse(s"SELECT $sqlExpr FROM $tableName")
val validated = calcitePlanner.validate(parsed)
val converted = calcitePlanner.rel(validated).rel
addTestExpr(converted, expected, sqlExpr)
}
private def addTestExpr(relNode: RelNode, expected: String, summaryString: String): Unit = {
val builder = new HepProgramBuilder()
builder.addRuleInstance(ProjectToCalcRule.INSTANCE)
val hep = new HepPlanner(builder.build())
hep.setRoot(relNode)
val optimized = hep.findBestExp()
// throw exception if plan contains more than a calc
if (!optimized.getInput(0).getInputs.isEmpty) {
fail("Expression is converted into more than a Calc operation. Use a different test method.")
}
testExprs += ((summaryString, extractRexNode(optimized), expected))
}
private def extractRexNode(node: RelNode): RexNode = {
val calcProgram = node
.asInstanceOf[LogicalCalc]
.getProgram
calcProgram.expandLocalRef(calcProgram.getProjectList.get(0))
}
def testAllApis(
expr: Expression,
sqlExpr: String,
expected: String): Unit = {
addTableApiTestExpr(expr, expected)
addSqlTestExpr(sqlExpr, expected)
}
def testTableApi(
expr: Expression,
expected: String): Unit = {
addTableApiTestExpr(expr, expected)
}
private def addTableApiTestExpr(tableApiString: String, expected: String): Unit = {
addTableApiTestExpr(ExpressionParser.parseExpression(tableApiString), expected)
}
private def addTableApiTestExpr(tableApiExpr: Expression, expected: String): Unit = {
// create RelNode from Table API expression
val relNode = relBuilder
.queryOperation(tEnv.from(tableName).select(tableApiExpr).getQueryOperation).build()
addTestExpr(relNode, expected, tableApiExpr.asSummaryString())
}
def testSqlApi(
sqlExpr: String,
expected: String): Unit = {
addSqlTestExpr(sqlExpr, expected)
}
def testData: Row
def testDataType: AbstractDataType[_] =
throw new IllegalArgumentException("Implement this if no legacy types are expected.")
def testSystemFunctions: java.util.Map[String, ScalarFunction] = Collections.emptyMap();
// ----------------------------------------------------------------------------------------------
// Legacy type system
// ----------------------------------------------------------------------------------------------
def containsLegacyTypes: Boolean = true
@deprecated
def functions: Map[String, ScalarFunction] = Map()
@deprecated
def typeInfo: RowTypeInfo =
throw new IllegalArgumentException("Implement this if legacy types are expected.")
@deprecated
def testAllApis(
expr: Expression,
exprString: String,
sqlExpr: String,
expected: String): Unit = {
addTableApiTestExpr(expr, expected)
addTableApiTestExpr(exprString, expected)
addSqlTestExpr(sqlExpr, expected)
}
@deprecated
def testTableApi(
expr: Expression,
exprString: String,
expected: String): Unit = {
addTableApiTestExpr(expr, expected)
addTableApiTestExpr(exprString, expected)
}
}
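/* Hypothetical illustration, not part of the original file, of how a concrete
 * test might extend this base class with the new type system: provide a row of
 * test data plus its data type, disable the legacy path, and register
 * expressions via testSqlApi. The field name "f0" and the expected string "43"
 * are assumptions made for this sketch. */
class ExpressionTestBaseUsageSketch extends ExpressionTestBase {
  override def containsLegacyTypes: Boolean = false

  override def testData: Row = Row.of(Int.box(42))

  override def testDataType: AbstractDataType[_] =
    org.apache.flink.table.api.DataTypes.ROW(
      org.apache.flink.table.api.DataTypes.FIELD(
        "f0", org.apache.flink.table.api.DataTypes.INT()))

  @org.junit.Test
  def testSimpleArithmetic(): Unit = testSqlApi("f0 + 1", "43")
}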
| jinglining/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/expressions/utils/ExpressionTestBase.scala | Scala | apache-2.0 | 10,968 |
package guru.nidi.geo.tiff
import java.io.{File, FileInputStream, InputStream}
import scala.annotation.tailrec
import scala.collection.mutable
/**
*
*/
//TODO replace with geotools
object GeoTiffReader {
def read(file: File): GeoTiff = {
read(new FileInputStream(file), file.length().toInt)
}
def read(input: InputStream, size: Int): GeoTiff = {
val res = new GeoTiffReader(input, size).geoTiff
res
}
}
private class GeoTiffReader(input: InputStream, size: Int) {
// val in = new EndianAwareRandomAccessFile(new RandomAccessFileLike(new RandomAccessFile(file, "r")))
val a = System.currentTimeMillis()
val in = new EndianAwareRandomAccessFile(new BufferedFileLike(input, size))
val b = System.currentTimeMillis()
var width = 0
var height = 0
var bitsPerSample = 0
val bytesPerPixel = 2
val info = mutable.Map[String, String]()
var stripOffsets = Array[Int]()
var rowsPerStrip = 0
var stripByteCounts = Array[Int]()
var xResolution = 0.0
var yResolution = 0.0
var resolutionUnit = 2
var modelPixelScale = Array[Double](1, 1, 1)
var modelTiepoints = Array[Double]()
var geoKeyDirectory = Array[Int]()
var strips: Array[Array[Byte]] = null
val geoTiff = {
val endian = in.readShort()
if (endian == 0x4949) in.setLittleEndian()
else if (endian == 0x4d4d) in.setBigEndian()
else throw new IllegalArgumentException(s"Invalid endian $endian")
val magic = in.readShort()
if (magic != 42) throw new IllegalArgumentException(s"Invalid magic number $magic")
readDirectory()
in.close()
new GeoTiff(this, width, height, bitsPerSample, info,
xResolution, yResolution, resolutionUnit,
modelPixelScale, modelTiepoints, geoKeyDirectory)
}
val c = System.currentTimeMillis()
println("tiff load " + (b - a) + ";" + (c - b))
@tailrec
private def readDirectory(): Unit = {
val pos = in.readInt()
if (pos != 0) {
in.seek(pos)
val len = in.readShort()
for (i <- 0 until len) {
readEntry()
}
in.doAt(0) {
readStrips()
}
readDirectory()
}
}
private def readEntry() {
val tag = in.readShort()
val typ = in.readShort()
val len = in.readInt()
val pos = in.readInt()
def readScalar(): Int = {
if (typ != 1 && typ != 3 && typ != 4) throw new UnsupportedOperationException(s"Unknown type $typ")
pos
}
def readString(): String = {
if (typ != 2) throw new UnsupportedOperationException(s"Unknown type $typ")
in.doAt(pos) {
in.readAscii(len - 1)
}
}
def readFrac(): Double = {
if (typ != 5) throw new UnsupportedOperationException(s"Unknown type $typ")
in.doAt(pos) {
in.readInt().toDouble / in.readInt()
}
}
def readDoubles(): Array[Double] = {
if (typ != 12) throw new UnsupportedOperationException(s"Unknown type $typ")
in.doAt(pos) {
val res = new Array[Double](len)
for (i <- 0 until len) res(i) = in.readDouble()
res
}
}
def readScalars(): Array[Int] = {
if (typ != 1 && typ != 3 && typ != 4) throw new UnsupportedOperationException(s"Unknown type $typ")
in.doAt(pos) {
val res = new Array[Int](len)
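        // Note: typ 1 (BYTE) passes the guard above but is not covered by the
        // match below, so such an entry would currently fail with a MatchError.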
for (i <- 0 until len) res(i) = typ match {
case 3 => in.readShort()
case 4 => in.readInt()
}
res
}
}
tag match {
case 256 => width = readScalar()
case 257 => height = readScalar()
case 258 => bitsPerSample = readScalar()
case 259 => if (readScalar() != 1) throw new UnsupportedOperationException(s"Unknown compression")
case 262 => //ignore photometric interpretation
case 266 => if (readScalar() != 1) throw new UnsupportedOperationException(s"Unknown fill order")
case 269 => info.put("documentName", readString())
case 270 => info.put("imageDescription", readString())
case 273 => stripOffsets = readScalars()
case 274 => //ignore orientation
case 277 => if (readScalar() != 1) throw new UnsupportedOperationException(s"Unknown samples per pixel")
case 278 => rowsPerStrip = readScalar()
case 279 => stripByteCounts = readScalars()
case 282 => xResolution = readFrac()
case 283 => yResolution = readFrac()
case 284 => if (readScalar() != 1) throw new UnsupportedOperationException(s"Unknown planar configuration")
case 296 => resolutionUnit = readScalar()
case 305 => info.put("software", readString())
case 306 => info.put("dateTime", readString())
case 339 => if (readScalar() != 2) throw new UnsupportedOperationException(s"Unknown sample format")
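      // The GeoTIFF tags 33550 (ModelPixelScale), 33922 (ModelTiepoint) and
      // 34735 (GeoKeyDirectory) show up negative here because the tag id is
      // read as a signed 16-bit short.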
case -31986 => modelPixelScale = readDoubles()
case -31614 => modelTiepoints = readDoubles()
case -30801 => geoKeyDirectory = readScalars()
}
}
private def readStrips() = {
val stripCount = height / rowsPerStrip
strips = new Array[Array[Byte]](stripCount)
for (s <- 0 until stripCount) {
in.seek(stripOffsets(s))
strips(s) = in.read(rowsPerStrip * width * bytesPerPixel)
}
}
def getPixel(x: Int, y: Int): Short = {
val s = y / rowsPerStrip
val pos = (x + (y % rowsPerStrip) * width) * bytesPerPixel
in.readShort(strips(s), pos)
}
def doWithPixels(y: Int, work: (Int, Short) => Unit) = {
val s = y / rowsPerStrip
val pos = (y % rowsPerStrip) * width * bytesPerPixel
var x = 0
while (x < width) {
work(x, in.readShort(strips(s), pos + x * bytesPerPixel))
x += 1
}
}
}
| nidi3/mineedit | geo/src/main/scala/guru/nidi/geo/tiff/GeoTiffReader.scala | Scala | apache-2.0 | 5,573 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.s2graph.core.Integrate
import org.apache.s2graph.core.utils.logger
import org.scalatest.BeforeAndAfterEach
import play.api.libs.json.{JsNumber, JsValue, Json}
class QueryTest extends IntegrateCommon with BeforeAndAfterEach {
import TestUtil._
val insert = "insert"
val e = "e"
val weight = "weight"
val is_hidden = "is_hidden"
test("interval") {
def queryWithInterval(id: Int, index: String, prop: String, fromVal: Int, toVal: Int) = Json.parse(
s"""
{ "srcVertices": [
{ "serviceName": "$testServiceName",
"columnName": "$testColumnName",
"id": $id
}],
"steps": [
[ {
"label": "$testLabelName",
"index": "$index",
"interval": {
"from": [ { "$prop": $fromVal } ],
"to": [ { "$prop": $toVal } ]
}
}
]]
}
""")
var edges = getEdgesSync(queryWithInterval(0, index2, "_timestamp", 1000, 1001)) // test interval on timestamp index
(edges \\ "size").toString should be("1")
edges = getEdgesSync(queryWithInterval(0, index2, "_timestamp", 1000, 2000)) // test interval on timestamp index
(edges \\ "size").toString should be("2")
edges = getEdgesSync(queryWithInterval(2, index1, "weight", 10, 11)) // test interval on weight index
(edges \\ "size").toString should be("1")
edges = getEdgesSync(queryWithInterval(2, index1, "weight", 10, 20)) // test interval on weight index
(edges \\ "size").toString should be("2")
}
test("get edge with where condition") {
def queryWhere(id: Int, where: String) = Json.parse(
s"""
{ "srcVertices": [
{ "serviceName": "${testServiceName}",
"columnName": "${testColumnName}",
"id": ${id}
}],
"steps": [
[ {
"label": "${testLabelName}",
"direction": "out",
"offset": 0,
"limit": 100,
"where": "${where}"
}
]]
}""")
var result = getEdgesSync(queryWhere(0, "is_hidden=false and _from in (-1, 0)"))
(result \\ "results").as[List[JsValue]].size should be(1)
result = getEdgesSync(queryWhere(0, "is_hidden=true and _to in (1)"))
(result \\ "results").as[List[JsValue]].size should be(1)
result = getEdgesSync(queryWhere(0, "_from=0"))
(result \\ "results").as[List[JsValue]].size should be(2)
result = getEdgesSync(queryWhere(2, "_from=2 or weight in (-1)"))
(result \\ "results").as[List[JsValue]].size should be(2)
result = getEdgesSync(queryWhere(2, "_from=2 and weight in (10, 20)"))
(result \\ "results").as[List[JsValue]].size should be(2)
}
test("get edge exclude") {
def queryExclude(id: Int) = Json.parse(
s"""
{ "srcVertices": [
{ "serviceName": "${testServiceName}",
"columnName": "${testColumnName}",
"id": ${id}
}],
"steps": [
[ {
"label": "${testLabelName}",
"direction": "out",
"offset": 0,
"limit": 2
},
{
"label": "${testLabelName}",
"direction": "in",
"offset": 0,
"limit": 2,
"exclude": true
}
]]
}""")
val result = getEdgesSync(queryExclude(0))
(result \\ "results").as[List[JsValue]].size should be(1)
}
test("get edge groupBy property") {
def queryGroupBy(id: Int, props: Seq[String]): JsValue = {
Json.obj(
"groupBy" -> props,
"srcVertices" -> Json.arr(
Json.obj("serviceName" -> testServiceName, "columnName" -> testColumnName, "id" -> id)
),
"steps" -> Json.arr(
Json.obj(
"step" -> Json.arr(
Json.obj(
"label" -> testLabelName
)
)
)
)
)
}
val result = getEdgesSync(queryGroupBy(0, Seq("weight")))
(result \\ "size").as[Int] should be(2)
val weights = (result \\ "results" \\\\ "groupBy").map { js =>
(js \\ "weight").as[Int]
}
weights should contain(30)
weights should contain(40)
weights should not contain (10)
}
test("edge transform") {
def queryTransform(id: Int, transforms: String) = Json.parse(
s"""
{ "srcVertices": [
{ "serviceName": "${testServiceName}",
"columnName": "${testColumnName}",
"id": ${id}
}],
"steps": [
[ {
"label": "${testLabelName}",
"direction": "out",
"offset": 0,
"transform": $transforms
}
]]
}""")
var result = getEdgesSync(queryTransform(0, "[[\\"_to\\"]]"))
(result \\ "results").as[List[JsValue]].size should be(2)
result = getEdgesSync(queryTransform(0, "[[\\"weight\\"]]"))
(result \\ "results" \\\\ "to").map(_.toString).sorted should be((result \\ "results" \\\\ "weight").map(_.toString).sorted)
result = getEdgesSync(queryTransform(0, "[[\\"_from\\"]]"))
(result \\ "results" \\\\ "to").map(_.toString).sorted should be((result \\ "results" \\\\ "from").map(_.toString).sorted)
}
test("index") {
def queryIndex(ids: Seq[Int], indexName: String) = {
val $from = Json.arr(
Json.obj("serviceName" -> testServiceName,
"columnName" -> testColumnName,
"ids" -> ids))
val $step = Json.arr(Json.obj("label" -> testLabelName, "index" -> indexName))
val $steps = Json.arr(Json.obj("step" -> $step))
val js = Json.obj("withScore" -> false, "srcVertices" -> $from, "steps" -> $steps)
js
}
// weight order
var result = getEdgesSync(queryIndex(Seq(0), "idx_1"))
((result \\ "results").as[List[JsValue]].head \\\\ "weight").head should be(JsNumber(40))
// timestamp order
result = getEdgesSync(queryIndex(Seq(0), "idx_2"))
((result \\ "results").as[List[JsValue]].head \\\\ "weight").head should be(JsNumber(30))
}
// "checkEdges" in {
// running(FakeApplication()) {
// val json = Json.parse( s"""
// [{"from": 0, "to": 1, "label": "$testLabelName"},
// {"from": 0, "to": 2, "label": "$testLabelName"}]
// """)
//
// def checkEdges(queryJson: JsValue): JsValue = {
// val ret = route(FakeRequest(POST, "/graphs/checkEdges").withJsonBody(queryJson)).get
// contentAsJson(ret)
// }
//
// val res = checkEdges(json)
// val typeRes = res.isInstanceOf[JsArray]
// typeRes must equalTo(true)
//
// val fst = res.as[Seq[JsValue]].head \\ "to"
// fst.as[Int] must equalTo(1)
//
// val snd = res.as[Seq[JsValue]].last \\ "to"
// snd.as[Int] must equalTo(2)
// }
// }
test("duration") {
def queryDuration(ids: Seq[Int], from: Int, to: Int) = {
val $from = Json.arr(
Json.obj("serviceName" -> testServiceName,
"columnName" -> testColumnName,
"ids" -> ids))
val $step = Json.arr(Json.obj(
"label" -> testLabelName, "direction" -> "out", "offset" -> 0, "limit" -> 100,
"duration" -> Json.obj("from" -> from, "to" -> to)))
val $steps = Json.arr(Json.obj("step" -> $step))
Json.obj("srcVertices" -> $from, "steps" -> $steps)
}
// get all
var result = getEdgesSync(queryDuration(Seq(0, 2), from = 0, to = 5000))
(result \\ "results").as[List[JsValue]].size should be(4)
// inclusive, exclusive
result = getEdgesSync(queryDuration(Seq(0, 2), from = 1000, to = 4000))
(result \\ "results").as[List[JsValue]].size should be(3)
result = getEdgesSync(queryDuration(Seq(0, 2), from = 1000, to = 2000))
(result \\ "results").as[List[JsValue]].size should be(1)
val bulkEdges = Seq(
toEdge(1001, insert, e, 0, 1, testLabelName, Json.obj(weight -> 10, is_hidden -> true)),
toEdge(2002, insert, e, 0, 2, testLabelName, Json.obj(weight -> 20, is_hidden -> false)),
toEdge(3003, insert, e, 2, 0, testLabelName, Json.obj(weight -> 30)),
toEdge(4004, insert, e, 2, 1, testLabelName, Json.obj(weight -> 40))
)
insertEdgesSync(bulkEdges: _*)
    // duration test after update
// get all
result = getEdgesSync(queryDuration(Seq(0, 2), from = 0, to = 5000))
(result \\ "results").as[List[JsValue]].size should be(4)
// inclusive, exclusive
result = getEdgesSync(queryDuration(Seq(0, 2), from = 1000, to = 4000))
(result \\ "results").as[List[JsValue]].size should be(3)
result = getEdgesSync(queryDuration(Seq(0, 2), from = 1000, to = 2000))
(result \\ "results").as[List[JsValue]].size should be(1)
}
test("return tree") {
def queryParents(id: Long) = Json.parse(
s"""
{
"returnTree": true,
"srcVertices": [
{ "serviceName": "$testServiceName",
"columnName": "$testColumnName",
"id": $id
}],
"steps": [
[ {
"label": "$testLabelName",
"direction": "out",
"offset": 0,
"limit": 2
}
],[{
"label": "$testLabelName",
"direction": "in",
"offset": 0,
"limit": 10
}
]]
}""".stripMargin)
val src = 100
val tgt = 200
insertEdgesSync(toEdge(1001, "insert", "e", src, tgt, testLabelName))
val result = TestUtil.getEdgesSync(queryParents(src))
val parents = (result \\ "results").as[Seq[JsValue]]
val ret = parents.forall {
edge => (edge \\ "parents").as[Seq[JsValue]].size == 1
}
ret should be(true)
}
// test("pagination and _to") {
// def querySingleWithTo(id: Int, offset: Int = 0, limit: Int = 100, to: Int) = Json.parse(
// s"""
// { "srcVertices": [
// { "serviceName": "${testServiceName}",
// "columnName": "${testColumnName}",
// "id": ${id}
// }],
// "steps": [
// [ {
// "label": "${testLabelName}",
// "direction": "out",
// "offset": $offset,
// "limit": $limit,
// "_to": $to
// }
// ]]
// }
// """)
//
// val src = System.currentTimeMillis().toInt
//
// val bulkEdges = Seq(
// toEdge(1001, insert, e, src, 1, testLabelName, Json.obj(weight -> 10, is_hidden -> true)),
// toEdge(2002, insert, e, src, 2, testLabelName, Json.obj(weight -> 20, is_hidden -> false)),
// toEdge(3003, insert, e, src, 3, testLabelName, Json.obj(weight -> 30)),
// toEdge(4004, insert, e, src, 4, testLabelName, Json.obj(weight -> 40))
// )
// insertEdgesSync(bulkEdges: _*)
//
// var result = getEdgesSync(querySingle(src, offset = 0, limit = 2))
// var edges = (result \\ "results").as[List[JsValue]]
//
// edges.size should be(2)
// (edges(0) \\ "to").as[Long] should be(4)
// (edges(1) \\ "to").as[Long] should be(3)
//
// result = getEdgesSync(querySingle(src, offset = 1, limit = 2))
//
// edges = (result \\ "results").as[List[JsValue]]
// edges.size should be(2)
// (edges(0) \\ "to").as[Long] should be(3)
// (edges(1) \\ "to").as[Long] should be(2)
//
// result = getEdgesSync(querySingleWithTo(src, offset = 0, limit = -1, to = 1))
// edges = (result \\ "results").as[List[JsValue]]
// edges.size should be(1)
// }
test("order by") {
def queryScore(id: Int, scoring: Map[String, Int]): JsValue = Json.obj(
"srcVertices" -> Json.arr(
Json.obj(
"serviceName" -> testServiceName,
"columnName" -> testColumnName,
"id" -> id
)
),
"steps" -> Json.arr(
Json.obj(
"step" -> Json.arr(
Json.obj(
"label" -> testLabelName,
"scoring" -> scoring
)
)
)
)
)
def queryOrderBy(id: Int, scoring: Map[String, Int], props: Seq[Map[String, String]]): JsValue = Json.obj(
"orderBy" -> props,
"srcVertices" -> Json.arr(
Json.obj("serviceName" -> testServiceName, "columnName" -> testColumnName, "id" -> id)
),
"steps" -> Json.arr(
Json.obj(
"step" -> Json.arr(
Json.obj(
"label" -> testLabelName,
"scoring" -> scoring
)
)
)
)
)
val bulkEdges = Seq(
toEdge(1001, insert, e, 0, 1, testLabelName, Json.obj(weight -> 10, is_hidden -> true)),
toEdge(2002, insert, e, 0, 2, testLabelName, Json.obj(weight -> 20, is_hidden -> false)),
toEdge(3003, insert, e, 2, 0, testLabelName, Json.obj(weight -> 30)),
toEdge(4004, insert, e, 2, 1, testLabelName, Json.obj(weight -> 40))
)
insertEdgesSync(bulkEdges: _*)
// get edges
val edges = getEdgesSync(queryScore(0, Map("weight" -> 1)))
val orderByScore = getEdgesSync(queryOrderBy(0, Map("weight" -> 1), Seq(Map("score" -> "DESC", "timestamp" -> "DESC"))))
val ascOrderByScore = getEdgesSync(queryOrderBy(0, Map("weight" -> 1), Seq(Map("score" -> "ASC", "timestamp" -> "DESC"))))
val edgesTo = edges \\ "results" \\\\ "to"
val orderByTo = orderByScore \\ "results" \\\\ "to"
val ascOrderByTo = ascOrderByScore \\ "results" \\\\ "to"
edgesTo should be(Seq(JsNumber(2), JsNumber(1)))
edgesTo should be(orderByTo)
ascOrderByTo should be(Seq(JsNumber(1), JsNumber(2)))
edgesTo.reverse should be(ascOrderByTo)
}
test("query with sampling") {
def queryWithSampling(id: Int, sample: Int) = Json.parse(
s"""
{ "srcVertices": [
{ "serviceName": "$testServiceName",
"columnName": "$testColumnName",
"id": $id
}],
"steps": [
{
"step": [{
"label": "$testLabelName",
"direction": "out",
"offset": 0,
"limit": 100,
"sample": $sample
}]
}
]
}""")
def twoStepQueryWithSampling(id: Int, sample: Int) = Json.parse(
s"""
{ "srcVertices": [
{ "serviceName": "$testServiceName",
"columnName": "$testColumnName",
"id": $id
}],
"steps": [
{
"step": [{
"label": "$testLabelName",
"direction": "out",
"offset": 0,
"limit": 100,
"sample": $sample
}]
},
{
"step": [{
"label": "$testLabelName",
"direction": "out",
"offset": 0,
"limit": 100,
"sample": $sample
}]
}
]
}""")
def twoQueryWithSampling(id: Int, sample: Int) = Json.parse(
s"""
{ "srcVertices": [
{ "serviceName": "$testServiceName",
"columnName": "$testColumnName",
"id": $id
}],
"steps": [
{
"step": [{
"label": "$testLabelName",
"direction": "out",
"offset": 0,
"limit": 50,
"sample": $sample
},
{
"label": "$testLabelName2",
"direction": "out",
"offset": 0,
"limit": 50
}]
}
]
}""")
val sampleSize = 2
val ts = "1442985659166"
val testId = 22
val bulkEdges = Seq(
toEdge(ts, insert, e, testId, 122, testLabelName),
toEdge(ts, insert, e, testId, 222, testLabelName),
toEdge(ts, insert, e, testId, 322, testLabelName),
toEdge(ts, insert, e, testId, 922, testLabelName2),
toEdge(ts, insert, e, testId, 222, testLabelName2),
toEdge(ts, insert, e, testId, 322, testLabelName2),
toEdge(ts, insert, e, 122, 1122, testLabelName),
toEdge(ts, insert, e, 122, 1222, testLabelName),
toEdge(ts, insert, e, 122, 1322, testLabelName),
toEdge(ts, insert, e, 222, 2122, testLabelName),
toEdge(ts, insert, e, 222, 2222, testLabelName),
toEdge(ts, insert, e, 222, 2322, testLabelName),
toEdge(ts, insert, e, 322, 3122, testLabelName),
toEdge(ts, insert, e, 322, 3222, testLabelName),
toEdge(ts, insert, e, 322, 3322, testLabelName)
)
insertEdgesSync(bulkEdges: _*)
val result1 = getEdgesSync(queryWithSampling(testId, sampleSize))
(result1 \\ "results").as[List[JsValue]].size should be(math.min(sampleSize, bulkEdges.size))
val result2 = getEdgesSync(twoStepQueryWithSampling(testId, sampleSize))
(result2 \\ "results").as[List[JsValue]].size should be(math.min(sampleSize * sampleSize, bulkEdges.size * bulkEdges.size))
val result3 = getEdgesSync(twoQueryWithSampling(testId, sampleSize))
(result3 \\ "results").as[List[JsValue]].size should be(sampleSize + 3) // edges in testLabelName2 = 3
}
test("test query with filterOut query") {
def queryWithFilterOut(id1: String, id2: String) = Json.parse(
s"""{
| "limit": 10,
| "filterOut": {
| "srcVertices": [{
| "serviceName": "$testServiceName",
| "columnName": "$testColumnName",
| "id": $id1
| }],
| "steps": [{
| "step": [{
| "label": "$testLabelName",
| "direction": "out",
| "offset": 0,
| "limit": 10
| }]
| }]
| },
| "srcVertices": [{
| "serviceName": "$testServiceName",
| "columnName": "$testColumnName",
| "id": $id2
| }],
| "steps": [{
| "step": [{
| "label": "$testLabelName",
| "direction": "out",
| "offset": 0,
| "limit": 5
| }]
| }]
|}
""".stripMargin
)
val testId1 = "-23"
val testId2 = "-25"
val bulkEdges = Seq(
toEdge(1, insert, e, testId1, 111, testLabelName, Json.obj(weight -> 10)),
toEdge(2, insert, e, testId1, 222, testLabelName, Json.obj(weight -> 10)),
toEdge(3, insert, e, testId1, 333, testLabelName, Json.obj(weight -> 10)),
toEdge(4, insert, e, testId2, 111, testLabelName, Json.obj(weight -> 1)),
toEdge(5, insert, e, testId2, 333, testLabelName, Json.obj(weight -> 1)),
toEdge(6, insert, e, testId2, 555, testLabelName, Json.obj(weight -> 1))
)
logger.debug(s"${bulkEdges.mkString("\\n")}")
insertEdgesSync(bulkEdges: _*)
val rs = getEdgesSync(queryWithFilterOut(testId1, testId2))
logger.debug(Json.prettyPrint(rs))
val results = (rs \\ "results").as[List[JsValue]]
results.size should be(1)
(results(0) \\ "to").toString should be("555")
}
  /** note that this merges the results of two different labels into one */
test("weighted union") {
def queryWithWeightedUnion(id1: String, id2: String) = Json.parse(
s"""
|{
| "limit": 10,
| "weights": [
| 10,
| 1
| ],
| "groupBy": ["weight"],
| "queries": [
| {
| "srcVertices": [
| {
| "serviceName": "$testServiceName",
| "columnName": "$testColumnName",
| "id": $id1
| }
| ],
| "steps": [
| {
| "step": [
| {
| "label": "$testLabelName",
| "direction": "out",
| "offset": 0,
| "limit": 5
| }
| ]
| }
| ]
| },
| {
| "srcVertices": [
| {
| "serviceName": "$testServiceName",
| "columnName": "$testColumnName",
| "id": $id2
| }
| ],
| "steps": [
| {
| "step": [
| {
| "label": "$testLabelName2",
| "direction": "out",
| "offset": 0,
| "limit": 5
| }
| ]
| }
| ]
| }
| ]
|}
""".stripMargin
)
val testId1 = "1"
val testId2 = "2"
val bulkEdges = Seq(
toEdge(1, insert, e, testId1, 111, testLabelName, Json.obj(weight -> 10)),
toEdge(2, insert, e, testId1, 222, testLabelName, Json.obj(weight -> 10)),
toEdge(3, insert, e, testId1, 333, testLabelName, Json.obj(weight -> 10)),
toEdge(4, insert, e, testId2, 444, testLabelName2, Json.obj(weight -> 1)),
toEdge(5, insert, e, testId2, 555, testLabelName2, Json.obj(weight -> 1)),
toEdge(6, insert, e, testId2, 666, testLabelName2, Json.obj(weight -> 1))
)
insertEdgesSync(bulkEdges: _*)
val rs = getEdgesSync(queryWithWeightedUnion(testId1, testId2))
logger.debug(Json.prettyPrint(rs))
val results = (rs \\ "results").as[List[JsValue]]
results.size should be(2)
(results(0) \\ "scoreSum").as[Float] should be(30.0)
(results(0) \\ "agg").as[List[JsValue]].size should be(3)
(results(1) \\ "scoreSum").as[Float] should be(3.0)
(results(1) \\ "agg").as[List[JsValue]].size should be(3)
}
test("weighted union with options") {
def queryWithWeightedUnionWithOptions(id1: String, id2: String) = Json.parse(
s"""
|{
| "limit": 10,
| "weights": [
| 10,
| 1
| ],
| "groupBy": ["to"],
| "select": ["to", "weight"],
| "filterOut": {
| "srcVertices": [
| {
| "serviceName": "$testServiceName",
| "columnName": "$testColumnName",
| "id": $id1
| }
| ],
| "steps": [
| {
| "step": [
| {
| "label": "$testLabelName",
| "direction": "out",
| "offset": 0,
| "limit": 10
| }
| ]
| }
| ]
| },
| "queries": [
| {
| "srcVertices": [
| {
| "serviceName": "$testServiceName",
| "columnName": "$testColumnName",
| "id": $id1
| }
| ],
| "steps": [
| {
| "step": [
| {
| "label": "$testLabelName",
| "direction": "out",
| "offset": 0,
| "limit": 5
| }
| ]
| }
| ]
| },
| {
| "srcVertices": [
| {
| "serviceName": "$testServiceName",
| "columnName": "$testColumnName",
| "id": $id2
| }
| ],
| "steps": [
| {
| "step": [
| {
| "label": "$testLabelName2",
| "direction": "out",
| "offset": 0,
| "limit": 5
| }
| ]
| }
| ]
| }
| ]
|}
""".stripMargin
)
val testId1 = "-192848"
val testId2 = "-193849"
val bulkEdges = Seq(
toEdge(1, insert, e, testId1, 111, testLabelName, Json.obj(weight -> 10)),
toEdge(2, insert, e, testId1, 222, testLabelName, Json.obj(weight -> 10)),
toEdge(3, insert, e, testId1, 333, testLabelName, Json.obj(weight -> 10)),
toEdge(4, insert, e, testId2, 111, testLabelName2, Json.obj(weight -> 1)),
toEdge(5, insert, e, testId2, 333, testLabelName2, Json.obj(weight -> 1)),
toEdge(6, insert, e, testId2, 555, testLabelName2, Json.obj(weight -> 1))
)
insertEdgesSync(bulkEdges: _*)
val rs = getEdgesSync(queryWithWeightedUnionWithOptions(testId1, testId2))
logger.debug(Json.prettyPrint(rs))
val results = (rs \\ "results").as[List[JsValue]]
results.size should be(1)
}
test("scoreThreshold") {
def queryWithScoreThreshold(id: String, scoreThreshold: Int) = Json.parse(
s"""{
| "limit": 10,
| "scoreThreshold": $scoreThreshold,
| "groupBy": ["to"],
| "srcVertices": [
| {
| "serviceName": "$testServiceName",
| "columnName": "$testColumnName",
| "id": $id
| }
| ],
| "steps": [
| {
| "step": [
| {
| "label": "$testLabelName",
| "direction": "out",
| "offset": 0,
| "limit": 10
| }
| ]
| },
| {
| "step": [
| {
| "label": "$testLabelName",
| "direction": "out",
| "offset": 0,
| "limit": 10
| }
| ]
| }
| ]
|}
""".stripMargin
)
val testId = "-23903"
val bulkEdges = Seq(
toEdge(1, insert, e, testId, 101, testLabelName, Json.obj(weight -> 10)),
toEdge(1, insert, e, testId, 102, testLabelName, Json.obj(weight -> 10)),
toEdge(1, insert, e, testId, 103, testLabelName, Json.obj(weight -> 10)),
toEdge(1, insert, e, 101, 102, testLabelName, Json.obj(weight -> 10)),
toEdge(1, insert, e, 101, 103, testLabelName, Json.obj(weight -> 10)),
toEdge(1, insert, e, 101, 104, testLabelName, Json.obj(weight -> 10)),
toEdge(1, insert, e, 102, 103, testLabelName, Json.obj(weight -> 10)),
toEdge(1, insert, e, 102, 104, testLabelName, Json.obj(weight -> 10)),
toEdge(1, insert, e, 103, 105, testLabelName, Json.obj(weight -> 10))
)
    // expected: 104 -> 2, 103 -> 2, 102 -> 1, 105 -> 1
insertEdgesSync(bulkEdges: _*)
var rs = getEdgesSync(queryWithScoreThreshold(testId, 2))
logger.debug(Json.prettyPrint(rs))
var results = (rs \\ "results").as[List[JsValue]]
results.size should be(2)
rs = getEdgesSync(queryWithScoreThreshold(testId, 1))
logger.debug(Json.prettyPrint(rs))
results = (rs \\ "results").as[List[JsValue]]
results.size should be(4)
}
test("scorePropagateOp test") {
def queryWithPropertyOp(id: String, op: String, shrinkageVal: Long) = Json.parse(
s"""{
| "limit": 10,
| "groupBy": ["from"],
| "duplicate": "sum",
| "srcVertices": [
| {
| "serviceName": "$testServiceName",
| "columnName": "$testColumnName",
| "id": $id
| }
| ],
| "steps": [
| {
| "step": [
| {
| "label": "$testLabelName",
| "direction": "out",
| "offset": 0,
| "limit": 10,
| "groupBy": ["from"],
| "duplicate": "sum",
| "index": "idx_1",
| "scoring": {
| "weight":1,
| "time": 0
| },
| "transform": [["_from"]]
| }
| ]
| }, {
| "step": [
| {
| "label": "$testLabelName2",
| "direction": "out",
| "offset": 0,
| "limit": 10,
| "scorePropagateOp": "$op",
| "scorePropagateShrinkage": $shrinkageVal
| }
| ]
| }
| ]
|}
""".stripMargin
)
def querySingleVertexWithOp(id: String, op: String, shrinkageVal: Long) = Json.parse(
s"""{
| "limit": 10,
| "groupBy": ["from"],
| "duplicate": "sum",
| "srcVertices": [
| {
| "serviceName": "$testServiceName",
| "columnName": "$testColumnName",
| "id": $id
| }
| ],
| "steps": [
| {
| "step": [
| {
| "label": "$testLabelName",
| "direction": "out",
| "offset": 0,
| "limit": 10,
| "groupBy": ["from"],
| "duplicate": "countSum",
| "transform": [["_from"]]
| }
| ]
| }, {
| "step": [
| {
| "label": "$testLabelName2",
| "direction": "out",
| "offset": 0,
| "limit": 10,
| "scorePropagateOp": "$op",
| "scorePropagateShrinkage": $shrinkageVal
| }
| ]
| }
| ]
|}
""".stripMargin
)
def queryMultiVerticesWithOp(id: String, id2: String, op: String, shrinkageVal: Long) = Json.parse(
s"""{
| "limit": 10,
| "groupBy": ["from"],
| "duplicate": "sum",
| "srcVertices": [
| {
| "serviceName": "$testServiceName",
| "columnName": "$testColumnName",
| "ids": [$id, $id2]
| }
| ],
| "steps": [
| {
| "step": [
| {
| "label": "$testLabelName",
| "direction": "out",
| "offset": 0,
| "limit": 10,
| "groupBy": ["from"],
| "duplicate": "countSum",
| "transform": [["_from"]]
| }
| ]
| }, {
| "step": [
| {
| "label": "$testLabelName2",
| "direction": "out",
| "offset": 0,
| "limit": 10,
| "scorePropagateOp": "$op",
| "scorePropagateShrinkage": $shrinkageVal
| }
| ]
| }
| ]
|}
""".stripMargin
)
val testId = "-30000"
val testId2 = "-4000"
val bulkEdges = Seq(
toEdge(1, insert, e, testId, 101, testLabelName, Json.obj(weight -> -10)),
toEdge(1, insert, e, testId, 102, testLabelName, Json.obj(weight -> -10)),
toEdge(1, insert, e, testId, 103, testLabelName, Json.obj(weight -> -10)),
toEdge(1, insert, e, testId, 102, testLabelName2, Json.obj(weight -> 10)),
toEdge(1, insert, e, testId, 103, testLabelName2, Json.obj(weight -> 10)),
toEdge(1, insert, e, testId, 104, testLabelName2, Json.obj(weight -> 10)),
toEdge(1, insert, e, testId, 105, testLabelName2, Json.obj(weight -> 10)),
toEdge(1, insert, e, testId2, 101, testLabelName, Json.obj(weight -> -10)),
toEdge(1, insert, e, testId2, 102, testLabelName, Json.obj(weight -> -10)),
toEdge(1, insert, e, testId2, 103, testLabelName, Json.obj(weight -> -10)),
toEdge(1, insert, e, testId2, 102, testLabelName2, Json.obj(weight -> 10)),
toEdge(1, insert, e, testId2, 105, testLabelName2, Json.obj(weight -> 10))
)
insertEdgesSync(bulkEdges: _*)
val firstStepEdgeCount = 3l
val secondStepEdgeCount = 4l
var shrinkageVal = 10l
var rs = getEdgesSync(querySingleVertexWithOp(testId, "divide", shrinkageVal))
logger.debug(Json.prettyPrint(rs))
var results = (rs \\ "results").as[List[JsValue]]
results.size should be(1)
var scoreSum = secondStepEdgeCount.toDouble / (firstStepEdgeCount.toDouble + shrinkageVal)
(results(0) \\ "scoreSum").as[Double] should be(scoreSum)
rs = getEdgesSync(queryMultiVerticesWithOp(testId, testId2, "divide", shrinkageVal))
logger.debug(Json.prettyPrint(rs))
results = (rs \\ "results").as[List[JsValue]]
results.size should be(2)
scoreSum = secondStepEdgeCount.toDouble / (firstStepEdgeCount.toDouble + shrinkageVal)
(results(0) \\ "scoreSum").as[Double] should be(scoreSum)
scoreSum = 2.toDouble / (3.toDouble + shrinkageVal)
(results(1) \\ "scoreSum").as[Double] should be(scoreSum)
// check for divide zero case
shrinkageVal = 30l
rs = getEdgesSync(queryWithPropertyOp(testId, "divide", shrinkageVal))
logger.debug(Json.prettyPrint(rs))
results = (rs \\ "results").as[List[JsValue]]
results.size should be(1)
(results(0) \\ "scoreSum").as[Double] should be(0)
// "plus" operation
rs = getEdgesSync(querySingleVertexWithOp(testId, "plus", shrinkageVal))
logger.debug(Json.prettyPrint(rs))
results = (rs \\ "results").as[List[JsValue]]
results.size should be(1)
scoreSum = (firstStepEdgeCount + 1) * secondStepEdgeCount
(results(0) \\ "scoreSum").as[Long] should be(scoreSum)
// "multiply" operation
rs = getEdgesSync(querySingleVertexWithOp(testId, "multiply", shrinkageVal))
logger.debug(Json.prettyPrint(rs))
results = (rs \\ "results").as[List[JsValue]]
results.size should be(1)
scoreSum = (firstStepEdgeCount * 1) * secondStepEdgeCount
(results(0) \\ "scoreSum").as[Long] should be(scoreSum)
}
def querySingle(id: Int, offset: Int = 0, limit: Int = 100) = Json.parse(
s"""
{ "srcVertices": [
{ "serviceName": "$testServiceName",
"columnName": "$testColumnName",
"id": $id
}],
"steps": [
[ {
"label": "$testLabelName",
"direction": "out",
"offset": $offset,
"limit": $limit
}
]]
}
""")
def queryGlobalLimit(id: Int, limit: Int): JsValue = Json.obj(
"limit" -> limit,
"srcVertices" -> Json.arr(
Json.obj("serviceName" -> testServiceName, "columnName" -> testColumnName, "id" -> id)
),
"steps" -> Json.arr(
Json.obj(
"step" -> Json.arr(
Json.obj(
"label" -> testLabelName
)
)
)
)
)
  // called before each test
override def beforeEach = initTestData()
  // called once, when the tests start
override def initTestData(): Unit = {
super.initTestData()
insertEdgesSync(
toEdge(1000, insert, e, 0, 1, testLabelName, Json.obj(weight -> 40, is_hidden -> true)),
toEdge(2000, insert, e, 0, 2, testLabelName, Json.obj(weight -> 30, is_hidden -> false)),
toEdge(3000, insert, e, 2, 0, testLabelName, Json.obj(weight -> 20)),
toEdge(4000, insert, e, 2, 1, testLabelName, Json.obj(weight -> 10)),
toEdge(3000, insert, e, 10, 20, testLabelName, Json.obj(weight -> 20)),
toEdge(4000, insert, e, 20, 20, testLabelName, Json.obj(weight -> 10)),
toEdge(1, insert, e, -1, 1000, testLabelName),
toEdge(1, insert, e, -1, 2000, testLabelName),
toEdge(1, insert, e, -1, 3000, testLabelName),
toEdge(1, insert, e, 1000, 10000, testLabelName),
toEdge(1, insert, e, 1000, 11000, testLabelName),
toEdge(1, insert, e, 2000, 11000, testLabelName),
toEdge(1, insert, e, 2000, 12000, testLabelName),
toEdge(1, insert, e, 3000, 12000, testLabelName),
toEdge(1, insert, e, 3000, 13000, testLabelName),
toEdge(1, insert, e, 10000, 100000, testLabelName),
toEdge(2, insert, e, 11000, 200000, testLabelName),
toEdge(3, insert, e, 12000, 300000, testLabelName)
)
}
}
| jongwook/incubator-s2graph | s2core/src/test/scala/org/apache/s2graph/core/Integrate/QueryTest.scala | Scala | apache-2.0 | 36,817 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.configuration.Algo._
import org.apache.spark.mllib.tree.configuration.{Algo, Strategy}
import org.apache.spark.mllib.tree.{DecisionTree, RandomForest, impurity}
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.util.Utils
import org.apache.spark.{SparkConf, SparkContext}
import scopt.OptionParser
import scala.language.reflectiveCalls
/**
* An example runner for decision trees and random forests. Run with
* {{{
* ./bin/run-example org.apache.spark.examples.mllib.DecisionTreeRunner [options]
* }}}
* If you use it as a template to create your own app, please use `spark-submit` to submit your app.
*
* Note: This script treats all features as real-valued (not categorical).
* To include categorical features, modify categoricalFeaturesInfo.
*/
object DecisionTreeRunner {
object ImpurityType extends Enumeration {
type ImpurityType = Value
val Gini, Entropy, Variance = Value
}
import ImpurityType._
case class Params(
input: String = null,
testInput: String = "",
dataFormat: String = "libsvm",
algo: Algo = Classification,
maxDepth: Int = 5,
impurity: ImpurityType = Gini,
maxBins: Int = 32,
minInstancesPerNode: Int = 1,
minInfoGain: Double = 0.0,
numTrees: Int = 1,
featureSubsetStrategy: String = "auto",
fracTest: Double = 0.2,
useNodeIdCache: Boolean = false,
checkpointDir: Option[String] = None,
checkpointInterval: Int = 10) extends AbstractParams[Params]
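  // Example invocation (illustrative options and input path):
  //   ./bin/run-example org.apache.spark.examples.mllib.DecisionTreeRunner
  //     --algo Classification --numTrees 10 --maxDepth 8 data/mllib/sample_libsvm_data.txt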
def main(args: Array[String]) {
val defaultParams = Params()
val parser = new OptionParser[Params]("DecisionTreeRunner") {
head("DecisionTreeRunner: an example decision tree app.")
opt[String]("algo")
.text(s"algorithm (${Algo.values.mkString(",")}), default: ${defaultParams.algo}")
.action((x, c) => c.copy(algo = Algo.withName(x)))
opt[String]("impurity")
.text(s"impurity type (${ImpurityType.values.mkString(",")}), " +
s"default: ${defaultParams.impurity}")
.action((x, c) => c.copy(impurity = ImpurityType.withName(x)))
opt[Int]("maxDepth")
.text(s"max depth of the tree, default: ${defaultParams.maxDepth}")
.action((x, c) => c.copy(maxDepth = x))
opt[Int]("maxBins")
.text(s"max number of bins, default: ${defaultParams.maxBins}")
.action((x, c) => c.copy(maxBins = x))
opt[Int]("minInstancesPerNode")
.text(s"min number of instances required at child nodes to create the parent split," +
s" default: ${defaultParams.minInstancesPerNode}")
.action((x, c) => c.copy(minInstancesPerNode = x))
opt[Double]("minInfoGain")
.text(s"min info gain required to create a split, default: ${defaultParams.minInfoGain}")
.action((x, c) => c.copy(minInfoGain = x))
opt[Int]("numTrees")
.text(s"number of trees (1 = decision tree, 2+ = random forest)," +
s" default: ${defaultParams.numTrees}")
.action((x, c) => c.copy(numTrees = x))
opt[String]("featureSubsetStrategy")
.text(s"feature subset sampling strategy" +
s" (${RandomForest.supportedFeatureSubsetStrategies.mkString(", ")}), " +
s"default: ${defaultParams.featureSubsetStrategy}")
.action((x, c) => c.copy(featureSubsetStrategy = x))
opt[Double]("fracTest")
.text(s"fraction of data to hold out for testing. If given option testInput, " +
s"this option is ignored. default: ${defaultParams.fracTest}")
.action((x, c) => c.copy(fracTest = x))
opt[Boolean]("useNodeIdCache")
.text(s"whether to use node Id cache during training, " +
s"default: ${defaultParams.useNodeIdCache}")
.action((x, c) => c.copy(useNodeIdCache = x))
opt[String]("checkpointDir")
.text(s"checkpoint directory where intermediate node Id caches will be stored, " +
s"default: ${
defaultParams.checkpointDir match {
case Some(strVal) => strVal
case None => "None"
}
}")
.action((x, c) => c.copy(checkpointDir = Some(x)))
opt[Int]("checkpointInterval")
.text(s"how often to checkpoint the node Id cache, " +
s"default: ${defaultParams.checkpointInterval}")
.action((x, c) => c.copy(checkpointInterval = x))
opt[String]("testInput")
.text(s"input path to test dataset. If given, option fracTest is ignored." +
s" default: ${defaultParams.testInput}")
.action((x, c) => c.copy(testInput = x))
opt[String]("dataFormat")
.text("data format: libsvm (default), dense (deprecated in Spark v1.1)")
.action((x, c) => c.copy(dataFormat = x))
arg[String]("<input>")
.text("input path to labeled examples")
.required()
.action((x, c) => c.copy(input = x))
checkConfig { params =>
if (params.fracTest < 0 || params.fracTest > 1) {
failure(s"fracTest ${params.fracTest} value incorrect; should be in [0,1].")
} else {
if (params.algo == Classification &&
(params.impurity == Gini || params.impurity == Entropy)) {
success
} else if (params.algo == Regression && params.impurity == Variance) {
success
} else {
failure(s"Algo ${params.algo} is not compatible with impurity ${params.impurity}.")
}
}
}
}
parser.parse(args, defaultParams) match {
case Some(params) => run(params)
case _ => sys.exit(1)
}
}
/**
* Load training and test data from files.
*
* @param input Path to input dataset.
* @param dataFormat "libsvm" or "dense"
* @param testInput Path to test dataset.
* @param algo Classification or Regression
* @param fracTest Fraction of input data to hold out for testing. Ignored if testInput given.
* @return (training dataset, test dataset, number of classes),
* where the number of classes is inferred from data (and set to 0 for Regression)
*/
private[mllib] def loadDatasets(
sc: SparkContext,
input: String,
dataFormat: String,
testInput: String,
algo: Algo,
fracTest: Double): (RDD[LabeledPoint], RDD[LabeledPoint], Int) = {
// Load training data and cache it.
val origExamples = dataFormat match {
case "dense" => MLUtils.loadLabeledPoints(sc, input).cache()
case "libsvm" => MLUtils.loadLibSVMFile(sc, input).cache()
}
// For classification, re-index classes if needed.
val (examples, classIndexMap, numClasses) = algo match {
case Classification =>
// classCounts: class --> # examples in class
val classCounts = origExamples.map(_.label).countByValue()
val sortedClasses = classCounts.keys.toList.sorted
val numClasses = classCounts.size
// classIndexMap: class --> index in 0,...,numClasses-1
val classIndexMap = {
if (classCounts.keySet != Set(0.0, 1.0)) {
sortedClasses.zipWithIndex.toMap
} else {
Map[Double, Int]()
}
}
val examples = {
if (classIndexMap.isEmpty) {
origExamples
} else {
origExamples.map(lp => LabeledPoint(classIndexMap(lp.label), lp.features))
}
}
val numExamples = examples.count()
println(s"numClasses = $numClasses.")
println(s"Per-class example fractions, counts:")
println(s"Class\\tFrac\\tCount")
sortedClasses.foreach { c =>
val frac = classCounts(c) / numExamples.toDouble
println(s"$c\\t$frac\\t${classCounts(c)}")
}
(examples, classIndexMap, numClasses)
case Regression =>
(origExamples, null, 0)
case _ =>
        throw new IllegalArgumentException(s"Algo $algo is not supported.")
}
// Create training, test sets.
val splits = if (testInput != "") {
// Load testInput.
val numFeatures = examples.take(1)(0).features.size
val origTestExamples = dataFormat match {
case "dense" => MLUtils.loadLabeledPoints(sc, testInput)
case "libsvm" => MLUtils.loadLibSVMFile(sc, testInput, numFeatures)
}
algo match {
case Classification =>
        // Re-index test examples with the same class index mapping used for the training data.
val testExamples = {
if (classIndexMap.isEmpty) {
origTestExamples
} else {
origTestExamples.map(lp => LabeledPoint(classIndexMap(lp.label), lp.features))
}
}
Array(examples, testExamples)
case Regression =>
Array(examples, origTestExamples)
}
} else {
// Split input into training, test.
examples.randomSplit(Array(1.0 - fracTest, fracTest))
}
val training = splits(0).cache()
val test = splits(1).cache()
val numTraining = training.count()
val numTest = test.count()
println(s"numTraining = $numTraining, numTest = $numTest.")
examples.unpersist(blocking = false)
(training, test, numClasses)
}
def run(params: Params): Unit = {
val conf = new SparkConf().setAppName(s"DecisionTreeRunner with $params")
val sc = new SparkContext(conf)
println(s"DecisionTreeRunner with parameters:\\n$params")
// Load training and test data and cache it.
val (training, test, numClasses) = loadDatasets(sc, params.input, params.dataFormat,
params.testInput, params.algo, params.fracTest)
val impurityCalculator = params.impurity match {
case Gini => impurity.Gini
case Entropy => impurity.Entropy
case Variance => impurity.Variance
}
params.checkpointDir.foreach(sc.setCheckpointDir)
val strategy
= new Strategy(
algo = params.algo,
impurity = impurityCalculator,
maxDepth = params.maxDepth,
maxBins = params.maxBins,
numClasses = numClasses,
minInstancesPerNode = params.minInstancesPerNode,
minInfoGain = params.minInfoGain,
useNodeIdCache = params.useNodeIdCache,
checkpointInterval = params.checkpointInterval)
if (params.numTrees == 1) {
val startTime = System.nanoTime()
val model = DecisionTree.train(training, strategy)
val elapsedTime = (System.nanoTime() - startTime) / 1e9
println(s"Training time: $elapsedTime seconds")
if (model.numNodes < 20) {
println(model.toDebugString) // Print full model.
} else {
println(model) // Print model summary.
}
if (params.algo == Classification) {
val trainAccuracy =
new MulticlassMetrics(training.map(lp => (model.predict(lp.features), lp.label))).accuracy
println(s"Train accuracy = $trainAccuracy")
val testAccuracy =
new MulticlassMetrics(test.map(lp => (model.predict(lp.features), lp.label))).accuracy
println(s"Test accuracy = $testAccuracy")
}
if (params.algo == Regression) {
val trainMSE = meanSquaredError(model, training)
println(s"Train mean squared error = $trainMSE")
val testMSE = meanSquaredError(model, test)
println(s"Test mean squared error = $testMSE")
}
} else {
val randomSeed = Utils.random.nextInt()
if (params.algo == Classification) {
val startTime = System.nanoTime()
val model = RandomForest.trainClassifier(training, strategy, params.numTrees,
params.featureSubsetStrategy, randomSeed)
val elapsedTime = (System.nanoTime() - startTime) / 1e9
println(s"Training time: $elapsedTime seconds")
if (model.totalNumNodes < 30) {
println(model.toDebugString) // Print full model.
} else {
println(model) // Print model summary.
}
val trainAccuracy =
new MulticlassMetrics(training.map(lp => (model.predict(lp.features), lp.label))).accuracy
println(s"Train accuracy = $trainAccuracy")
val testAccuracy =
new MulticlassMetrics(test.map(lp => (model.predict(lp.features), lp.label))).accuracy
println(s"Test accuracy = $testAccuracy")
}
if (params.algo == Regression) {
val startTime = System.nanoTime()
val model = RandomForest.trainRegressor(training, strategy, params.numTrees,
params.featureSubsetStrategy, randomSeed)
val elapsedTime = (System.nanoTime() - startTime) / 1e9
println(s"Training time: $elapsedTime seconds")
if (model.totalNumNodes < 30) {
println(model.toDebugString) // Print full model.
} else {
println(model) // Print model summary.
}
val trainMSE = meanSquaredError(model, training)
println(s"Train mean squared error = $trainMSE")
val testMSE = meanSquaredError(model, test)
println(s"Test mean squared error = $testMSE")
}
}
sc.stop()
}
/**
* Calculates the mean squared error for regression.
*
   * This is just for demo purposes. In general, don't copy this code because it is NOT efficient
* due to the use of structural types, which leads to one reflection call per record.
*/
// scalastyle:off structural.type
private[mllib] def meanSquaredError(
model: {def predict(features: Vector): Double},
data: RDD[LabeledPoint]): Double = {
data.map { y =>
val err = model.predict(y.features) - y.label
err * err
}.mean()
}
// scalastyle:on structural.type
}
// scalastyle:on println
| chgm1006/spark-app | src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala | Scala | apache-2.0 | 17,177 |
/*******************************************************************************
Copyright (c) 2012-2013, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.tests
import junit.framework.TestSuite
import _root_.java.io.File
import _root_.java.io.FileFilter
import kr.ac.kaist.jsaf.ProjectProperties
import kr.ac.kaist.jsaf.Shell
object NightlyInterpreterJUTest {
//val SEP = File.separator
//val INTERPRETER_FAIL_TESTS_DIR = "tests/interpreter_tests"
val INTERPRETER_NIGHTLY_FAIL_TESTS_DIR = "tests/interpreter_nightly_tests"
def main(args: String*) = junit.textui.TestRunner.run(suite)
def suite() = {
    val suite = new TestSuite("Test all .js files in 'tests/interpreter_nightly_tests'.")
val failsOnly = true // false if we want to print out the test results
def addTestDir(_dir: String): Unit = {
suite.addTest(FileTests.compilerSuite(_dir, failsOnly, false))
var dir = _dir + '/'
// Navigate subdirectories
val dirFilter = new FileFilter() {
def accept(file: File) =
(file.isDirectory && file.getName.charAt(0) != '.')
}
for (subdir <- new File(dir).listFiles(dirFilter))
addTestDir(dir + subdir.getName)
}
//$JUnit-BEGIN$
//suite.addTest(FileTests.compilerSuite(INTERPRETER_FAIL_TESTS_DIR, failsOnly, false))
addTestDir(INTERPRETER_NIGHTLY_FAIL_TESTS_DIR)
//$JUnit-END$
suite
}
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/tests/NightlyInterpreterJUTest.scala | Scala | bsd-3-clause | 1,658 |
import sbt._
import Keys._
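// benchmarkSettings wires Google Caliper micro-benchmarks into a project: it adds the Caliper,
// allocation-instrumenter and gson dependencies, forks `run`, and passes the full runtime
// classpath to the forked JVM via -cp.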
object BenchmarkPlugin {
def benchmarkSettings = Seq(
libraryDependencies ++= Seq(
"com.google.code.caliper" % "caliper" % "1.0-SNAPSHOT" from "http://n0d.es/jars/caliper-1.0-SNAPSHOT.jar",
"com.google.code.java-allocation-instrumenter" % "java-allocation-instrumenter" % "2.0",
"com.google.code.gson" % "gson" % "1.7.1"
),
fork in run := true,
javaOptions in run <<= (fullClasspath in Runtime) map { cp => Seq("-cp", Build.data(cp).mkString(":")) }
)
}
| tekul/szxcvbn | project/BenchmarkPlugin.scala | Scala | mit | 523 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.io._
import scala.Serializable
import scala.collection.Map
import scala.collection.immutable.NumericRange
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import org.apache.spark._
import org.apache.spark.serializer.JavaSerializer
import org.apache.spark.util.Utils
private[spark] class ParallelCollectionPartition[T: ClassTag](
var rddId: Long,
var slice: Int,
var values: Seq[T]
) extends Partition with Serializable {
def iterator: Iterator[T] = values.iterator
override def hashCode(): Int = (41 * (41 + rddId) + slice).toInt
override def equals(other: Any): Boolean = other match {
case that: ParallelCollectionPartition[_] =>
this.rddId == that.rddId && this.slice == that.slice
case _ => false
}
override def index: Int = slice
@throws(classOf[IOException])
private def writeObject(out: ObjectOutputStream): Unit = Utils.tryOrIOException {
val sfactory = SparkEnv.get.serializer
// Treat java serializer with default action rather than going thru serialization, to avoid a
// separate serialization header.
sfactory match {
case js: JavaSerializer => out.defaultWriteObject()
case _ =>
out.writeLong(rddId)
out.writeInt(slice)
val ser = sfactory.newInstance()
Utils.serializeViaNestedStream(out, ser)(_.writeObject(values))
}
}
@throws(classOf[IOException])
private def readObject(in: ObjectInputStream): Unit = Utils.tryOrIOException {
val sfactory = SparkEnv.get.serializer
sfactory match {
case js: JavaSerializer => in.defaultReadObject()
case _ =>
rddId = in.readLong()
slice = in.readInt()
val ser = sfactory.newInstance()
Utils.deserializeViaNestedStream(in, ser)(ds => values = ds.readObject[Seq[T]]())
}
}
}
private[spark] class ParallelCollectionRDD[T: ClassTag](
sc: SparkContext,
@transient private val data: Seq[T],
numSlices: Int,
locationPrefs: Map[Int, Seq[String]])
extends RDD[T](sc, Nil) {
// TODO: Right now, each split sends along its full data, even if later down the RDD chain it gets
// cached. It might be worthwhile to write the data to a file in the DFS and read it in the split
// instead.
// UPDATE: A parallel collection can be checkpointed to HDFS, which achieves this goal.
override def getPartitions: Array[Partition] = {
val slices = ParallelCollectionRDD.slice(data, numSlices).toArray
slices.indices.map(i => new ParallelCollectionPartition(id, i, slices(i))).toArray
}
override def compute(s: Partition, context: TaskContext): Iterator[T] = {
new InterruptibleIterator(context, s.asInstanceOf[ParallelCollectionPartition[T]].iterator)
}
override def getPreferredLocations(s: Partition): Seq[String] = {
locationPrefs.getOrElse(s.index, Nil)
}
}
private object ParallelCollectionRDD {
/**
* Slice a collection into numSlices sub-collections. One extra thing we do here is to treat Range
* collections specially, encoding the slices as other Ranges to minimize memory cost. This makes
* it efficient to run Spark over RDDs representing large sets of numbers. And if the collection
* is an inclusive Range, we use inclusive range for the last slice.
*/
def slice[T: ClassTag](seq: Seq[T], numSlices: Int): Seq[Seq[T]] = {
if (numSlices < 1) {
throw new IllegalArgumentException("Positive number of slices required")
}
// Sequences need to be sliced at the same set of index positions for operations
// like RDD.zip() to behave as expected
def positions(length: Long, numSlices: Int): Iterator[(Int, Int)] = {
(0 until numSlices).iterator.map { i =>
val start = ((i * length) / numSlices).toInt
val end = (((i + 1) * length) / numSlices).toInt
(start, end)
}
}
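    // Worked example: slice(1 to 10, 3) produces the position pairs (0,3), (3,6), (6,10), which
    // map to Range(1, 4, 1) = 1,2,3, Range(4, 7, 1) = 4,5,6 and the inclusive 7 to 10, so the
    // last element of an inclusive Range is never dropped.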
seq match {
case r: Range =>
positions(r.length, numSlices).zipWithIndex.map { case ((start, end), index) =>
// If the range is inclusive, use inclusive range for the last slice
if (r.isInclusive && index == numSlices - 1) {
new Range.Inclusive(r.start + start * r.step, r.end, r.step)
}
else {
new Range(r.start + start * r.step, r.start + end * r.step, r.step)
}
}.toSeq.asInstanceOf[Seq[Seq[T]]]
case nr: NumericRange[_] =>
// For ranges of Long, Double, BigInteger, etc
val slices = new ArrayBuffer[Seq[T]](numSlices)
var r = nr
for ((start, end) <- positions(nr.length, numSlices)) {
val sliceSize = end - start
slices += r.take(sliceSize).asInstanceOf[Seq[T]]
r = r.drop(sliceSize)
}
slices
case _ =>
val array = seq.toArray // To prevent O(n^2) operations for List etc
positions(array.length, numSlices).map { case (start, end) =>
array.slice(start, end).toSeq
}.toSeq
}
}
}
| sh-cho/cshSpark | rdd/ParallelCollectionRDD.scala | Scala | apache-2.0 | 5,815 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.logical
import org.apache.calcite.plan.RelOptRule.{none, operand}
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelOptUtil}
import org.apache.calcite.rel.core.JoinRelType
import org.apache.calcite.rex.{RexProgram, RexProgramBuilder, RexUtil}
import org.apache.flink.table.plan.nodes.logical.{FlinkLogicalCalc, FlinkLogicalJoin}
import org.apache.flink.table.plan.util.PythonUtil.containsPythonCall
import scala.collection.JavaConversions._
/**
 * This rule splits a [[FlinkLogicalJoin]] whose join condition contains Python functions
 * into a [[FlinkLogicalJoin]] and a [[FlinkLogicalCalc]] that evaluates the Python functions.
 * Currently, only inner join is supported.
*
* After this rule is applied, there will be no Python Functions in the condition of the
* [[FlinkLogicalJoin]].
*/
class SplitPythonConditionFromJoinRule extends RelOptRule(
operand(classOf[FlinkLogicalJoin], none),
"SplitPythonConditionFromJoinRule") {
override def matches(call: RelOptRuleCall): Boolean = {
val join: FlinkLogicalJoin = call.rel(0).asInstanceOf[FlinkLogicalJoin]
val joinType: JoinRelType = join.getJoinType
// matches if it is inner join and it contains Python functions in condition
joinType == JoinRelType.INNER && Option(join.getCondition).exists(containsPythonCall)
}
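  // The rewrite keeps the non-Python conjuncts in the join condition and moves the Python
  // conjuncts into a FlinkLogicalCalc on top of the new join. Conceptually (illustrative SQL):
  // `a.id = b.id AND pyPredicate(a.x, b.y)` becomes an inner join on `a.id = b.id` topped by a
  // Calc whose condition is `pyPredicate(a.x, b.y)`.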
override def onMatch(call: RelOptRuleCall): Unit = {
val join: FlinkLogicalJoin = call.rel(0).asInstanceOf[FlinkLogicalJoin]
val rexBuilder = join.getCluster.getRexBuilder
val joinFilters = RelOptUtil.conjunctions(join.getCondition)
val pythonFilters = joinFilters.filter(containsPythonCall)
val remainingFilters = joinFilters.filter(!containsPythonCall(_))
val newJoinCondition = RexUtil.composeConjunction(rexBuilder, remainingFilters)
val bottomJoin = new FlinkLogicalJoin(
join.getCluster,
join.getTraitSet,
join.getLeft,
join.getRight,
newJoinCondition,
join.getJoinType)
val rexProgram = new RexProgramBuilder(bottomJoin.getRowType, rexBuilder).getProgram
val topCalcCondition = RexUtil.composeConjunction(rexBuilder, pythonFilters)
val topCalc = new FlinkLogicalCalc(
join.getCluster,
join.getTraitSet,
bottomJoin,
RexProgram.create(
bottomJoin.getRowType,
rexProgram.getExprList,
topCalcCondition,
bottomJoin.getRowType,
rexBuilder))
call.transformTo(topCalc)
}
}
object SplitPythonConditionFromJoinRule {
val INSTANCE = new SplitPythonConditionFromJoinRule
}
| gyfora/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/rules/logical/SplitPythonConditionFromJoinRule.scala | Scala | apache-2.0 | 3,385 |
import java.io.{PrintWriter, File}
import com.typesafe.scalalogging.StrictLogging
object Application extends StrictLogging {
val csvFilePath = "src/main/resources/hadoop_output_no_header.csv"
val rand = new scala.util.Random
val NR_OF_GIFS_EACH_FILE = 40
val NO_OF_LINES = 1487855 //nr of lines in hadoop_output_no_header.csv
def main(args: Array[String]): Unit = {
logger.debug("Begin")
// 1. Read tsv into a list of Link objects
val linkObjects: List[Link] = new CSVParser().parseFile(new File(csvFilePath))
val NUMBER_OF_FILES: Int = linkObjects.size / NR_OF_GIFS_EACH_FILE
logger.info("nr of files: " + NUMBER_OF_FILES)
// 2. build index part files
for (num <- 0 to NUMBER_OF_FILES) {
buildSingleHtmlPart(num, linkObjects)
}
logger.debug("end.")
}
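  // Writes out/index<pageNumber>.html for one slice of the links: at most NR_OF_GIFS_EACH_FILE
  // <img> tags wrapped in a single <div id="content"> block; empty slices produce no file.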
def buildSingleHtmlPart(pageNumber: Int, allLinks: List[Link]) = {
val startIndex: Int = pageNumber * NR_OF_GIFS_EACH_FILE
val endIndex: Int = startIndex + NR_OF_GIFS_EACH_FILE
val elements: List[Link] = allLinks.slice(startIndex, endIndex)
if (elements.size > 0) {
new PrintWriter(new File("out/index" + pageNumber + ".html")) {
write("<div id=\\"content\\">\\n")
for (link <- elements) {
write("<img src=" + link.url + ">\\n")
}
write("</div>")
close
}
}
}
}
| softberries/ugproject | HTMLgenerator/src/main/scala/Application.scala | Scala | unlicense | 1,353 |
package com.twitter.finagle
import com.twitter.finagle.stats.DefaultStatsReceiver
import com.twitter.finagle.toggle.{StandardToggleMap, ToggleMap}
package object thriftmux {
/**
* The name of the finagle-thriftmux [[ToggleMap]].
*/
private[this] val LibraryName: String =
"com.twitter.finagle.thriftmux"
/**
* The [[ToggleMap]] used for finagle-thriftmux.
*/
private[finagle] val Toggles: ToggleMap =
StandardToggleMap(LibraryName, DefaultStatsReceiver)
}
| luciferous/finagle | finagle-thriftmux/src/main/scala/com/twitter/finagle/thriftmux/package.scala | Scala | apache-2.0 | 488 |
//
// Item
//
abstract class Item(name: String, enc: Double, cost: Int,
protected var _amount: Int, private var _owner: Character) {
require(_amount >= 1)
def hand(newowner: Character): Character = {
_owner = newowner
_owner
}
def stock(am: Int): Int = {
_amount = _amount + am
_amount
}
def use(am: Int = 1): Int = {
_amount = _amount - am
_amount
}
def owner: Character = _owner
def amount: Int = _amount
def weight = enc * _amount
def value = (unit: Int) => {
require(unit > 0)
cost * _amount / unit.toDouble
}
def valueGP = value(Item.unitGP)
}
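// The unit constants express each coin in copper pieces (unitCP = 1 is the base unit).
// Illustrative example: an item with cost = 350 cp and amount = 2 has
// valueGP == 350 * 2 / 100.0 == 7.0 gold pieces.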
object Item {
val unitCP = 1
val unitSP = 10
val unitEP = 50
val unitGP = 100
val unitPP = 500
val valueUnit = unitGP
}
| eiji-a/dnd | src/main/scala/Item.scala | Scala | bsd-3-clause | 748 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.streaming.parser
import java.nio.charset.Charset
import java.text.SimpleDateFormat
import org.apache.carbondata.core.constants.CarbonCommonConstants
object FieldConverter {
/**
* Return a String representation of the input value
* @param value input value
* @param serializationNullFormat string for null value
* @param delimiterLevel1 level 1 delimiter for complex type
* @param delimiterLevel2 level 2 delimiter for complex type
* @param timeStampFormat timestamp format
* @param dateFormat date format
* @param isVarcharType whether it is varchar type. A varchar type has no string length limit
* @param level level for recursive call
*/
def objectToString(
value: Any,
serializationNullFormat: String,
delimiterLevel1: String,
delimiterLevel2: String,
timeStampFormat: SimpleDateFormat,
dateFormat: SimpleDateFormat,
isVarcharType: Boolean = false,
level: Int = 1): String = {
if (value == null) {
serializationNullFormat
} else {
value match {
case s: String => if (!isVarcharType &&
s.length > CarbonCommonConstants.MAX_CHARS_PER_COLUMN_DEFAULT) {
throw new Exception("Dataload failed, String length cannot exceed " +
CarbonCommonConstants.MAX_CHARS_PER_COLUMN_DEFAULT + " characters")
} else {
s
}
case d: java.math.BigDecimal => d.toPlainString
case i: java.lang.Integer => i.toString
case d: java.lang.Double => d.toString
case t: java.sql.Timestamp => timeStampFormat format t
case d: java.sql.Date => dateFormat format d
case b: java.lang.Boolean => b.toString
case s: java.lang.Short => s.toString
case f: java.lang.Float => f.toString
case bs: Array[Byte] => new String(bs,
Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET))
case s: scala.collection.Seq[Any] =>
val delimiter = if (level == 1) {
delimiterLevel1
} else {
delimiterLevel2
}
val builder = new StringBuilder()
s.foreach { x =>
builder.append(objectToString(x, serializationNullFormat, delimiterLevel1,
delimiterLevel2, timeStampFormat, dateFormat, isVarcharType, level + 1))
.append(delimiter)
}
builder.substring(0, builder.length - delimiter.length())
case m: scala.collection.Map[_, _] =>
throw new Exception("Unsupported data type: Map")
case r: org.apache.spark.sql.Row =>
val delimiter = if (level == 1) {
delimiterLevel1
} else {
delimiterLevel2
}
val builder = new StringBuilder()
for (i <- 0 until r.length) {
builder.append(objectToString(r(i), serializationNullFormat, delimiterLevel1,
delimiterLevel2, timeStampFormat, dateFormat, isVarcharType, level + 1))
.append(delimiter)
}
builder.substring(0, builder.length - delimiter.length())
case other => other.toString
}
}
}
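  // Usage sketch (illustrative arguments):
  //   val ts = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); val d = new SimpleDateFormat("yyyy-MM-dd")
  //   objectToString(Seq("a", "b"), "NULL", "|", ";", ts, d)  // -> "a|b" (level-1 delimiter)
  //   objectToString(null, "NULL", "|", ";", ts, d)           // -> "NULL"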
}
| sgururajshetty/carbondata | streaming/src/main/scala/org/apache/carbondata/streaming/parser/FieldConverter.scala | Scala | apache-2.0 | 4,013 |
package tifmo.dcstree
sealed abstract class DCSTreeEdge {
def inRole: SemRole
}
case class DCSTreeEdgeNormal(inRole: SemRole) extends DCSTreeEdge
case class DCSTreeEdgeQuantifier(inRole: SemRole, quantifier: Quantifier) extends DCSTreeEdge
/**
* A DCS tree edge marked with relation
* @param inRole The role of the parent node on this edge
* @param relation The relation this edge is marked with
 * @param parentToChild `true` if the parent is the first argument of `relation` and the child is the second; `false` for the reverse.
*/
case class DCSTreeEdgeRelation(inRole: SemRole, relation: Relation, parentToChild: Boolean) extends DCSTreeEdge
| tomtung/tifmo | src/main/scala/tifmo/dcstree/DCSTreeEdge.scala | Scala | bsd-2-clause | 652 |