code (stringlengths 5 to 1M) | repo_name (stringlengths 5 to 109) | path (stringlengths 6 to 208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5 to 1M) |
---|---|---|---|---|---|
package eu.ace_design.island.arena.exporters
import java.io.{File, PrintWriter}
import java.nio.file.{Files, Paths, StandardCopyOption}
import eu.ace_design.island.game.{ExplorationEvent, Game, GameBoard}
import eu.ace_design.island.map.IslandMap
import eu.ace_design.island.viewer.PoiJSONViewer
import eu.ace_design.island.viewer.svg.{FogOfWar, FogOfWarViewer}
trait ResultExporter { val outputDir: String }
trait CommonExporter extends ResultExporter {
def apply(board: GameBoard, m: IslandMap)
}
trait PlayerExporter extends ResultExporter {}
case class POIsExporter(override val outputDir: String) extends CommonExporter {
override def apply(board: GameBoard, m: IslandMap) {
val viewer = PoiJSONViewer(board)
val tmp = Paths.get(viewer(m).getAbsolutePath)
val out = (new File(s"$outputDir/_pois.json")).toPath
Files.move(tmp,out, StandardCopyOption.REPLACE_EXISTING)
}
}
case class GameLogExporter(override val outputDir: String) extends PlayerExporter {
def apply(name: String, events: Seq[ExplorationEvent]): Unit = {
val jsonEvents = events map { _.toJson } mkString("[", ",", "]")
val writer = new PrintWriter(new File(s"$outputDir/$name.json"))
try { writer.write(jsonEvents) } finally { writer.close() }
}
}
case class VisitedMapExporter(override val outputDir: String) extends PlayerExporter {
def apply(name: String, m: IslandMap, game: Game, tileUnit: Int, board: GameBoard) {
val fog = new FogOfWar(factor = tileUnit, visited = game.visited, scanned = game.scanned,
pois = board.pois.values.flatten.toSet, size = m.size)
val viewer = FogOfWarViewer(fog)
val tmp = Paths.get(viewer(m).getAbsolutePath)
val out = (new File(s"$outputDir/$name.svg")).toPath
Files.move(tmp,out, StandardCopyOption.REPLACE_EXISTING)
}
}
| ace-design/island | arena/src/main/scala/eu/ace_design/island/arena/exporters/ResultExporters.scala | Scala | lgpl-3.0 | 1,838 |
package de.fosd.typechef.parser.c
import junit.framework.Assert._
import de.fosd.typechef.featureexpr._
import org.junit.{Assert, Test}
import de.fosd.typechef.conditional.Opt
import java.util
import util.Collections
class ConsistencyTest extends TestHelper {
def parseFile(fileName: String, featureModel: FeatureModel) {
val inputStream = getClass.getResourceAsStream("/" + fileName)
assertNotNull("file not found " + fileName, inputStream)
val p = new CParser(featureModel)
val result = p.translationUnit(
lexStream(inputStream, fileName, Collections.singletonList("testfiles/boa/"), featureModel), FeatureExprFactory.True)
println("parsing done.")
(result: @unchecked) match {
case p.Success(ast, unparsed) => {
checkASTAssumptions(ast.asInstanceOf[TranslationUnit], featureModel)
//success
}
case p.NoSuccess(msg, unparsed, inner) =>
println(unparsed.context)
Assert.fail(msg + " at " + unparsed + " " + inner)
}
}
def checkASTAssumptions(ast: TranslationUnit, featureModel: FeatureModel) {
val knownExternals = new util.IdentityHashMap[ExternalDef, FeatureExpr]();
for (Opt(f, ext) <- ast.defs) {
assert(f.isSatisfiable(featureModel), "unsatisfiable code in AST: " + ext.getPositionFrom.getLine + " for " + ext)
if (f.isSatisfiable(featureModel))
if (!knownExternals.containsKey(ext)) {
knownExternals.put(ext, f);
} else {
val priorFexpr = knownExternals.get(ext)
assert((f mex priorFexpr).isTautology(featureModel), "!" + priorFexpr + " mex " + f + " in " + ext.getPositionFrom.getLine + " for " + ext)
knownExternals.put(ext, f or priorFexpr)
}
}
}
@Test
def testRz1000() {
val oldDefault = FeatureExprFactory.dflt
FeatureExprFactory.setDefault(FeatureExprFactory.bdd)
try {
//had serious problems with this file during type checking
val fmStream = getClass.getResourceAsStream("/other/approx.fm")
val f: FeatureExpr = new FeatureExprParser(FeatureExprFactory.dflt).parseFile(fmStream)
// val f: FeatureExpr = new FeatureExprParser(FeatureExprFactory.dflt).parseFile("other/approx.fm")
val fm = FeatureExprFactory.dflt.featureModelFactory.create(f and FeatureExprFactory.createDefinedExternal("CONFIG_BLK_DEV_RZ1000") and FeatureExprFactory.createDefinedExternal("CONFIG_IDE"))
parseFile("other/rz1000.pi", fm)
} finally {
FeatureExprFactory.setDefault(oldDefault)
}
}
} | ckaestne/TypeChef | CParser/src/test/scala/de/fosd/typechef/parser/c/ConsistencyTest.scala | Scala | lgpl-3.0 | 2,550 |
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.model
import akka.util.ByteString
import com.netflix.atlas.json.Json
import org.openjdk.jmh.annotations.Benchmark
import org.openjdk.jmh.annotations.Scope
import org.openjdk.jmh.annotations.State
import org.openjdk.jmh.infra.Blackhole
/**
* ```
* > jmh:run -prof gc -wi 10 -i 10 -f1 -t1 .*LwcMessagesParse.*
* ```
*
* Throughput:
*
* ```
* Benchmark Mode Cnt Score Error Units
* parseDatapoint thrpt 10 1138229.827 ± 24986.495 ops/s
* parseDatapointBatch thrpt 10 2151771.057 ± 54618.924 ops/s
* parseDatapointByteString thrpt 10 1334170.515 ± 9543.687 ops/s
* parseDatapointByteStringUTF8 thrpt 10 1026735.084 ± 6455.626 ops/s
* ```
*
* Allocations:
*
* ```
* Benchmark Mode Cnt Score Error Units
* parseDatapoint alloc 10 1680.000 ± 0.001 B/op
* parseDatapointBatch alloc 10 1528.000 ± 0.001 B/op
* parseDatapointByteString alloc 10 1976.000 ± 0.001 B/op
* parseDatapointByteStringUTF8 alloc 10 2176.000 ± 0.001 B/op
* ```
**/
@State(Scope.Thread)
class LwcMessagesParse {
private val tags = Map(
"nf.app" -> "atlas_backend",
"nf.cluster" -> "atlas_backend-dev",
"nf.node" -> "i-123456789",
"name" -> "jvm.gc.pause",
"statistic" -> "totalTime"
)
private val datapoint = LwcDatapoint(1234567890L, "i-12345", tags, 42.0)
private val json = Json.encode(datapoint)
private val bytes = ByteString(json)
private val batchBytes = LwcMessages.encodeBatch(Seq(datapoint))
@Benchmark
def parseDatapoint(bh: Blackhole): Unit = {
bh.consume(LwcMessages.parse(json))
}
@Benchmark
def parseDatapointByteString(bh: Blackhole): Unit = {
bh.consume(LwcMessages.parse(bytes))
}
@Benchmark
def parseDatapointByteStringUTF8(bh: Blackhole): Unit = {
bh.consume(LwcMessages.parse(bytes.utf8String))
}
@Benchmark
def parseDatapointBatch(bh: Blackhole): Unit = {
bh.consume(LwcMessages.parseBatch(batchBytes))
}
}
| Netflix/atlas | atlas-jmh/src/main/scala/com/netflix/atlas/eval/model/LwcMessagesParse.scala | Scala | apache-2.0 | 2,835 |
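The scaladoc above explains how to run the benchmark and what it measures. As a usage note, a minimal round-trip sketch using only the `LwcMessages`/`LwcDatapoint` calls already exercised by the benchmark might look like this (the object name and tag values are invented for illustration):

```scala
import akka.util.ByteString
import com.netflix.atlas.eval.model.{LwcDatapoint, LwcMessages}
import com.netflix.atlas.json.Json

object LwcRoundTripSketch {
  def main(args: Array[String]): Unit = {
    val datapoint = LwcDatapoint(1234567890L, "i-12345", Map("name" -> "jvm.gc.pause"), 42.0)

    // Single message: encode to JSON, then parse it back from a ByteString as the benchmark does.
    val json = Json.encode(datapoint)
    println(LwcMessages.parse(ByteString(json)))

    // Batch form: per the numbers above, roughly twice the parse throughput of plain JSON.
    val batch = LwcMessages.encodeBatch(Seq(datapoint))
    println(LwcMessages.parseBatch(batch))
  }
}
```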
package org.au9ustine.puzzles.s99
import org.scalatest.{Matchers, FlatSpec}
/**
* Problem 14 Unit Testing
*
* Created by shaotch on 10/15/15.
*/
class P14$Test extends FlatSpec with Matchers {
"P14.duplicate" should "duplicate the elements of a list" in {
val lst = List('a, 'b, 'c, 'c, 'd)
val expected = List('a, 'a, 'b, 'b, 'c, 'c, 'c, 'c, 'd, 'd)
P14.duplicate(lst) should be (expected)
}
}
| au9ustine/org.au9ustine.puzzles.s99 | src/test/scala/org/au9ustine/puzzles/s99/P14$Test.scala | Scala | apache-2.0 | 420 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.{lang => jl}
import java.sql.{Date, Timestamp}
import scala.collection.mutable
import scala.util.Random
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.{CatalogRelation, CatalogStatistics}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.internal.StaticSQLConf
import org.apache.spark.sql.test.{SharedSQLContext, SQLTestUtils}
import org.apache.spark.sql.test.SQLTestData.ArrayData
import org.apache.spark.sql.types._
/**
* End-to-end suite testing statistics collection and use on both entire table and columns.
*/
class StatisticsCollectionSuite extends StatisticsCollectionTestBase with SharedSQLContext {
import testImplicits._
private def checkTableStats(tableName: String, expectedRowCount: Option[Int])
: Option[CatalogStatistics] = {
val df = spark.table(tableName)
val stats = df.queryExecution.analyzed.collect { case rel: LogicalRelation =>
assert(rel.catalogTable.get.stats.flatMap(_.rowCount) === expectedRowCount)
rel.catalogTable.get.stats
}
assert(stats.size == 1)
stats.head
}
test("estimates the size of a limit 0 on outer join") {
withTempView("test") {
Seq(("one", 1), ("two", 2), ("three", 3), ("four", 4)).toDF("k", "v")
.createOrReplaceTempView("test")
val df1 = spark.table("test")
val df2 = spark.table("test").limit(0)
val df = df1.join(df2, Seq("k"), "left")
val sizes = df.queryExecution.analyzed.collect { case g: Join =>
g.stats(conf).sizeInBytes
}
      assert(sizes.size === 1, s"number of Join nodes is wrong:\n ${df.queryExecution}")
assert(sizes.head === BigInt(96),
s"expected exact size 96 for table 'test', got: ${sizes.head}")
}
}
test("analyze column command - unsupported types and invalid columns") {
val tableName = "column_stats_test1"
withTable(tableName) {
Seq(ArrayData(Seq(1, 2, 3), Seq(Seq(1, 2, 3)))).toDF().write.saveAsTable(tableName)
// Test unsupported data types
val err1 = intercept[AnalysisException] {
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS FOR COLUMNS data")
}
assert(err1.message.contains("does not support statistics collection"))
// Test invalid columns
val err2 = intercept[AnalysisException] {
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS FOR COLUMNS some_random_column")
}
assert(err2.message.contains("does not exist"))
}
}
test("test table-level statistics for data source table") {
val tableName = "tbl"
withTable(tableName) {
sql(s"CREATE TABLE $tableName(i INT, j STRING) USING parquet")
Seq(1 -> "a", 2 -> "b").toDF("i", "j").write.mode("overwrite").insertInto(tableName)
// noscan won't count the number of rows
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS noscan")
checkTableStats(tableName, expectedRowCount = None)
// without noscan, we count the number of rows
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS")
checkTableStats(tableName, expectedRowCount = Some(2))
}
}
test("SPARK-15392: DataFrame created from RDD should not be broadcasted") {
val rdd = sparkContext.range(1, 100).map(i => Row(i, i))
val df = spark.createDataFrame(rdd, new StructType().add("a", LongType).add("b", LongType))
assert(df.queryExecution.analyzed.stats(conf).sizeInBytes >
spark.sessionState.conf.autoBroadcastJoinThreshold)
assert(df.selectExpr("a").queryExecution.analyzed.stats(conf).sizeInBytes >
spark.sessionState.conf.autoBroadcastJoinThreshold)
}
test("column stats round trip serialization") {
// Make sure we serialize and then deserialize and we will get the result data
val df = data.toDF(stats.keys.toSeq :+ "carray" : _*)
stats.zip(df.schema).foreach { case ((k, v), field) =>
withClue(s"column $k with type ${field.dataType}") {
val roundtrip = ColumnStat.fromMap("table_is_foo", field, v.toMap(k, field.dataType))
assert(roundtrip == Some(v))
}
}
}
test("analyze column command - result verification") {
// (data.head.productArity - 1) because the last column does not support stats collection.
assert(stats.size == data.head.productArity - 1)
val df = data.toDF(stats.keys.toSeq :+ "carray" : _*)
checkColStats(df, stats)
}
test("column stats collection for null columns") {
val dataTypes: Seq[(DataType, Int)] = Seq(
BooleanType, ByteType, ShortType, IntegerType, LongType,
DoubleType, FloatType, DecimalType.SYSTEM_DEFAULT,
StringType, BinaryType, DateType, TimestampType
).zipWithIndex
val df = sql("select " + dataTypes.map { case (tpe, idx) =>
s"cast(null as ${tpe.sql}) as col$idx"
}.mkString(", "))
val expectedColStats = dataTypes.map { case (tpe, idx) =>
(s"col$idx", ColumnStat(0, None, None, 1, tpe.defaultSize.toLong, tpe.defaultSize.toLong))
}
checkColStats(df, mutable.LinkedHashMap(expectedColStats: _*))
}
test("number format in statistics") {
val numbers = Seq(
BigInt(0) -> ("0.0 B", "0"),
BigInt(100) -> ("100.0 B", "100"),
BigInt(2047) -> ("2047.0 B", "2.05E+3"),
BigInt(2048) -> ("2.0 KB", "2.05E+3"),
BigInt(3333333) -> ("3.2 MB", "3.33E+6"),
BigInt(4444444444L) -> ("4.1 GB", "4.44E+9"),
BigInt(5555555555555L) -> ("5.1 TB", "5.56E+12"),
BigInt(6666666666666666L) -> ("5.9 PB", "6.67E+15"),
BigInt(1L << 10 ) * (1L << 60) -> ("1024.0 EB", "1.18E+21"),
BigInt(1L << 11) * (1L << 60) -> ("2.36E+21 B", "2.36E+21")
)
numbers.foreach { case (input, (expectedSize, expectedRows)) =>
val stats = Statistics(sizeInBytes = input, rowCount = Some(input))
val expectedString = s"sizeInBytes=$expectedSize, rowCount=$expectedRows," +
s" isBroadcastable=${stats.isBroadcastable}"
assert(stats.simpleString == expectedString)
}
}
}
/**
* The base for test cases that we want to include in both the hive module (for verifying behavior
* when using the Hive external catalog) as well as in the sql/core module.
*/
abstract class StatisticsCollectionTestBase extends QueryTest with SQLTestUtils {
import testImplicits._
private val dec1 = new java.math.BigDecimal("1.000000000000000000")
private val dec2 = new java.math.BigDecimal("8.000000000000000000")
private val d1 = Date.valueOf("2016-05-08")
private val d2 = Date.valueOf("2016-05-09")
private val t1 = Timestamp.valueOf("2016-05-08 00:00:01")
private val t2 = Timestamp.valueOf("2016-05-09 00:00:02")
/**
* Define a very simple 3 row table used for testing column serialization.
* Note: last column is seq[int] which doesn't support stats collection.
*/
protected val data = Seq[
(jl.Boolean, jl.Byte, jl.Short, jl.Integer, jl.Long,
jl.Double, jl.Float, java.math.BigDecimal,
String, Array[Byte], Date, Timestamp,
Seq[Int])](
(false, 1.toByte, 1.toShort, 1, 1L, 1.0, 1.0f, dec1, "s1", "b1".getBytes, d1, t1, null),
(true, 2.toByte, 3.toShort, 4, 5L, 6.0, 7.0f, dec2, "ss9", "bb0".getBytes, d2, t2, null),
(null, null, null, null, null, null, null, null, null, null, null, null, null)
)
/** A mapping from column to the stats collected. */
protected val stats = mutable.LinkedHashMap(
"cbool" -> ColumnStat(2, Some(false), Some(true), 1, 1, 1),
"cbyte" -> ColumnStat(2, Some(1.toByte), Some(2.toByte), 1, 1, 1),
"cshort" -> ColumnStat(2, Some(1.toShort), Some(3.toShort), 1, 2, 2),
"cint" -> ColumnStat(2, Some(1), Some(4), 1, 4, 4),
"clong" -> ColumnStat(2, Some(1L), Some(5L), 1, 8, 8),
"cdouble" -> ColumnStat(2, Some(1.0), Some(6.0), 1, 8, 8),
"cfloat" -> ColumnStat(2, Some(1.0f), Some(7.0f), 1, 4, 4),
"cdecimal" -> ColumnStat(2, Some(Decimal(dec1)), Some(Decimal(dec2)), 1, 16, 16),
"cstring" -> ColumnStat(2, None, None, 1, 3, 3),
"cbinary" -> ColumnStat(2, None, None, 1, 3, 3),
"cdate" -> ColumnStat(2, Some(DateTimeUtils.fromJavaDate(d1)),
Some(DateTimeUtils.fromJavaDate(d2)), 1, 4, 4),
"ctimestamp" -> ColumnStat(2, Some(DateTimeUtils.fromJavaTimestamp(t1)),
Some(DateTimeUtils.fromJavaTimestamp(t2)), 1, 8, 8)
)
private val randomName = new Random(31)
/**
* Compute column stats for the given DataFrame and compare it with colStats.
*/
def checkColStats(
df: DataFrame,
colStats: mutable.LinkedHashMap[String, ColumnStat]): Unit = {
val tableName = "column_stats_test_" + randomName.nextInt(1000)
withTable(tableName) {
df.write.saveAsTable(tableName)
// Collect statistics
sql(s"analyze table $tableName compute STATISTICS FOR COLUMNS " +
colStats.keys.mkString(", "))
// Validate statistics
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tableName))
assert(table.stats.isDefined)
assert(table.stats.get.colStats.size == colStats.size)
colStats.foreach { case (k, v) =>
withClue(s"column $k") {
assert(table.stats.get.colStats(k) == v)
}
}
}
}
// This test will be run twice: with and without Hive support
test("SPARK-18856: non-empty partitioned table should not report zero size") {
withTable("ds_tbl", "hive_tbl") {
spark.range(100).select($"id", $"id" % 5 as "p").write.partitionBy("p").saveAsTable("ds_tbl")
val stats = spark.table("ds_tbl").queryExecution.optimizedPlan.stats(conf)
assert(stats.sizeInBytes > 0, "non-empty partitioned table should not report zero size.")
if (spark.conf.get(StaticSQLConf.CATALOG_IMPLEMENTATION) == "hive") {
sql("CREATE TABLE hive_tbl(i int) PARTITIONED BY (j int)")
sql("INSERT INTO hive_tbl PARTITION(j=1) SELECT 1")
val stats2 = spark.table("hive_tbl").queryExecution.optimizedPlan.stats(conf)
assert(stats2.sizeInBytes > 0, "non-empty partitioned table should not report zero size.")
}
}
}
// This test will be run twice: with and without Hive support
test("conversion from CatalogStatistics to Statistics") {
withTable("ds_tbl", "hive_tbl") {
// Test data source table
checkStatsConversion(tableName = "ds_tbl", isDatasourceTable = true)
// Test hive serde table
if (spark.conf.get(StaticSQLConf.CATALOG_IMPLEMENTATION) == "hive") {
checkStatsConversion(tableName = "hive_tbl", isDatasourceTable = false)
}
}
}
private def checkStatsConversion(tableName: String, isDatasourceTable: Boolean): Unit = {
// Create an empty table and run analyze command on it.
val createTableSql = if (isDatasourceTable) {
s"CREATE TABLE $tableName (c1 INT, c2 STRING) USING PARQUET"
} else {
s"CREATE TABLE $tableName (c1 INT, c2 STRING)"
}
sql(createTableSql)
// Analyze only one column.
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS FOR COLUMNS c1")
val (relation, catalogTable) = spark.table(tableName).queryExecution.analyzed.collect {
case catalogRel: CatalogRelation => (catalogRel, catalogRel.tableMeta)
case logicalRel: LogicalRelation => (logicalRel, logicalRel.catalogTable.get)
}.head
val emptyColStat = ColumnStat(0, None, None, 0, 4, 4)
// Check catalog statistics
assert(catalogTable.stats.isDefined)
assert(catalogTable.stats.get.sizeInBytes == 0)
assert(catalogTable.stats.get.rowCount == Some(0))
assert(catalogTable.stats.get.colStats == Map("c1" -> emptyColStat))
// Check relation statistics
assert(relation.stats(conf).sizeInBytes == 0)
assert(relation.stats(conf).rowCount == Some(0))
assert(relation.stats(conf).attributeStats.size == 1)
val (attribute, colStat) = relation.stats(conf).attributeStats.head
assert(attribute.name == "c1")
assert(colStat == emptyColStat)
}
}
| MLnick/spark | sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala | Scala | apache-2.0 | 12,855 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.indexer.lucene
import org.apache.lucene.document.Document
trait DocumentRecovery[T] {
def toEntity(d: Document): T
}
| j-mckitrick/ensime-sbt | src/sbt-test/ensime-sbt/ensime-server/core/src/main/scala/org/ensime/indexer/lucene/DocumentRecovery.scala | Scala | apache-2.0 | 269 |
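`DocumentRecovery` is the hook the indexer uses to map a Lucene `Document` back to a domain object. A hypothetical implementation sketch follows; the `ClassRef` entity and the `fqn` field name are made up for illustration and are not part of the ENSIME index schema:

```scala
package org.ensime.indexer.lucene

import org.apache.lucene.document.Document

// Invented entity type, purely to show the shape of an implementation.
final case class ClassRef(fqn: String)

object ClassRefRecovery extends DocumentRecovery[ClassRef] {
  // Document.get returns the stored value of a field, or null if the field is absent.
  def toEntity(d: Document): ClassRef = ClassRef(d.get("fqn"))
}
```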
package com.thetestpeople.trt.webdriver.screens
import play.api.test.TestBrowser
import org.openqa.selenium.WebDriver
import com.thetestpeople.trt.webdriver.screens.RichSelenium._
import org.openqa.selenium.By._
import org.openqa.selenium.WebElement
import com.thetestpeople.trt.utils.Utils
import org.openqa.selenium.By
object SearchLogScreen {
val QueryFieldLocator = By.id("query-field")
}
class SearchLogsScreen(implicit automationContext: AutomationContext) extends AbstractScreen with HasMainMenu {
import SearchLogScreen._
webDriver.waitForDisplayedAndEnabled(id("page-SearchLogs"))
def query: String =
webDriver.waitForDisplayedAndEnabled(QueryFieldLocator).getAttribute("value")
def query_=(text: String) = {
    log(s"Set search query: $text")
val elem = webDriver.waitForDisplayedAndEnabled(QueryFieldLocator)
elem.clear()
elem.sendKeys(text)
}
def clickSearch() {
log("Click 'Search'")
webDriver.waitForDisplayedAndEnabled(By.id("search-button")).click()
}
def executionRows: Seq[ExecutionRow] = {
val executionRowElements = webDriver.findElements_(cssSelector("tr.execution-row"))
val fragmentRowElements = webDriver.findElements_(cssSelector("tr.fragment-row"))
for (((rowElement, fragmentElement), index) ← executionRowElements.zip(fragmentRowElements).zipWithIndex)
yield ExecutionRow(rowElement, fragmentElement, index)
}
case class ExecutionRow(rowElement: WebElement, fragmentElement: WebElement, index: Int) {
private def ordinal = Utils.ordinalName(index + 1)
def fragmentText: String = fragmentElement.getText
}
} | thetestpeople/trt | test/com/thetestpeople/trt/webdriver/screens/SearchLogsScreen.scala | Scala | mit | 1,627 |
package ee.cone.c4actor.dep
trait DepAskFactoryApp {
def depAskFactory: DepAskFactory
}
| conecenter/c4proto | extra_lib/src/main/scala/ee/cone/c4actor/dep/DepAskFactoryApp.scala | Scala | apache-2.0 | 91 |
package mesosphere.marathon.integration.setup
import java.lang.management.ManagementFactory
import org.eclipse.jetty.server.{ Request, Server }
import org.eclipse.jetty.server.handler.AbstractHandler
import javax.servlet.http.{ HttpServletResponse, HttpServletRequest }
import akka.actor.ActorSystem
import spray.client.pipelining._
import scala.concurrent.Await._
import scala.concurrent.duration._
class AppMock(appId: String, version: String, url: String) extends AbstractHandler {
import mesosphere.util.ThreadPoolContext.context
implicit val system = ActorSystem()
val pipeline = sendReceive
val waitTime = 30.seconds
val processId = ManagementFactory.getRuntimeMXBean.getName
def start(port: Int) {
val server = new Server(port)
server.setHandler(this)
server.start()
println(s"AppMock[$appId $version]: has taken the stage at port $port. Will query $url for health status.")
server.join()
println(s"AppMock[$appId $version]: says goodbye")
}
override def handle(target: String,
baseRequest: Request,
request: HttpServletRequest,
response: HttpServletResponse): Unit = {
val res = result(pipeline(Get(url)), waitTime)
println(s"AppMock[$appId $version]: current health is $res")
response.setStatus(res.status.intValue)
baseRequest.setHandled(true)
response.getWriter.print(res.entity.asString)
}
}
object AppMock {
def main(args: Array[String]) {
val port = sys.env("PORT0").toInt
val appId = args(0)
val version = args(1)
val url = args(2) + "/" + port
new AppMock(appId, version, url).start(port)
}
}
| 14Zen/marathon | src/test/scala/mesosphere/marathon/integration/setup/AppMock.scala | Scala | apache-2.0 | 1,670 |
package fpinscala.errorhandling
import org.scalatest._
import scala.util.Try
class OptionTest extends FlatSpec with Matchers {
"The Option trait" should "apply f if the Option is not None." in {
val someInteger = Some(2)
val someIntegerMultiplyBy2 = someInteger.map(element => element * 2)
someIntegerMultiplyBy2 should be(Some(4))
val none = None
val noneMultiplyBy2 = none.map(element => element.toString)
noneMultiplyBy2 should be(None)
}
it should "returns the result inside the Some case of the Option, or if the Option is None, returns the given default value" in {
val someInteger = Some(3)
val someIntegerGetOrElse = someInteger.getOrElse(4)
someIntegerGetOrElse should be(3)
val someNone = None
val someNoneGetOrElse = someNone.getOrElse(4)
someNoneGetOrElse should be(4)
}
it should "apply f, which may fail, to the Option if not None." in {
val someInteger = Some(2)
val someIntegerMultiplyBy2 = someInteger.flatMap(element => Some(element * 2))
someIntegerMultiplyBy2 should be(Some(4))
val none = None
val noneMultiplyBy2 = none.flatMap(element => Some(element.toString))
noneMultiplyBy2 should be(None)
}
it should "returns the first Option if it’s defined; otherwise, it returns the second Option" in {
val someInteger = Some(5)
val orElse = someInteger.orElse(Some(4))
orElse should be(Some(5))
val someNone = None
val orElseNone = someNone.orElse(Some(2))
orElseNone should be(Some(2))
}
it should "convert Some to None if the value doesn’t satisfy f." in {
val someInteger = Some(5)
val someIntegerFilteredTrue = someInteger.filter(element => element > 2)
val someIntegerFilteredFalse = someInteger.filter(element => element > 10)
someIntegerFilteredTrue should be(Some(5))
someIntegerFilteredFalse should be(None)
}
it should "work out the variance" in {
val someSequence = Seq[Double](600, 470, 170, 430, 300)
val variance = Option.variance(someSequence)
variance should be(Some(21704))
}
  it should "combine two Option values using a binary function. If either Option value is None, then the return value is too" in {
def addTwoInteger(integer1: Int, integer2: Int): Double = 2.0
val someValue = Some(3)
val noneValue = None
val optionsCombined = Option.map2(someValue, noneValue)(addTwoInteger)
optionsCombined should be(None)
val someValue2 = Some(3)
val noneValue2 = Some(4)
val optionsCombined2 = Option.map2(someValue2, noneValue2)(addTwoInteger)
optionsCombined2 should be(Some(2.0))
}
it should "combine a list of Options into one Option containing a list of all the Some values in the original list" in {
val listWithNone = List(Some(3), None, Some(1))
val sequenceListWithNone = Option.sequence(listWithNone)
sequenceListWithNone should be(None)
val listWithSomes = List(Some(3), Some(1), Some(2))
val sequenceListWithoutNones = Option.sequence(listWithSomes)
sequenceListWithoutNones should be(Some(List(3, 1, 2)))
}
it should "traverse a list and apply a function" in {
/*val listWithNone = List("1", "2", "hola")
val traverseListWithNone = Option.traverse[String, Int](listWithNone)(i => Some(i.toInt))
traverseListWithNone should be(None)*/
}
}
| ardlema/fpinscala | exercises/src/test/scala/fpinscala/errorhandling/OptionTest.scala | Scala | mit | 3,350 |
package com.sopranoworks.bolt
import org.joda.time.DateTime
import org.specs2.mutable.Specification
class TimestampParserTest extends Specification {
"parse timestamp" should {
"normally success" in{
val (tm,n) = TimestampParser("2014-09-27 12:30:00.45 America/Los_Angeles")
tm.getYear must_== 2014
tm.getMonthOfYear must_== 9
tm.getDayOfMonth must_== 27
tm.getHourOfDay must_== 12
tm.getMinuteOfHour must_== 30
tm.getSecondOfMinute must_== 0
tm.getMillisOfSecond must_== 450
}
"with zone suffix" in {
val (tm,n) = TimestampParser("2014-09-27 12:30:00.45-08")
tm.getYear must_== 2014
tm.getMonthOfYear must_== 9
tm.getDayOfMonth must_== 27
tm.getHourOfDay must_== 12
tm.getMinuteOfHour must_== 30
tm.getSecondOfMinute must_== 0
tm.getMillisOfSecond must_== 450
}
"with ISO format" in {
val (tm,n) = TimestampParser("2014-09-27T12:30:00.45-08")
tm.getYear must_== 2014
tm.getMonthOfYear must_== 9
tm.getDayOfMonth must_== 27
tm.getHourOfDay must_== 12
tm.getMinuteOfHour must_== 30
tm.getSecondOfMinute must_== 0
tm.getMillisOfSecond must_== 450
}
"with ISO format2" in {
val (tm,n) = TimestampParser("2014-09-27T12:30:00.45Z")
tm.getYear must_== 2014
tm.getMonthOfYear must_== 9
tm.getDayOfMonth must_== 27
tm.getHourOfDay must_== 12
tm.getMinuteOfHour must_== 30
tm.getSecondOfMinute must_== 0
tm.getMillisOfSecond must_== 450
}
"only Date" in {
val (tm,n) = TimestampParser("2014-09-27")
tm.getYear must_== 2014
tm.getMonthOfYear must_== 9
tm.getDayOfMonth must_== 27
}
"date and timezone" in {
val (tm,n) = TimestampParser("2014-09-27 America/Los_Angeles")
tm.getYear must_== 2014
tm.getMonthOfYear must_== 9
tm.getDayOfMonth must_== 27
}
"hour only" in{
val (tm,n) = TimestampParser("2014-09-27 12")
tm.getYear must_== 2014
tm.getMonthOfYear must_== 9
tm.getDayOfMonth must_== 27
tm.getHourOfDay must_== 12
}
"hour and minute" in{
val (tm,n) = TimestampParser("2014-09-27 12:30")
tm.getYear must_== 2014
tm.getMonthOfYear must_== 9
tm.getDayOfMonth must_== 27
tm.getHourOfDay must_== 12
tm.getMinuteOfHour must_== 30
}
}
}
| OsamuTakahashi/bolt | src/test/scala/com/sopranoworks/bolt/TimestampParserTest.scala | Scala | mit | 2,460 |
/*
* Copyright (C) 2005 - 2019 Schlichtherle IT Services.
* All rights reserved. Use is subject to license terms.
*/
package global.namespace.truelicense.tests.v2.xml
import global.namespace.truelicense.tests.core.LicenseConsumerPerformance
object V2XmlLicenseConsumerPerformance extends LicenseConsumerPerformance with V2XmlTestContext {
def main(args: Array[String]): Unit = run()
}
| christian-schlichtherle/truelicense | tests/src/test/scala/global/namespace/truelicense/tests/v2/xml/V2XmlLicenseConsumerPerformance.scala | Scala | apache-2.0 | 393 |
package dk.bayes.math.numericops
trait isIdentical[X] {
def apply(x1:X,x2:X,tolerance:Double):Boolean
} | danielkorzekwa/bayes-scala | src/main/scala/dk/bayes/math/numericops/isIdentical.scala | Scala | bsd-2-clause | 107 |
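`isIdentical` is a small type-class-style trait for tolerance-based equality used by the numeric ops. A possible instance for plain `Double` values, as a sketch only (not taken from the library):

```scala
package dk.bayes.math.numericops

object DoubleIsIdentical extends isIdentical[Double] {
  // Two doubles are considered identical if they differ by at most the given tolerance.
  def apply(x1: Double, x2: Double, tolerance: Double): Boolean =
    math.abs(x1 - x2) <= tolerance
}
```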
/* - Coeus web framework -------------------------
*
* Licensed under the Apache License, Version 2.0.
*
* Author: Spiros Tzavellas
*/
package com.tzavellas.coeus.util.internal
import org.junit.Test
import org.junit.Assert._
class InterpolatorTest {
import Interpolator._
val args: Seq[String] = Array("email", "User", "null")
@Test
def string_with_no_internpolation_variables() {
assertEquals("is required", interpolateNumericVars("is required", Nil))
assertEquals("is required", interpolateVars("is required", Nil))
}
@Test
def string_with_numeric_internpolation_variables() {
assertEquals("email in User cannot be null", interpolateNumericVars("{1} in {2} cannot be {3}", args))
assertEquals("emailUsernull", interpolateNumericVars("{1}{2}{3}", args))
assertEquals(" null User email ", interpolateNumericVars(" {3} {2} {1} ", args))
assertEquals("error:email is null", interpolateNumericVars("error:{1} is {3}", args))
}
@Test
def simple_interpolation() {
assertEquals("/owner/12/", interpolateVars("/owner/{ownerId}/", Seq(12)))
assertEquals("/owner/12/pet/3", interpolateVars("/owner/{ownerId}/pet/{petId}", Seq(12, 3)))
}
@Test
def escape_interpolation_variables() {
    assertEquals("email in User cannot be {null}", interpolateNumericVars("{1} in {2} cannot be \\{null}", args))
    assertEquals("email in User cannot be {null}", interpolateVars("{1} in {2} cannot be \\{null}", args))
}
} | sptz45/coeus | src/test/scala/com/tzavellas/coeus/util/internal/InterpolatorTest.scala | Scala | apache-2.0 | 1,477 |
package org.dsa.iot.rx.core
import org.dsa.iot.rx.RxMerger3
/**
* Combines three Observables into a single Observable of Tuple3, emitting a new tuple each time any
* of the sources emits a new item.
*
* @see <a href="http://reactivex.io/documentation/operators/combinelatest.html">ReactiveX operators documentation: CombineLatest</a>
*/
class CombineLatest3[T1, T2, T3] extends RxMerger3[T1, T2, T3, (T1, T2, T3)] {
protected def compute = source1.in combineLatest source2.in combineLatest source3.in map {
case ((i1, i2), i3) => (i1, i2, i3)
}
}
/**
* Factory for [[CombineLatest3]] instances.
*/
object CombineLatest3 {
/**
* Creates a new CombineLatest3 instance.
*/
def apply[T1, T2, T3]: CombineLatest3[T1, T2, T3] = new CombineLatest3[T1, T2, T3]
} | IOT-DSA/dslink-scala-ignition | src/main/scala/org/dsa/iot/rx/core/CombineLatest3.scala | Scala | apache-2.0 | 784 |
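The scaladoc above describes combine-latest semantics, and the block's `compute` simply delegates to RxScala's `combineLatest`. The underlying operator can be sketched directly against `rx.lang.scala.Observable` (this illustrates the operator only, assuming RxScala is on the classpath; wiring the DSA block itself goes through its `source1`/`source2`/`source3` ports, which are not shown here):

```scala
import rx.lang.scala.Observable
import scala.concurrent.duration._

object CombineLatestSketch extends App {
  val temperature = Observable.interval(100.millis).map(i => 20.0 + i)
  val humidity    = Observable.interval(150.millis).map(i => 50.0 + i)
  val pressure    = Observable.interval(250.millis).map(i => 1000.0 + i)

  // A new (t, h, p) tuple is emitted whenever any of the three sources emits,
  // pairing the fresh value with the latest values from the other two.
  val combined = temperature
    .combineLatest(humidity)
    .combineLatest(pressure)
    .map { case ((t, h), p) => (t, h, p) }

  combined.take(5).toBlocking.foreach(println)
}
```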
package breeze
package object polynomial extends DensePolynomial {
}
| wavelets/breeze | src/main/scala/breeze/polynomial/package.scala | Scala | apache-2.0 | 70 |
package net.sansa_stack.inference.rules.minimizer
import scala.collection.mutable.{ArrayBuffer, Buffer}
import scalax.collection.Graph
import scalax.collection.edge.LDiEdge
import scala.collection.JavaConverters._
import scala.collection.mutable
import scalax.collection.GraphTraversal.Parameters
import org.apache.jena.graph.{Node, NodeFactory}
import org.apache.jena.reasoner.TriplePattern
import org.apache.jena.reasoner.rulesys.Rule
import org.jgrapht.alg.CycleDetector
import org.jgrapht.alg.cycle.TarjanSimpleCycles
import net.sansa_stack.inference.rules.RuleDependencyGraph
import net.sansa_stack.inference.rules.RuleDependencyGraphGenerator.{asString, debug, sameElements}
import net.sansa_stack.inference.utils.{GraphUtils, RuleUtils}
import net.sansa_stack.inference.utils.graph.LabeledEdge
import net.sansa_stack.inference.utils.RuleUtils._
import scalax.collection.GraphTraversal.Parameters
import scalax.collection._
import scalax.collection.edge.Implicits._
import scalax.collection.edge._
import scalax.collection.mutable.DefaultGraphImpl
import scalax.collection.GraphPredef._
import scalax.collection.GraphEdge._
/**
* @author Lorenz Buehmann
*/
abstract class RuleDependencyGraphMinimizer extends MinimizationRuleExecutor {
def batches: Seq[Batch] = Seq(
Batch("Default Minimization", Once,
RemoveLoops,
RemoveEdgesWithCycleOverTCNode,
RemoveEdgesWithPredicateAlreadyTC,
RemoveEdgeIfLongerPathToSameNodeExists,
RemoveCyclesInBothDirections
// RemoveCyclesIfPredicateIsTC,
))
object RemoveLoops extends MinimizationRule {
def apply(graph: RuleDependencyGraph): RuleDependencyGraph = {
debug("removing non-TC loops")
var edges2Remove = Seq[Graph[Rule, LDiEdge]#EdgeT]()
graph.nodes.toSeq.foreach(node => {
debug(s"node " + node.value.getName)
val loopEdge = node.outgoing.find(_.target == node)
if (loopEdge.isDefined) {
val edge = loopEdge.get
val rule = node.value
val isTC = RuleUtils.isTransitiveClosure(rule, edge.label.asInstanceOf[TriplePattern].getPredicate)
if (!isTC) {
edges2Remove :+= edge
debug(s"loop of node $node")
}
}
})
val newNodes = graph.nodes.map(node => node.value)
val newEdges = graph.edges.clone().filterNot(e => edges2Remove.contains(e)).map(edge => edge.toOuter)
new RuleDependencyGraph(newNodes, newEdges)
}
}
object RemoveCyclesIfPredicateIsTC extends MinimizationRule {
def apply(graph: RuleDependencyGraph): RuleDependencyGraph = {
debug("removeCyclesIfPredicateIsTC")
var redundantEdges = Seq[Graph[Rule, LDiEdge]#EdgeT]()
// for each node n in G
graph.nodes.toSeq.foreach(node => {
val rule = node.value
// we only handle cyclic rules
if(node.innerEdgeTraverser.exists(e => e.source == node && e.target == node)) {
debug("#" * 20)
debug(s"NODE:${node.value.getName}")
debug(s"Rule:${node.value}")
val bodyTPs = rule.bodyTriplePatterns()
val headTPs = rule.headTriplePatterns()
// for now we assume only 1 TP in head
if(headTPs.size > 1) {
throw new RuntimeException("Rules with more than 1 triple pattern in head not supported yet!")
}
val head = headTPs.head
// transform to graph
val ruleGraph = RuleUtils.asGraph(rule)
val subjectNode = ruleGraph.get(head.getSubject)
val objectNode = ruleGraph.get(head.getObject)
val headEdge = subjectNode.innerEdgeTraverser.filter(e => e.target == objectNode && e.label == head.getPredicate).head
// check if there is a path in body from the same subject to the same object
val pathOpt = subjectNode.withSubgraph(edges = !_.equals(headEdge)) pathTo objectNode
debug(pathOpt.toString)
// check if there is some other triple pattern in body
if(pathOpt.isDefined) {
val path = pathOpt.get
val predicateOpt: Option[Node] = path.length match {
case 1 =>
val p1 = path.edges.head.label.asInstanceOf[Node]
val p2 = headEdge.label.asInstanceOf[Node]
val p1Node = ruleGraph.get(p1)
val p2Node = ruleGraph.get(p2)
val pEdge = ruleGraph.edges.filter(e => e.source == p1Node && e.target == p2Node).head
Some(pEdge.label.asInstanceOf[Node])
case 2 =>
val otherEdges = path.edges.filterNot(e => e.label == headEdge.label)
if(otherEdges.nonEmpty) {
Some(otherEdges.head.label.asInstanceOf[Node])
} else {
None
}
case _ => None
}
if(predicateOpt.isDefined) {
val predicate = predicateOpt.get
debug(s"Predicate:$predicate")
// check if predicate TC will be materialized before in the RDG
val tcMaterialized = node.innerNodeTraverser.filter(n => {
n.value.headTriplePatterns().exists(tp => tp.getPredicate.matches(predicate)) &&
(n.innerEdgeTraverser.exists(e => e == LDiEdge(n, n)(predicate)) || n.findCycle.isDefined)
})
if(tcMaterialized.nonEmpty) {
debug(s"$predicate already materialized in node(s) ${tcMaterialized.map(n => n.value.getName)}")
val edge = node.innerEdgeTraverser.filter(e =>
e.source == node &&
e.target == node &&
e.label.asInstanceOf[TriplePattern].equals(head)
).head
// val edge = (node ~+> node)(head)
redundantEdges +:= edge
debug(s"remove edge $edge")
}
}
}
}
})
val newNodes = graph.nodes.map(node => node.value)
val newEdges = graph.edges.clone().filterNot(e => redundantEdges.contains(e)).map(edge => edge.toOuter)
new RuleDependencyGraph(newNodes, newEdges)
}
}
object RemoveEdgesWithPredicateAlreadyTC extends MinimizationRule {
// get all nodes that depend on a TC node for a predicate p and another node for p
def apply(graph: RuleDependencyGraph): RuleDependencyGraph = {
debug("removeEdgesWithPredicateAlreadyTC")
var redundantEdges = Seq[Graph[Rule, LDiEdge]#EdgeT]()
// for each node n in G
graph.nodes.toSeq.foreach(node => {
debug("#" * 20)
debug(s"NODE:${node.value.getName}")
// check for nodes that do compute the TC
val outgoingEdges = node.outgoing.withFilter(e => e.target != node)
outgoingEdges.foreach(e => {
val targetNode = e.target
val rule = targetNode.value
val edgeLabel = e.label
val predicate = edgeLabel.asInstanceOf[TriplePattern].getPredicate
// check if the target node computes the TC for the current edge predicate
val isTCNode = RuleUtils.isTransitiveClosure(rule, predicate)
debug(s"Direct successor:${rule.getName}\\t\\tisTC = $isTCNode")
// if it depends on a TC node
if(isTCNode) {
// check for dependency on other nodes that produce the same predicate
val samePredicateEdges = outgoingEdges
.withFilter(e2 => e != e2)
.withFilter(e2 => e2.label.asInstanceOf[TriplePattern].getPredicate.matches(predicate))
debug(s"Redundant edges:${samePredicateEdges.map(e => e.toOuter.source.value.getName + "->" + e.toOuter.target.value.getName)}")
redundantEdges ++:= samePredicateEdges
}
})
})
val newNodes = graph.nodes.map(node => node.value)
val newEdges = graph.edges.clone().filterNot(e => redundantEdges.contains(e)).map(edge => edge.toOuter)
new RuleDependencyGraph(newNodes, newEdges)
}
}
object RemoveEdgesWithCycleOverTCNode extends MinimizationRule {
// for cycles x -p-> y -p-> z -s-> x with y being TC node for p, we can remove edge (z -s-> x)
def apply(graph: RuleDependencyGraph): RuleDependencyGraph = {
debug("removeEdgesWithCycleOverTCNode")
var redundantEdges = Seq[Graph[Rule, LDiEdge]#EdgeT]()
// convert to JGraphT graph for algorithms not contained in Scala Graph API
val g = GraphUtils.asJGraphtRuleSetGraph(graph)
// get cycles of length 3
val cycleDetector = new TarjanSimpleCycles[Rule, LabeledEdge[Rule, TriplePattern]](g)
val cycles = cycleDetector.findSimpleCycles().asScala.filter(c => c.size() == 3)
cycles.foreach(c => {
debug(s"cycle:$c")
val pathNodes = c.asScala.map(n => graph get n)
// get nodes that are TC with same in and out edge
val anchorNodes = pathNodes.filter(n => {
// in and out edge with same predicate
val inPred = n.incoming.head.label.asInstanceOf[TriplePattern].getPredicate
val outPred = n.outgoing.head.label.asInstanceOf[TriplePattern].getPredicate
inPred.matches(outPred) && RuleUtils.isTransitiveClosure(n.value, inPred)
})
if(anchorNodes.size == 1) {
val anchor = anchorNodes.head
// remove edge between two other nodes
val edge = pathNodes.indexOf(anchor) match {
case 0 => pathNodes(1).outgoing.filter(e => e.target == pathNodes(2)).head
case 1 => pathNodes(2).outgoing.filter(e => e.target == pathNodes(0)).head
case 2 => pathNodes(0).outgoing.filter(e => e.target == pathNodes(1)).head
}
debug(s"Redundant edge:${edge}")
redundantEdges +:= edge
}
})
val newNodes = graph.nodes.map(node => node.value)
val newEdges = graph.edges.clone().filterNot(e => redundantEdges.contains(e)).map(edge => edge.toOuter)
new RuleDependencyGraph(newNodes, newEdges)
}
}
object RemoveCyclesInBothDirections extends MinimizationRule {
override def apply(graph: RuleDependencyGraph): RuleDependencyGraph = {
debug("removing redundant cycles")
var edges2Remove = collection.mutable.Set[Graph[Rule, LDiEdge]#EdgeT]()
graph.findCycle
// convert to JGraphT graph for algorithms not contained in Scala Graph API
val g = GraphUtils.asJGraphtRuleSetGraph(graph)
val cycleDetector = new CycleDetector[Rule, LabeledEdge[Rule, TriplePattern]](g)
val cycleDetector2 = new TarjanSimpleCycles[Rule, LabeledEdge[Rule, TriplePattern]](g)
val allCycles = cycleDetector2.findSimpleCycles()
graph.nodes.toSeq.foreach(node => {
debug(s"NODE ${node.value.getName}")
// get cycles of length 3
// val cycles = cycleDetector.findCyclesContainingVertex(node.value)
// debug(cycles.asScala.mkString(","))
// cycles that contain the current node
val cyclesWithNode: mutable.Buffer[mutable.Buffer[Rule]] = allCycles.asScala
.filter(cycle => cycle.contains(node.value))
.map(cycle => cycle.asScala)
debug("Cycles: " + cyclesWithNode.map(c => c.map(r => r.getName)).mkString(","))
// cycles that use the same property
val cyclesWithNodeSameProp: Map[Node, scala.List[mutable.Buffer[graph.EdgeT]]] = cyclesWithNode.map(cycle => {
debug("Cycle: " + cycle.map(r => r.getName).mkString(", "))
// pairs of rules (r1, r2)
var pairsOfRules = cycle zip cycle.tail
pairsOfRules :+= (cycle.last, cycle(0))
// map to list of edges
val edges: mutable.Buffer[graph.EdgeT] = pairsOfRules.flatMap(e => {
val node1 = graph get e._1
val node2 = graph get e._2
node1.outgoing.filter(_.target == node2)
})
debug("Edges: " + edges.mkString(", "))
// map to edge labels, i.e. the predicates
var predicates = edges.map(_.label.asInstanceOf[TriplePattern].getPredicate)
if(predicates.forall(_.isVariable)) predicates = ArrayBuffer(NodeFactory.createVariable("p"))
debug("predicates:" + predicates)
// return predicate if it's commonly used for all edges
val samePred = predicates.size == 1
if (samePred) Some(predicates(0), edges) else None
}).filter(_.isDefined).map(_.get).groupBy(e => e._1).mapValues(e => e.map(x => x._2).toList)
var removedCycles: collection.mutable.Set[mutable.Buffer[graph.EdgeT]] = collection.mutable.Set()
val tmp: Map[Node, Map[Int, List[mutable.Buffer[graph.EdgeT]]]] =
cyclesWithNodeSameProp
.mapValues(value =>
value.map(cycle => (cycle.size, cycle))
.groupBy(_._1)
.mapValues(e => e.map(x => x._2)))
tmp.foreach(predicate2Cycles => {
debug("predicate: " + predicate2Cycles._1)
predicate2Cycles._2.foreach(entry => {
debug(s"length ${entry._1}")
val prop2Cycle = entry._2
var pairsOfCycles = prop2Cycle zip prop2Cycle.tail
pairsOfCycles.foreach(pair => {
val cycle1 = pair._1
val cycle2 = pair._2
val cycle1Nodes = cycle1.map(_.source).toSet
val cycle2Nodes = cycle2.map(_.source).toSet
debug(cycle1Nodes.map(_.value.getName).mkString(", ") + " ???? " + cycle2Nodes.map(_.value.getName).mkString(", "))
// check if both cycles contain the same nodes
if(cycle1Nodes == cycle2Nodes) {
debug("redundant cycle " + pair._1.map(_.source.value.getName))
// we can remove cycle1 if cycle2 wasn't removed before
if (!removedCycles.exists(c => sameElements(c, cycle2))) {
removedCycles += cycle1
}
}
})
})
})
removedCycles.map(c => c.map(_.asInstanceOf[Graph[Rule, LDiEdge]#EdgeT])).foreach(c =>
{
edges2Remove ++= c
})
})
val newNodes = graph.nodes.map(node => node.value)
val newEdges = graph.edges.clone().filterNot(e => edges2Remove.contains(e)).map(edge => edge.toOuter)
new RuleDependencyGraph(newNodes, newEdges)
}
override def debug(msg: => String): Unit = println(msg)
}
object RemoveEdgeIfLongerPathToSameNodeExists extends MinimizationRule {
def apply(graph: RuleDependencyGraph): RuleDependencyGraph = {
var redundantEdges = Seq[Graph[Rule, LDiEdge]#EdgeT]()
// graph.outerNodeTraverser.foreach(n => println(n))
// for each node n in G
graph.nodes.toSeq.foreach(node => {
debug("#" * 20)
debug(s"NODE:${node.value.getName}")
// get all direct successors
var successors = node.innerNodeTraverser.withParameters(Parameters(maxDepth = 1)).toList
// remove node itself, if it's a cyclic node
successors = successors.filterNot(_.equals(node))
debug(s"DIRECT SUCCESSORS:${successors.map(n => n.value.getName).mkString(", ")}")
if (successors.size > 1) {
// get pairs of successors
val pairs = successors zip successors.tail
pairs.foreach(pair => {
debug(s"PAIR:${pair._1.value.getName},${pair._2.value.getName}")
val n1 = pair._1
val edge1 = node.innerEdgeTraverser.filter(e => e.source == node && e.target == n1).head
val n2 = pair._2
val edge2 = node.innerEdgeTraverser.filter(e => e.source == node && e.target == n2).head
// n --p--> n1
val path1 = node.withSubgraph(edges = e => !e.equals(edge2) && !redundantEdges.contains(e)) pathTo n2
if (path1.isDefined) {
debug(s"PATH TO ${n2.value.getName}: ${path1.get.edges.toList.map(edge => asString(edge))}")
val edges = path1.get.edges.toList
edges.foreach(edge => {
debug(s"EDGE:${asString(edge)}")
})
val last = edges.last.value
if (last.label == edge2.label) {
debug(s"redundant edge $edge2")
redundantEdges :+= edge2
}
} else {
debug(s"NO OTHER PATH FROM ${node.value.getName} TO ${n2.value.getName}")
}
val path2 = node.withSubgraph(edges = e => !e.equals(edge1) && !redundantEdges.contains(e)) pathTo n1
if (path2.isDefined) {
debug(s"PATH TO:${n1.value.getName}")
debug(s"PATH:${path2.get.edges.toList.map(edge => asString(edge))}")
val edges = path2.get.edges.toList
edges.foreach(edge => {
debug(s"EDGE:${asString(edge)}")
})
val last = edges.last.value
if (last.label == edge1.label) {
debug(s"redundant edge $edge1")
redundantEdges :+= edge1
}
} else {
debug(s"NO OTHER PATH FROM ${node.value.getName} TO ${n1.value.getName}")
}
})
}
})
val newNodes = graph.nodes.map(node => node.value)
val newEdges = graph.edges.clone().filterNot(e => redundantEdges.contains(e)).map(edge => edge.toOuter)
new RuleDependencyGraph(newNodes, newEdges)
}
}
}
| SANSA-Stack/SANSA-RDF | sansa-inference/sansa-inference-common/src/main/scala/net/sansa_stack/inference/rules/minimizer/RuleDependencyGraphMinimizer.scala | Scala | apache-2.0 | 17,686 |
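All of the minimization rules above manipulate a `scalax.collection` labelled digraph of rules. Here is a small self-contained sketch of the graph operations they rely on (building labelled edges, looking up inner nodes, walking outgoing edges, detecting cycles), using plain `String` nodes in place of `Rule`s; it is illustrative only and not part of the SANSA API:

```scala
import scalax.collection.Graph
import scalax.collection.GraphPredef._
import scalax.collection.edge.LDiEdge

object RdgOperationsSketch extends App {
  // Three "rules" forming a cycle; every edge carries a predicate-like label.
  val g = Graph[String, LDiEdge](
    LDiEdge("r1", "r2")("p"),
    LDiEdge("r2", "r3")("p"),
    LDiEdge("r3", "r1")("s"))

  // Cycle detection, as used for example by RemoveCyclesInBothDirections.
  println(g.findCycle)

  // Inner-node lookup plus outgoing-edge traversal with labels,
  // the basic moves used when inspecting which predicates connect two rules.
  val r1 = g.get("r1")
  r1.outgoing.foreach(e => println(s"${e.source.value} -[${e.label}]-> ${e.target.value}"))
}
```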
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.helptosavefrontend.controllers
import com.codahale.metrics.{Counter, Timer}
import com.kenshoo.play.metrics.{Metrics => PlayMetrics}
import com.typesafe.config.ConfigFactory
import org.scalatestplus.play.guice.GuiceOneAppPerTest
import play.api.i18n.{I18nSupport, MessagesApi}
import play.api.inject.Injector
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.{Application, Configuration}
import uk.gov.hmrc.helptosavefrontend.metrics.Metrics
import scala.concurrent.ExecutionContext
trait ControllerSpecWithGuiceAppPerTest extends ControllerSpecBase with GuiceOneAppPerTest with I18nSupport {
lazy val additionalConfig = Configuration()
def buildFakeApplication(additionalConfig: Configuration): Application =
new GuiceApplicationBuilder()
.configure(
Configuration(
ConfigFactory.parseString("""
| metrics.jvm = false
| metrics.enabled = true
| play.modules.disabled = [ "play.api.mvc.CookiesModule",
| "uk.gov.hmrc.helptosavefrontend.config.HealthCheckModule",
| "akka.event.slf4j.Slf4jLogger"
| ]
| mongodb.session.expireAfter = 5 seconds
""".stripMargin)
).withFallback(additionalConfig)
)
.build()
override def fakeApplication = buildFakeApplication(additionalConfig)
lazy val injector: Injector = fakeApplication.injector
implicit lazy val ec: ExecutionContext = injector.instanceOf[ExecutionContext]
val messagesApi = injector.instanceOf(classOf[MessagesApi])
override val mockMetrics = new Metrics(stub[PlayMetrics]) {
override def timer(name: String): Timer = new Timer()
override def counter(name: String): Counter = new Counter()
}
}
| hmrc/help-to-save-frontend | test/uk/gov/hmrc/helptosavefrontend/controllers/ControllerSpecWithGuiceAppPerTest.scala | Scala | apache-2.0 | 2,553 |
package provingground.scalahott
import provingground._
import HoTT._
import spire.implicits._
import spire.math._
import org.scalatest._, flatspec._
import QField.{LocalTyp => Q, Literal => rat, _}
class RationalsSpec extends flatspec.AnyFlatSpec {
val x = "x" :: Q
val y = "y" :: Q
"Rational division" should "act on literals" in {
assert(rat(2) / rat(3) == rat(Rational(2, 3)))
}
it should "cancel correctly" in {
assert(x / x == rat(1))
assert(((x + 1) / (x * x)) == (1 / x + 1 / (x * x)))
}
it should "substitute and simplify" in {
val fn = x :-> (x + 1) / (x + 2)
assert(fn(rat(2)) == rat(3) / rat(4))
assert(fn(y - 2) == rat(1) - rat(1) / y)
}
}
| siddhartha-gadgil/ProvingGround | mantle/src/test/scala/provingground/RationalsSpec.scala | Scala | mit | 706 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.webmvc.dispatch
import org.beangle.commons.lang.annotation.spi
import org.beangle.webmvc.config.RouteMapping
import jakarta.servlet.http.HttpServletRequest
@spi
trait RequestMapper {
def resolve(uri: String, request: HttpServletRequest): Option[HandlerHolder]
def resolve(uri: String): Option[HandlerHolder]
def build(): Unit
}
/**
* Url render
*/
trait ActionUriRender {
def render(action: RouteMapping, uri: String): String
}
@spi
trait RouteProvider {
def routes: Iterable[Route]
}
| beangle/webmvc | core/src/main/scala/org/beangle/webmvc/dispatch/spi.scala | Scala | lgpl-3.0 | 1,245 |
package controllers
import models.User
import models.Subscription
import com.mohiva.play.silhouette.core.{ LogoutEvent, Environment, Silhouette }
import com.mohiva.play.silhouette.contrib.services.CachedCookieAuthenticator
import scala.concurrent.Future
import javax.inject.Inject
import models.daos.ProductDAO
import models.daos.ChargeDAO
import forms._
import play.api.Play
import play.api.Logger
import com.conekta.Plan
import utils.ConektaCurrencyMatcher
import models.daos.SubscriptionDAO
/**
* The basic application controller.
*
* @param env The Silhouette environment.
*/
class SubscriptionsController @Inject() (implicit val env: Environment[User, CachedCookieAuthenticator])
extends Silhouette[User, CachedCookieAuthenticator] {
def resume() = SecuredAction.async { implicit request =>
val user = request.identity
user.currentSubscription.map { subscription =>
subscription.resume
Future.successful(Redirect(routes.ApplicationController.index).flashing(
"success" -> "Suscripción reactivada"))
}.getOrElse(Future.successful(NotFound))
}
def pause() = SecuredAction.async { implicit request =>
val user = request.identity
user.currentSubscription.map { subscription =>
subscription.pause
Future.successful(Redirect(routes.ApplicationController.index).flashing(
"success" -> "Suscripción pausada"))
}.getOrElse(Future.successful(NotFound))
}
def cancel() = SecuredAction.async { implicit request =>
val user = request.identity
user.currentSubscription.map { subscription =>
subscription.cancel
Future.successful(Redirect(routes.ApplicationController.index).flashing(
"success" -> "Suscripción cancelada"))
}.getOrElse(Future.successful(NotFound))
}
} | Wirwing/hello-conekta-play-framework | app/controllers/SubscriptionsController.scala | Scala | mit | 1,794 |
package de.leanovate.swaggercheck.shrinkable
import org.scalacheck.Shrink
import org.scalatest.{MustMatchers, WordSpec}
class CheckJsArraySpec extends WordSpec with MustMatchers {
"JsArray" should {
"shrink without min size" in {
val original = CheckJsArray(None, Seq(
CheckJsInteger(None, None, 1000000),
CheckJsString.unformatted("0123456789abcdefghijklmnopqrstuvwxyz"),
CheckJsBoolean(true),
CheckJsBoolean(false),
CheckJsInteger(None, None, 10000),
CheckJsString.unformatted("zyxwvutsrqponmlkjihgfedcba9876543210")
))
val originalJson = original.minified
val shrink = Shrink.shrink(original)
shrink must not be empty
shrink.foreach {
value =>
value.minSize mustBe empty
value.elements.length must be <= 6
}
}
"shrink with min size" in {
val original = CheckJsArray(Some(4), Seq(
CheckJsInteger(None, None, 1000000),
CheckJsString.unformatted( "0123456789abcdefghijklmnopqrstuvwxyz"),
CheckJsBoolean(true),
CheckJsBoolean(false),
CheckJsInteger(None, None, 10000),
CheckJsString.unformatted("zyxwvutsrqponmlkjihgfedcba9876543210")
))
val originalJson = original.minified
val shrink = Shrink.shrink(original)
shrink must not be empty
shrink.foreach {
value =>
value.minSize mustBe Some(4)
value.elements.length must be <= 6
value.elements.length must be >= 4
}
}
"not shrink beneath min size" in {
val original = CheckJsArray(Some(6), Seq(
CheckJsInteger(None, None, 12345678),
CheckJsString.unformatted("0123456789abcdefghijklmnopqrstuvwxyz"),
CheckJsBoolean(true),
CheckJsBoolean(false),
CheckJsInteger(None, None, 87654321),
CheckJsString.unformatted("zyxwvutsrqponmlkjihgfedcba9876543210")
))
val originalJson = original.minified
val shrink = Shrink.shrink(original)
shrink mustBe empty
}
}
}
| leanovate/swagger-check | json-schema-gen/src/test/scala/de/leanovate/swaggercheck/shrinkable/CheckJsArraySpec.scala | Scala | mit | 2,061 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import org.apache.spark.sql.{Row, QueryTest}
import org.apache.spark.sql.hive.test.TestHive._
/**
 * A set of tests that validates that command results can also be queried like a table
*/
class HiveOperatorQueryableSuite extends QueryTest {
  // Query result of the describe command
test("SPARK-5324 query result of describe command") {
loadTestTable("src")
    // register the result of a describe command as a temp table
sql("desc src").registerTempTable("mydesc")
checkAnswer(
sql("desc mydesc"),
Seq(
Row("col_name", "string", "name of the column"),
Row("data_type", "string", "data type of the column"),
Row("comment", "string", "comment of the column")))
checkAnswer(
sql("select * from mydesc"),
Seq(
Row("key", "int", null),
Row("value", "string", null)))
checkAnswer(
sql("select col_name, data_type, comment from mydesc"),
Seq(
Row("key", "int", null),
Row("value", "string", null)))
}
}
| tophua/spark1.52 | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveOperatorQueryableSuite.scala | Scala | apache-2.0 | 1,940 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.magic.builtin
import org.apache.toree.magic.LineMagic
import org.apache.toree.magic.dependencies.IncludeOutputStream
import java.io.PrintStream
import org.apache.toree.kernel.api.KernelOptions
class ShowTypes extends LineMagic with IncludeOutputStream {
private lazy val printStream = new PrintStream(outputStream)
override def execute(code: String): Unit = {
code match {
case "on" =>
printStream.println(s"Types will be printed.")
KernelOptions.showTypes = true
case "off" =>
printStream.println(s"Types will not be printed")
KernelOptions.showTypes = false
case "" =>
printStream.println(s"ShowTypes is currently ${if (KernelOptions.showTypes) "on" else "off"} ")
case other =>
printStream.println(s"${other} is not a valid option for the ShowTypes magic.")
}
}
}
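// A minimal usage sketch (illustrative only, not part of the class above). In a Toree
// notebook cell the magic is invoked as a line magic, for example:
//   %ShowTypes on    // result values are printed together with their static type
//   %ShowTypes off   // switch the behaviour off again
//   %ShowTypes       // report whether the option is currently on or off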
| asorianostratio/incubator-toree | kernel/src/main/scala/org/apache/toree/magic/builtin/ShowTypes.scala | Scala | apache-2.0 | 1,695 |
package org.scalaide.refactoring.internal
import java.text.Collator
import java.util.Comparator
import scala.tools.refactoring.implementations
import scala.tools.refactoring.implementations.OrganizeImports.ImportsStrategy
import scala.tools.refactoring.implementations.OrganizeImports.OrganizeImportsConfig
import org.eclipse.core.runtime.IProgressMonitor
import org.eclipse.jdt.core.IJavaElement
import org.eclipse.jdt.core.compiler.IProblem
import org.eclipse.jdt.core.search.IJavaSearchConstants
import org.eclipse.jdt.core.search.SearchEngine
import org.eclipse.jdt.core.search.TypeNameMatch
import org.eclipse.jdt.internal.corext.util.QualifiedTypeNameHistory
import org.eclipse.jdt.internal.corext.util.TypeNameMatchCollector
import org.eclipse.jdt.internal.ui.actions.ActionMessages
import org.eclipse.jdt.internal.ui.dialogs.MultiElementListSelectionDialog
import org.eclipse.jdt.internal.ui.util.TypeNameMatchLabelProvider
import org.eclipse.jface.window.Window
import org.scalaide.core.internal.jdt.model.LazyToplevelClass
import org.scalaide.core.internal.jdt.model.ScalaElement
import org.scalaide.core.internal.jdt.model.ScalaSourceFile
import org.scalaide.core.internal.statistics.Features.OrganizeImports
import org.scalaide.ui.internal.preferences.OrganizeImportsPreferences._
import org.scalaide.util.eclipse.EditorUtils
import org.scalaide.util.internal.eclipse.TextEditUtils
/**
* The Scala implemention of Organize Imports.
*
* Organize Imports can work in two different modes, depending on whether there are
* errors in the source file:
*
* - With no errors, the refactoring simply calls the Refactoring Library's Organize Imports with the users' configuration settings.
* - When there are errors, specifically missing types, Organize Imports uses a SearchEngine to find the missing types to import. If
* there are ambiguities, the user is prompted to select the correct import.
*
*/
class OrganizeImports extends RefactoringExecutorWithoutWizard {
override def createRefactoring(selectionStart: Int, selectionEnd: Int, file: ScalaSourceFile) =
new OrganizeImportsScalaIdeRefactoring(file)
override def perform(): Unit = {
/**
* Returns an array of all the types that are missing in the source file.
*/
def getMissingTypeErrorsFromFile(file: ScalaSourceFile): Array[String] = {
val problems = Option(file.getProblems) getOrElse Array[IProblem]()
      val typeNotFoundError = "not found: type ([^\\s]+).*".r
      val valueNotFoundError = "not found: value ([^\\s]+)".r
val errors = problems filter (_.isError) map (_.getMessage) collect {
case typeNotFoundError(name) => name
case valueNotFoundError(name) => name
}
errors.distinct
}
/**
* Uses a SearchEngine to find all possible types that match the missing type's names.
* Only types that are visible are returned, types that are inner classes of other
* classes are filtered because they cannot be imported at the top level.
*
* @return Groups of types that are candidates for a missing type.
*/
def findSuggestionsForMissingTypes(missingTypes: Array[String], file: ScalaSourceFile, pm: IProgressMonitor): Iterable[Array[TypeNameMatch]] = {
val resultCollector = new java.util.ArrayList[TypeNameMatch]
val scope = SearchEngine.createJavaSearchScope(Array[IJavaElement](file.getJavaProject))
val typesToSearch = missingTypes map (_.toArray)
new SearchEngine().searchAllTypeNames(null, typesToSearch, scope, new TypeNameMatchCollector(resultCollector), IJavaSearchConstants.WAIT_UNTIL_READY_TO_SEARCH, pm)
val allFoundTypes = resultCollector.toArray[TypeNameMatch](Array[TypeNameMatch]())
val visibleTypes = allFoundTypes filter { typeNameMatch =>
typeNameMatch.getType match {
case se: ScalaElement => se.isVisible
case tpe =>
// if it's not a ScalaElement, it could still be an inner class,
// and we cannot import them at the top level. TODO: Is this check enough?
tpe.getParent match {
case _: LazyToplevelClass => false // Could the parent be an object?
case _ => true
}
}
}
visibleTypes.groupBy(_.getSimpleTypeName).values
}
/**
* Checks if all the problems in the compilation unit have been fixed. If there's no editor,
* true is returned as well to signal that no further processing needs to be attempted.
*/
def allProblemsFixed = {
EditorUtils.withCurrentScalaSourceFile { file =>
Option(file.getProblems).map(_.isEmpty) getOrElse true
} getOrElse true // no editor? then we are in trouble and can abort anyway
}
/**
     * Adds the imports to the current editor's source file. This needs the current
     * editor and source file, so it has to be run in the UI thread. The user's
* selection will be retained if that's possible.
*
* This uses the refactoring library's AddImportStatement refactoring.
*/
def addImports(imports: Iterable[TypeNameMatch], pm: IProgressMonitor): Unit = {
/**
* Creates the change objects that are needed to add the imports to the source file.
*
* @return A list of changes or an empty list if the source file cannot be obtained.
*/
def createChanges(scalaSourceFile: ScalaSourceFile, imports: Iterable[TypeNameMatch], pm: IProgressMonitor) = {
scalaSourceFile.withSourceFile { (sourceFile, compiler) =>
import org.scalaide.core.compiler.IScalaPresentationCompiler.Implicits._
val fullyQualifiedNames = imports map (_.getFullyQualifiedName)
compiler.asyncExec {
val refactoring = new implementations.AddImportStatement {
val global = compiler
}
refactoring.addImports(scalaSourceFile.file, fullyQualifiedNames)
}.getOrElse(Nil)()
} getOrElse (Nil)
}
EditorUtils.withCurrentEditor { editor =>
pm.subTask("Waiting for the compiler to finish..")
EditorUtils.withScalaSourceFileAndSelection { (scalaSourceFile, textSelection) =>
pm.subTask("Applying the changes.")
val changes = createChanges(scalaSourceFile, imports, pm)
val document = editor.getDocumentProvider.getDocument(editor.getEditorInput)
TextEditUtils.applyChangesToFileWhileKeepingSelection(document, textSelection, scalaSourceFile.file, changes, false)
None
}
}
}
/**
* Asks the user to choose between ambiguous missing types, using the same machinery as the JDT.
*
* It also updates the QualifiedTypeNameHistory for the chosen types so they will be preferred in
* subsequent runs.
*/
def decideAmbiguousMissingTypes(missingTypes: Array[Array[TypeNameMatch]]): Option[Array[TypeNameMatch]] = {
val typeSearchDialog = {
val labelProvider = new TypeNameMatchLabelProvider(TypeNameMatchLabelProvider.SHOW_FULLYQUALIFIED)
new MultiElementListSelectionDialog(ProgressHelpers.shell, labelProvider) {
setTitle(ActionMessages.OrganizeImportsAction_selectiondialog_title)
setMessage(ActionMessages.OrganizeImportsAction_selectiondialog_message)
}
}
typeSearchDialog.setElements(missingTypes map (_.map (_.asInstanceOf[Object])))
typeSearchDialog.setComparator(new TypeSearchComparator)
if (missingTypes.size > 0 && typeSearchDialog.open() == Window.OK) {
Some(typeSearchDialog.getResult map {
case array: Array[_] if array.length > 0 =>
array(0) match {
case tpeName: TypeNameMatch =>
QualifiedTypeNameHistory.remember(tpeName.getFullyQualifiedName)
tpeName
}
})
} else {
None
}
}
/**
* Maps the missing type names to fully qualified names and adds them as imports to the file.
*
* If there are still problems remaining after all the imports have been added, the function calls
* itself until all the missing type errors are gone. At most three passes are performed.
*/
def addMissingImportsToFile(missingTypes: Array[String], file: ScalaSourceFile, pm: IProgressMonitor): Unit = {
pm.subTask("Finding suggestions for the missing types..")
def iterate(missingTypes: Array[String], remainingPasses: Int): Unit = {
findSuggestionsForMissingTypes(missingTypes, file, pm).partition(_.size <= 1) match {
case (Nil, Nil) =>
          case (uniqueTypes, ambiguousTypes) =>
            decideAmbiguousMissingTypes(ambiguousTypes.toArray) match {
case Some(missingTypes) =>
addImports(uniqueTypes.flatten ++ missingTypes, pm)
if(!allProblemsFixed && remainingPasses > 0) {
// We restart with an updated list of problems, hoping
// that some errors have been resolved.
iterate(getMissingTypeErrorsFromFile(file), remainingPasses - 1)
}
case None =>
// the user canceled, so we just add the unique types and stop
addImports(uniqueTypes.flatten, pm)
}
}
}
iterate(missingTypes, 3)
}
EditorUtils.withCurrentScalaSourceFile { file =>
getMissingTypeErrorsFromFile(file) match {
case missingTypes if missingTypes.isEmpty =>
// continue with organizing imports
runRefactoringInUiJob()
case missingTypes =>
ProgressHelpers.runInProgressDialogBlockUi { pm =>
pm.beginTask("Organizing Imports", 4)
addMissingImportsToFile(missingTypes, file, pm)
pm.done
}
}
}
}
class OrganizeImportsScalaIdeRefactoring(override val file: ScalaSourceFile) extends ScalaIdeRefactoring(OrganizeImports, "Organize Imports", file, 0, 0) {
lazy val compilationUnitHasProblems = file.getProblems != null && file.getProblems.exists(_.isError)
override val refactoring = withCompiler( c => new implementations.OrganizeImports with FormattingOverrides { override val global = c })
override protected def leaveDirty = true
override def checkInitialConditions(pm: IProgressMonitor) = {
val status = super.checkInitialConditions(pm)
if(compilationUnitHasProblems) {
status.addWarning("There are errors in the file, organizing imports might produce incorrect results.")
}
status
}
override def refactoringParameters = {
val project = file.getJavaProject.getProject
val organizationStrategy = getOrganizeImportStrategy(project)
val options = {
import refactoring.oiWorker.participants._
val expandOrCollapse = organizationStrategy match {
case CollapseImports => List(SortImportSelectors)
case PreserveExistingGroups => Nil // this is not passed as an option
}
val scalaPackageStrategy = if (shouldOmitScalaPackage(project)){
DropScalaPackage
} else {
PrependScalaPackage
}
expandOrCollapse ::: List(scalaPackageStrategy)
}
val deps = {
if(compilationUnitHasProblems) {
// this is safer when there are problems in the compilation unit
refactoring.Dependencies.RemoveUnneeded
} else if (organizationStrategy == PreserveExistingGroups || organizationStrategy == PreserveWildcards) {
// preserve the existing grouping of imports, but still remove all unneeded ones
refactoring.Dependencies.RecomputeAndModify
} else {
refactoring.Dependencies.FullyRecompute
}
}
val organizeImportsConfig = Option(OrganizeImportsConfig(
importsStrategy = ImportsStrategy(organizationStrategy.toString),
wildcards = getWildcardImportsForProject(project).toSet,
groups = getGroupsForProject(project).toList,
scalaPackageStrategy = shouldOmitScalaPackage(project)))
new refactoring.RefactoringParameters(options = options, deps = deps,
config = organizeImportsConfig)
}
}
private class TypeSearchComparator extends Comparator[Object] {
override def compare(o1: Object, o2: Object): Int = o1 match {
case o1: String if o1 == o2 => 0
case _ =>
List(o1, o2) map (o => QualifiedTypeNameHistory.getDefault.getPosition(o.toString)) match {
case x :: y :: Nil if x == y => Collator.getInstance.compare(o1, o2)
case x :: y :: Nil => y - x
case _ => 0
}
}
}
}
| sschaef/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/refactoring/internal/OrganizeImports.scala | Scala | bsd-3-clause | 12,696 |
package de.tudresden.inf.lat.tabulas.table
import java.util.{Comparator, Objects}
import de.tudresden.inf.lat.tabulas.datatype.{PrimitiveTypeValue, Record}
/** Comparator for records.
*
*/
case class RecordComparator(sortingOrder: Seq[String], fieldsWithReverseOrder: Set[String]) extends Comparator[Record] {
override val toString: String = sortingOrder.toString
val getSortingOrder: Seq[String] = sortingOrder
val getFieldsWithReverseOrder: Set[String] = fieldsWithReverseOrder
override def compare(record0: Record, record1: Record): Int = {
val result = if (Objects.isNull(record0)) {
val res = if (Objects.isNull(record1)) {
0
} else {
-1
}
res
} else {
val res = if (Objects.isNull(record1)) {
1
} else {
val maybeDifference = sortingOrder.find(token => {
val comparison = compareValues(record0.get(token), record1.get(token), fieldsWithReverseOrder.contains(token))
comparison != 0
})
.map(token =>
compareValues(record0.get(token), record1.get(token), fieldsWithReverseOrder.contains(token)))
maybeDifference.getOrElse(0)
}
res
}
result
}
def compareValues(optValue0: Option[PrimitiveTypeValue], optValue1: Option[PrimitiveTypeValue], hasReverseOrder: Boolean): Int = {
val result = if (hasReverseOrder) {
compareValues(optValue1, optValue0, hasReverseOrder = false)
} else {
val res = if (optValue0.isDefined) {
if (optValue1.isDefined) {
optValue0.get.compareTo(optValue1.get)
} else {
1
}
} else {
if (optValue1.isDefined) {
-1
} else {
0
}
}
res
}
result
}
}
object RecordComparator {}
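/** A minimal usage sketch (not part of the original file): sorts records by a "name"
  * field ascending and a "year" field descending. The field names are assumptions made
  * purely for illustration.
  */
object RecordComparatorExample {
  def sortByNameThenYearDesc(records: Seq[Record]): Seq[Record] = {
    val comparator = RecordComparator(Seq("name", "year"), Set("year"))
    records.sorted(scala.math.Ordering.comparatorToOrdering(comparator))
  }
}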
| julianmendez/tabulas | tabulas-core/src/main/scala/de/tudresden/inf/lat/tabulas/table/RecordComparator.scala | Scala | apache-2.0 | 1,813 |
package com.codegans.moodmeter.util
import com.codegans.moodmeter.model.{Vote, Presentation, User}
import org.squeryl.Schema
/**
* JavaDoc here
*
* @author Victor Polischuk
* @since 15.03.2015 9:52
*/
object Library extends Schema {
val users = table[User]
val presentations = table[Presentation]
var votes = table[Vote]
}
| victor-cr/mood-meter | server/src/main/scala/com/codegans/moodmeter/util/Library.scala | Scala | mit | 336 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow}
import org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.catalyst.expressions.{Attribute, Literal}
import org.apache.spark.sql.types.{StructField, StructType}
object LocalRelation {
def apply(output: Attribute*): LocalRelation = new LocalRelation(output)
def apply(output1: StructField, output: StructField*): LocalRelation = {
new LocalRelation(StructType(output1 +: output).toAttributes)
}
def fromExternalRows(output: Seq[Attribute], data: Seq[Row]): LocalRelation = {
val schema = StructType.fromAttributes(output)
val converter = CatalystTypeConverters.createToCatalystConverter(schema)
LocalRelation(output, data.map(converter(_).asInstanceOf[InternalRow]))
}
def fromProduct(output: Seq[Attribute], data: Seq[Product]): LocalRelation = {
val schema = StructType.fromAttributes(output)
val converter = CatalystTypeConverters.createToCatalystConverter(schema)
LocalRelation(output, data.map(converter(_).asInstanceOf[InternalRow]))
}
}
case class LocalRelation(output: Seq[Attribute],
data: Seq[InternalRow] = Nil,
// Indicates whether this relation has data from a streaming source.
override val isStreaming: Boolean = false)
extends LeafNode with analysis.MultiInstanceRelation {
// A local relation must have resolved output.
require(output.forall(_.resolved), "Unresolved attributes found when constructing LocalRelation.")
/**
* Returns an identical copy of this relation with new exprIds for all attributes. Different
* attributes are required when a relation is going to be included multiple times in the same
* query.
*/
override final def newInstance(): this.type = {
LocalRelation(output.map(_.newInstance()), data).asInstanceOf[this.type]
}
override protected def stringArgs: Iterator[Any] = {
if (data.isEmpty) {
Iterator("<empty>", output)
} else {
Iterator(output)
}
}
override def computeStats(): Statistics =
Statistics(sizeInBytes = output.map(n => BigInt(n.dataType.defaultSize)).sum * data.length)
def toSQL(inlineTableName: String): String = {
require(data.nonEmpty)
val types = output.map(_.dataType)
val rows = data.map { row =>
val cells = row.toSeq(types).zip(types).map { case (v, tpe) => Literal(v, tpe).sql }
cells.mkString("(", ", ", ")")
}
"VALUES " + rows.mkString(", ") +
" AS " + inlineTableName +
output.map(_.name).mkString("(", ", ", ")")
}
}
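/** A minimal sketch (not part of Catalyst itself): builds a small two-column in-memory
  * relation from external Rows via `fromExternalRows`. The column names and values are
  * illustrative only.
  */
object LocalRelationExample {
  import org.apache.spark.sql.types.{IntegerType, StringType}
  def example: LocalRelation = {
    val schema = StructType(Seq(StructField("id", IntegerType), StructField("name", StringType)))
    LocalRelation.fromExternalRows(schema.toAttributes, Seq(Row(1, "a"), Row(2, "b")))
  }
}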
| stanzhai/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LocalRelation.scala | Scala | apache-2.0 | 3,500 |
import scala.reflect.classTag
@deprecated("Suppress warnings", since="2.11")
object Test extends App {
println(classManifest[scala.List[_]])
println(classTag[scala.List[_]])
println(classManifest[scala.collection.immutable.List[_]])
println(classTag[scala.collection.immutable.List[_]])
println(classManifest[Predef.Set[_]])
println(classTag[Predef.Set[_]])
println(classManifest[scala.collection.immutable.Set[_]])
println(classTag[scala.collection.immutable.Set[_]])
}
| som-snytt/dotty | tests/disabled/reflect/run/t6329_vanilla.scala | Scala | apache-2.0 | 488 |
package com.github.j5ik2o.forseti.infrastructure.util
import scala.concurrent.Future
import scalaz.EitherT
import scalaz.syntax.either._
object EitherTUtil {
def createRightOfEitherT[A <: Throwable, B](value: B): EitherT[Future, A, B] = {
EitherT(Future.successful(value.right[A]))
}
def createLeftOfEitherT[A <: Throwable, B](ex: A): EitherT[Future, A, B] = {
EitherT(Future.successful(ex.left[B]))
}
implicit class RightOfEitherTPimp[B](value: B) {
def toRightTFuture[A <: Throwable]: EitherT[Future, A, B] = createRightOfEitherT(value)
}
}
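/** A minimal usage sketch (not part of the original utility): lifts a plain value and an
  * exception into `EitherT[Future, Throwable, _]` using the helpers above.
  */
object EitherTUtilExample {
  import EitherTUtil._
  def success: EitherT[Future, Throwable, Int] = 42.toRightTFuture[Throwable]
  def failure: EitherT[Future, Throwable, Int] =
    EitherTUtil.createLeftOfEitherT[Throwable, Int](new RuntimeException("boom"))
}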
| j5ik2o/forseti | infrastructure/src/main/scala/com/github/j5ik2o/forseti/infrastructure/util/EitherTUtil.scala | Scala | mit | 573 |
package com.twitter.finagle.exp.swift
import com.twitter.finagle.Service
import com.twitter.finagle.thrift.ThriftClientRequest
import com.twitter.util.{Await, Future}
import org.apache.thrift.protocol._
import org.apache.thrift.transport._
import org.junit.runner.RunWith
import org.mockito.ArgumentCaptor
import org.mockito.Matchers.any
import org.mockito.Mockito.{verify, when}
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
@RunWith(classOf[JUnitRunner])
class ProxyTest extends FunSuite with MockitoSugar {
test("creates valid TMessages") {
val service = mock[Service[ThriftClientRequest, Array[Byte]]]
val proxy = SwiftProxy.newClient[Test1](service)
when(service(any[ThriftClientRequest])).thenReturn(Future.never)
proxy.ping("hello")
val arg = ArgumentCaptor.forClass(classOf[ThriftClientRequest])
verify(service).apply(arg.capture())
val request = arg.getValue()
assert(!request.oneway)
val in = new TBinaryProtocol(
new TMemoryInputTransport(request.message))
val msg = in.readMessageBegin()
assert(msg.`type` === TMessageType.CALL)
in.readStructBegin()
val f = in.readFieldBegin()
assert(f.`type` === TType.STRING)
assert(f.`id` === 1)
in.readFieldEnd()
assert(in.readFieldBegin().`type` === TType.STOP)
in.readFieldEnd()
in.readStructEnd()
}
test("parses replies") {
val service = mock[Service[ThriftClientRequest, Array[Byte]]]
val proxy = SwiftProxy.newClient[Test1](service)
val reply = Util.newMessage("ping", TMessageType.REPLY) { out =>
out.writeFieldBegin(new TField("success", TType.STRING, 0))
out.writeString("YAY")
}
when(service(any[ThriftClientRequest])).thenReturn(Future.value(reply))
assert(Await.result(proxy.ping("OH HEY")) === "YAY")
}
}
| LithiumTD/finagle | finagle-swift/src/test/scala/com/twitter/finagle/swift/ProxyTest.scala | Scala | apache-2.0 | 1,864 |
package scoverage
import org.scalatest.BeforeAndAfter
import org.scalatest.OneInstancePerTest
import org.scalatest.funsuite.AnyFunSuite
/** @author Stephen Samuel */
class CoverageTest
extends AnyFunSuite
with BeforeAndAfter
with OneInstancePerTest {
test("coverage for no statements is 1") {
val coverage = Coverage()
assert(1.0 === coverage.statementCoverage)
}
test("coverage for no invoked statements is 0") {
val coverage = Coverage()
coverage.add(
Statement(
Location("", "", "", ClassType.Object, "", ""),
1,
2,
3,
4,
"",
"",
"",
false,
0
)
)
assert(0 === coverage.statementCoverage)
}
test("coverage for invoked statements") {
val coverage = Coverage()
coverage.add(
Statement(
Location("", "", "", ClassType.Object, "", ""),
1,
2,
3,
4,
"",
"",
"",
false,
3
)
)
coverage.add(
Statement(
Location("", "", "", ClassType.Object, "", ""),
2,
2,
3,
4,
"",
"",
"",
false,
0
)
)
coverage.add(
Statement(
Location("", "", "", ClassType.Object, "", ""),
3,
2,
3,
4,
"",
"",
"",
false,
0
)
)
coverage.add(
Statement(
Location("", "", "", ClassType.Object, "", ""),
4,
2,
3,
4,
"",
"",
"",
false,
0
)
)
assert(0.25 === coverage.statementCoverage)
}
}
| scoverage/scalac-scoverage-plugin | scalac-scoverage-plugin/src/test/scala/scoverage/CoverageTest.scala | Scala | apache-2.0 | 1,702 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.util
import java.io.File
import java.lang.management.ManagementFactory
import scala.concurrent.{ ExecutionContext, Future }
import scala.concurrent.duration._
import com.sun.management.HotSpotDiagnosticMXBean
// used to diagnose problems on the CI machines
object HeapDumper {
private lazy val diagnostics = ManagementFactory.newPlatformMXBeanProxy(
ManagementFactory.getPlatformMBeanServer(),
"com.sun.management:type=HotSpotDiagnostic",
classOf[HotSpotDiagnosticMXBean]
)
def dump(file: File): Unit = diagnostics.dumpHeap(file.getAbsolutePath, false)
def dumpAndExit(file: File, duration: FiniteDuration = 10.seconds): Unit = {
import ExecutionContext.Implicits.global
Future {
Thread.sleep(duration.toMillis)
dump(file)
sys.exit(1)
}
}
}
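/** A minimal usage sketch (not part of the original utility): schedules a heap dump and
  * JVM exit 30 seconds from now. The dump path is illustrative only.
  */
object HeapDumperExample {
  def diagnoseLater(): Unit =
    HeapDumper.dumpAndExit(new File("/tmp/ensime-heap.hprof"), 30.seconds)
}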
| pascr/ensime-server | testutil/src/main/scala/org/ensime/util/HeapDumper.scala | Scala | gpl-3.0 | 947 |
package scamr.mapreduce.lambda
import org.apache.hadoop.mapreduce.{JobID, TaskAttemptID, TaskInputOutputContext}
import scamr.mapreduce.CounterUpdater
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
// A wrapper around Hadoop's TaskInputOutputContext that only exposes limited functionality.
// Specifically, it doesn't allow calls to any mutating functions except for updateCounter(), setStatus(),
// and progress().
class BaseLambdaContext(override val _context: TaskInputOutputContext[_, _, _, _]) extends CounterUpdater {
// Side-effect-free methods
def getTaskAttemptId: TaskAttemptID = _context.getTaskAttemptID
def getStatus: String = _context.getStatus
def getConfiguration: Configuration = _context.getConfiguration
def getJobId: JobID = _context.getJobID
def getJobName: String = _context.getJobName
def getNumReduceTasks: Int = _context.getNumReduceTasks
def getWorkingDirectory: Path = _context.getWorkingDirectory
// Side-effect-full methods
def setStatus(status: String) {
_context.setStatus(status)
}
def progress() { _context.progress() }
}
| ooyala/scamr | src/main/scala/scamr/mapreduce/lambda/BaseLambdaContext.scala | Scala | apache-2.0 | 1,118 |
package org.brzy.calista.schema
/**
* Document Me..
*
* @author Michael Fortin
*/
class CounterFamily(val name: String) extends Family {
def apply[K](key: K) = new CounterKey(key, this)
override def toString = "CounterFamily(" + name + ")"
}
/**
*
*/
object CounterFamily {
def apply(name: String) = new CounterFamily(name)
}
| m410/calista | src/main/scala/org/brzy/calista/schema/CounterFamily.scala | Scala | apache-2.0 | 343 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.expressions
import org.apache.spark.annotation.Experimental
import org.apache.spark.sql.{Column, catalyst}
import org.apache.spark.sql.catalyst.expressions._
/**
* :: Experimental ::
* A window specification that defines the partitioning, ordering, and frame boundaries.
*
* Use the static methods in [[Window]] to create a [[WindowSpec]].
*
* @since 1.4.0
*/
@Experimental
class WindowSpec private[sql](
partitionSpec: Seq[Expression],
orderSpec: Seq[SortOrder],
frame: catalyst.expressions.WindowFrame) {
/**
* Defines the partitioning columns in a [[WindowSpec]].
* @since 1.4.0
*/
@scala.annotation.varargs
def partitionBy(colName: String, colNames: String*): WindowSpec = {
partitionBy((colName +: colNames).map(Column(_)): _*)
}
/**
* Defines the partitioning columns in a [[WindowSpec]].
* @since 1.4.0
*/
@scala.annotation.varargs
def partitionBy(cols: Column*): WindowSpec = {
new WindowSpec(cols.map(_.expr), orderSpec, frame)
}
/**
* Defines the ordering columns in a [[WindowSpec]].
* @since 1.4.0
*/
@scala.annotation.varargs
def orderBy(colName: String, colNames: String*): WindowSpec = {
orderBy((colName +: colNames).map(Column(_)): _*)
}
/**
* Defines the ordering columns in a [[WindowSpec]].
* @since 1.4.0
*/
@scala.annotation.varargs
def orderBy(cols: Column*): WindowSpec = {
val sortOrder: Seq[SortOrder] = cols.map { col =>
col.expr match {
case expr: SortOrder =>
expr
case expr: Expression =>
SortOrder(expr, Ascending)
}
}
new WindowSpec(partitionSpec, sortOrder, frame)
}
/**
* Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive).
*
* Both `start` and `end` are relative positions from the current row. For example, "0" means
* "current row", while "-1" means the row before the current row, and "5" means the fifth row
* after the current row.
*
* @param start boundary start, inclusive.
* The frame is unbounded if this is the minimum long value.
* @param end boundary end, inclusive.
* The frame is unbounded if this is the maximum long value.
* @since 1.4.0
*/
def rowsBetween(start: Long, end: Long): WindowSpec = {
between(RowFrame, start, end)
}
/**
* Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive).
*
   * Both `start` and `end` are relative to the current row. For example, "0" means "current row",
   * while "-1" means the row before the current row, and "5" means the fifth row after the
   * current row.
*
* @param start boundary start, inclusive.
* The frame is unbounded if this is the minimum long value.
* @param end boundary end, inclusive.
* The frame is unbounded if this is the maximum long value.
* @since 1.4.0
*/
def rangeBetween(start: Long, end: Long): WindowSpec = {
between(RangeFrame, start, end)
}
private def between(typ: FrameType, start: Long, end: Long): WindowSpec = {
val boundaryStart = start match {
case 0 => CurrentRow
case Long.MinValue => UnboundedPreceding
case x if x < 0 => ValuePreceding(-start.toInt)
case x if x > 0 => ValueFollowing(start.toInt)
}
val boundaryEnd = end match {
case 0 => CurrentRow
case Long.MaxValue => UnboundedFollowing
case x if x < 0 => ValuePreceding(-end.toInt)
case x if x > 0 => ValueFollowing(end.toInt)
}
new WindowSpec(
partitionSpec,
orderSpec,
SpecifiedWindowFrame(typ, boundaryStart, boundaryEnd))
}
/**
* Converts this [[WindowSpec]] into a [[Column]] with an aggregate expression.
*/
private[sql] def withAggregate(aggregate: Column): Column = {
val windowExpr = aggregate.expr match {
case Average(child) => WindowExpression(
UnresolvedWindowFunction("avg", child :: Nil),
WindowSpecDefinition(partitionSpec, orderSpec, frame))
case Sum(child) => WindowExpression(
UnresolvedWindowFunction("sum", child :: Nil),
WindowSpecDefinition(partitionSpec, orderSpec, frame))
case Count(child) => WindowExpression(
UnresolvedWindowFunction("count", child :: Nil),
WindowSpecDefinition(partitionSpec, orderSpec, frame))
case First(child) => WindowExpression(
// TODO this is a hack for Hive UDAF first_value
UnresolvedWindowFunction("first_value", child :: Nil),
WindowSpecDefinition(partitionSpec, orderSpec, frame))
case Last(child) => WindowExpression(
// TODO this is a hack for Hive UDAF last_value
UnresolvedWindowFunction("last_value", child :: Nil),
WindowSpecDefinition(partitionSpec, orderSpec, frame))
case Min(child) => WindowExpression(
UnresolvedWindowFunction("min", child :: Nil),
WindowSpecDefinition(partitionSpec, orderSpec, frame))
case Max(child) => WindowExpression(
UnresolvedWindowFunction("max", child :: Nil),
WindowSpecDefinition(partitionSpec, orderSpec, frame))
case wf: WindowFunction => WindowExpression(
wf,
WindowSpecDefinition(partitionSpec, orderSpec, frame))
case x =>
throw new UnsupportedOperationException(s"$x is not supported in window operation.")
}
new Column(windowExpr)
}
}
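/** A minimal usage sketch (not part of the public API above): builds the classic
  * "running total" frame, covering all rows from the start of each partition up to the
  * current row. Column names are supplied by the caller and are illustrative only.
  */
object WindowSpecExample {
  def runningTotalSpec(partitionCol: String, orderCol: String): WindowSpec =
    Window.partitionBy(partitionCol).orderBy(orderCol).rowsBetween(Long.MinValue, 0)
}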
| ArvinDevel/onlineAggregationOnSparkV2 | sql/core/src/main/scala/org/apache/spark/sql/expressions/WindowSpec.scala | Scala | apache-2.0 | 6,231 |
package es.weso.rdf
import es.weso.rdf.nodes.{ RDFNode, IRI, BNodeId }
import es.weso.rdf.triples.RDFTriple
import scala.collection.Set
import scala.collection.immutable.Map
import es.weso.tgraph.TContext
case class Exists(fn: BNodeId => RDFGraph)(implicit seed: BNodeId) extends RDFGraph {
override def isEmpty = false
override def insertTriple(triple: RDFTriple): RDFGraph = {
Exists { case (bnode) => fn(bnode).insertTriple(triple) }
    //Anton: I had to add "case" because of an IntelliJ IDEA highlighting bug that I discovered http://youtrack.jetbrains.com/issue/SCL-6730
}
override def addTriples(triples: Set[RDFTriple]): RDFGraph = {
Exists { case (bnode) => fn(bnode).addTriples(triples) }
}
override def triples(implicit seed: BNodeId): Set[RDFTriple] = {
fn(seed).triples(seed.newBNodeId)
}
override def IRIs: Set[IRI] = {
fn(seed).IRIs
}
/*
* merges this graph with another one
*/
override def merge(other: RDFGraph): RDFGraph = {
Exists { case bnode => fn(bnode).merge(other) }
}
/*
* add triples which can have a set of bNodes
*/
override def addTriplesBNodes(
bnodes: Set[BNodeId],
triples: Set[RDFTriple],
map: Map[BNodeId, BNodeId]
): RDFGraph = {
Exists { case (bnode) => fn(bnode).addTriplesBNodes(bnodes, triples, map) }
}
override def insertTripleMap(
triple: RDFTriple,
map: Map[BNodeId, BNodeId]
): RDFGraph = {
Exists { case (bnode) => fn(bnode).insertTripleMap(triple, map) }
}
override def show(implicit seed: BNodeId): String = {
"Exists " + seed.id + " ( " + fn(seed).show(seed.newBNodeId) + ")"
}
override def foldRDFGraphSeed[A](e: A, f: (A, TContext[RDFNode]) => A, seed: BNodeId): A = {
fn(seed).foldRDFGraphSeed(e, f, seed.newBNodeId)
}
def foldRDFGraphSeedOrd[A](
e: A,
f: (A, TContext[RDFNode]) => A,
seed: BNodeId
)(implicit ord: Ordering[RDFNode]): A = {
fn(seed).foldRDFGraphSeedOrd(e, f, seed.newBNodeId)(ord)
}
} | labra/wesin | src/main/scala/es/weso/rdfgraph/Exists.scala | Scala | lgpl-3.0 | 2,001 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.chill
import scala.util.control.Exception.allCatch
import scala.collection.mutable.{ Map => MMap }
import _root_.java.lang.reflect.Field
/**
* Uses facts about how scala compiles object singletons to Java + reflection
*/
class ObjectSerializer[T] extends KSerializer[T] {
val cachedObj = MMap[Class[_], Option[T]]()
// Does nothing
override def write(kser: Kryo, out: Output, obj: T) {}
protected def createSingleton(cls: Class[_]): Option[T] = {
moduleField(cls).map { _.get(null).asInstanceOf[T] }
}
protected def cachedRead(cls: Class[_]): Option[T] = {
cachedObj.synchronized { cachedObj.getOrElseUpdate(cls, createSingleton(cls)) }
}
override def read(kser: Kryo, in: Input, cls: Class[T]): T = cachedRead(cls).get
def accepts(cls: Class[_]): Boolean = cachedRead(cls).isDefined
protected def moduleField(klass: Class[_]): Option[Field] =
Some(klass)
.filter { _.getName.last == '$' }
.flatMap { k => allCatch.opt(k.getDeclaredField("MODULE$")) }
}
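/** A minimal sketch (not part of chill itself): the serializer recognises compiled Scala
  * singletons by their static MODULE$ field. `Nil` is used here only because it is a
  * readily available singleton object.
  */
object ObjectSerializerExample {
  def recognisesSingletons: Boolean = (new ObjectSerializer[Any]).accepts(Nil.getClass)
}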
| steveloughran/chill | chill-scala/src/main/scala/com/twitter/chill/ObjectSerializer.scala | Scala | apache-2.0 | 1,586 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.sql.{Connection, ResultSet}
import scala.reflect.ClassTag
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
import org.apache.spark.api.java.function.{Function => JFunction}
import org.apache.spark.internal.Logging
import org.apache.spark.util.NextIterator
private[spark] class JdbcPartition(idx: Int, val lower: Long, val upper: Long) extends Partition {
override def index: Int = idx
}
// TODO: Expose a jdbcRDD function in SparkContext and mark this as semi-private
/**
* An RDD that executes a SQL query on a JDBC connection and reads results.
* For usage example, see test case JdbcRDDSuite.
*
* @param getConnection a function that returns an open Connection.
* The RDD takes care of closing the connection.
* @param sql the text of the query.
* The query must contain two ? placeholders for parameters used to partition the results.
* For example,
* {{{
* select title, author from books where ? <= id and id <= ?
* }}}
* @param lowerBound the minimum value of the first placeholder
* @param upperBound the maximum value of the second placeholder
* The lower and upper bounds are inclusive.
* @param numPartitions the number of partitions.
* Given a lowerBound of 1, an upperBound of 20, and a numPartitions of 2,
* the query would be executed twice, once with (1, 10) and once with (11, 20)
* @param mapRow a function from a ResultSet to a single row of the desired result type(s).
* This should only call getInt, getString, etc; the RDD takes care of calling next.
* The default maps a ResultSet to an array of Object.
*/
class JdbcRDD[T: ClassTag](
sc: SparkContext,
getConnection: () => Connection,
sql: String,
lowerBound: Long,
upperBound: Long,
numPartitions: Int,
mapRow: (ResultSet) => T = JdbcRDD.resultSetToObjectArray _)
extends RDD[T](sc, Nil) with Logging {
override def getPartitions: Array[Partition] = {
// bounds are inclusive, hence the + 1 here and - 1 on end
val length = BigInt(1) + upperBound - lowerBound
(0 until numPartitions).map { i =>
val start = lowerBound + ((i * length) / numPartitions)
val end = lowerBound + (((i + 1) * length) / numPartitions) - 1
new JdbcPartition(i, start.toLong, end.toLong)
}.toArray
}
override def compute(thePart: Partition, context: TaskContext): Iterator[T] = new NextIterator[T]
{
context.addTaskCompletionListener{ context => closeIfNeeded() }
val part = thePart.asInstanceOf[JdbcPartition]
val conn = getConnection()
val stmt = conn.prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)
val url = conn.getMetaData.getURL
if (url.startsWith("jdbc:mysql:")) {
// setFetchSize(Integer.MIN_VALUE) is a mysql driver specific way to force
// streaming results, rather than pulling entire resultset into memory.
// See the below URL
// dev.mysql.com/doc/connector-j/5.1/en/connector-j-reference-implementation-notes.html
stmt.setFetchSize(Integer.MIN_VALUE)
} else {
stmt.setFetchSize(100)
}
logInfo(s"statement fetch size set to: ${stmt.getFetchSize}")
stmt.setLong(1, part.lower)
stmt.setLong(2, part.upper)
val rs = stmt.executeQuery()
override def getNext(): T = {
if (rs.next()) {
mapRow(rs)
} else {
finished = true
null.asInstanceOf[T]
}
}
override def close() {
try {
if (null != rs) {
rs.close()
}
} catch {
case e: Exception => logWarning("Exception closing resultset", e)
}
try {
if (null != stmt) {
stmt.close()
}
} catch {
case e: Exception => logWarning("Exception closing statement", e)
}
try {
if (null != conn) {
conn.close()
}
logInfo("closed connection")
} catch {
case e: Exception => logWarning("Exception closing connection", e)
}
}
}
}
object JdbcRDD {
def resultSetToObjectArray(rs: ResultSet): Array[Object] = {
Array.tabulate[Object](rs.getMetaData.getColumnCount)(i => rs.getObject(i + 1))
}
trait ConnectionFactory extends Serializable {
@throws[Exception]
def getConnection: Connection
}
/**
* Create an RDD that executes a SQL query on a JDBC connection and reads results.
* For usage example, see test case JavaAPISuite.testJavaJdbcRDD.
*
* @param connectionFactory a factory that returns an open Connection.
* The RDD takes care of closing the connection.
* @param sql the text of the query.
* The query must contain two ? placeholders for parameters used to partition the results.
* For example,
* {{{
* select title, author from books where ? <= id and id <= ?
* }}}
* @param lowerBound the minimum value of the first placeholder
* @param upperBound the maximum value of the second placeholder
* The lower and upper bounds are inclusive.
* @param numPartitions the number of partitions.
* Given a lowerBound of 1, an upperBound of 20, and a numPartitions of 2,
* the query would be executed twice, once with (1, 10) and once with (11, 20)
* @param mapRow a function from a ResultSet to a single row of the desired result type(s).
* This should only call getInt, getString, etc; the RDD takes care of calling next.
* The default maps a ResultSet to an array of Object.
*/
def create[T](
sc: JavaSparkContext,
connectionFactory: ConnectionFactory,
sql: String,
lowerBound: Long,
upperBound: Long,
numPartitions: Int,
mapRow: JFunction[ResultSet, T]): JavaRDD[T] = {
val jdbcRDD = new JdbcRDD[T](
sc.sc,
() => connectionFactory.getConnection,
sql,
lowerBound,
upperBound,
numPartitions,
(resultSet: ResultSet) => mapRow.call(resultSet))(fakeClassTag)
new JavaRDD[T](jdbcRDD)(fakeClassTag)
}
/**
* Create an RDD that executes a SQL query on a JDBC connection and reads results. Each row is
* converted into a `Object` array. For usage example, see test case JavaAPISuite.testJavaJdbcRDD.
*
* @param connectionFactory a factory that returns an open Connection.
* The RDD takes care of closing the connection.
* @param sql the text of the query.
* The query must contain two ? placeholders for parameters used to partition the results.
* For example,
* {{{
* select title, author from books where ? <= id and id <= ?
* }}}
* @param lowerBound the minimum value of the first placeholder
* @param upperBound the maximum value of the second placeholder
* The lower and upper bounds are inclusive.
* @param numPartitions the number of partitions.
* Given a lowerBound of 1, an upperBound of 20, and a numPartitions of 2,
* the query would be executed twice, once with (1, 10) and once with (11, 20)
*/
def create(
sc: JavaSparkContext,
connectionFactory: ConnectionFactory,
sql: String,
lowerBound: Long,
upperBound: Long,
numPartitions: Int): JavaRDD[Array[Object]] = {
val mapRow = new JFunction[ResultSet, Array[Object]] {
override def call(resultSet: ResultSet): Array[Object] = {
resultSetToObjectArray(resultSet)
}
}
create(sc, connectionFactory, sql, lowerBound, upperBound, numPartitions, mapRow)
}
}
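/** A minimal usage sketch (not part of Spark itself): reads (id, title) pairs from a
  * hypothetical "books" table, splitting ids 1..100 across 4 partitions. The JDBC URL,
  * table and column names are illustrative only, and the driver must be on the classpath.
  */
object JdbcRDDExample {
  def booksRdd(sc: SparkContext): JdbcRDD[(Int, String)] =
    new JdbcRDD[(Int, String)](
      sc,
      () => java.sql.DriverManager.getConnection("jdbc:h2:mem:books"),
      "SELECT id, title FROM books WHERE ? <= id AND id <= ?",
      lowerBound = 1,
      upperBound = 100,
      numPartitions = 4,
      mapRow = rs => (rs.getInt(1), rs.getString(2)))
}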
| sh-cho/cshSpark | rdd/JdbcRDD.scala | Scala | apache-2.0 | 8,397 |
package objektwerks.cluster
object SeedNode extends Node | objektwerks/akka.cluster | seednode/src/main/scala/objektwerks/cluster/SeedNode.scala | Scala | apache-2.0 | 57 |
package stoner.board
import stoner.board.BoardDimension.STANDARD_BOARD_DIM
object Position {
def labelClassToPos(label : Double, boardDim : BoardDimension) = {
val p = label.toInt
val c = p / boardDim.row
val r = p - c*boardDim.row
Position(c,r)
}
}
case class Position(column : Int, row : Int) {
def toLabelClass(boardDim : BoardDimension = STANDARD_BOARD_DIM) =
(column*boardDim.row + row).toDouble
}//end case class Position(column : Int, row : Int)
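/** A minimal sketch (not part of the original file): encoding a position as a label class
  * and decoding it again is a round trip for any position that lies inside the board.
  */
object PositionExample {
  def roundTrips(p: Position, dim: BoardDimension = STANDARD_BOARD_DIM): Boolean =
    Position.labelClassToPos(p.toLabelClass(dim), dim) == p
}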
//31337 | RJRyV/stoner | src/main/scala/stoner/board/Position.scala | Scala | gpl-3.0 | 506 |
/*
* Copyright (C) 2012 Pavel Fatin <http://pavelfatin.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.pavelfatin.fs
/** A base file interface.
*
* File is a file system entry that contains data (in addition to its metadata properties).
*
* Files must be opened before performing I/O and closed afterwards.
*
* `File` interface includes a data streaming API for convenient data reading and writing.
*
* @define entity file
*
* @see [[com.pavelfatin.fs.FileSystemEntry]]
* @see [[com.pavelfatin.fs.Data]]
* @see [[com.pavelfatin.fs.OpenAndClose]]
* @see [[com.pavelfatin.fs.Streams]]
* @see [[com.pavelfatin.fs.StreamIO]]
*/
trait File extends FileSystemEntry with Data with OpenAndClose with Streams with StreamIO {
/** Truncates this file.
*
* If the present length of this file is less than the `length` argument
* then no action will be performed.
*
* @note Does not guarantee that the truncated content will be securely erased.
*
* @param length the new length of this file
* @throws IllegalStateException if this file is closed
* @throws IllegalArgumentException if 'length' argument is negative
* @throws java.io.IOException if an I/O error occurs
*/
def truncate(length: Long)
} | pavelfatin/toyfs | src/main/scala/com/pavelfatin/fs/File.scala | Scala | gpl-3.0 | 1,888 |
object A {
opaque type T33 = Int
object T33 {
val a = new Array[T33](3)
}
} | lampepfl/dotty | tests/pos/i6286.scala | Scala | apache-2.0 | 85 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.schedulers
import java.util.concurrent.TimeUnit
import minitest.SimpleTestSuite
import monix.execution.ExecutionModel.AlwaysAsyncExecution
import monix.execution.misc.Local
import monix.execution.FutureUtils.extensions._
import monix.execution.{Cancelable, Scheduler}
import monix.execution.cancelables.SingleAssignCancelable
import monix.execution.exceptions.DummyException
import scala.concurrent.{Future, Promise}
import scala.concurrent.duration._
import scala.util.Success
object TracingSchedulerSuite extends SimpleTestSuite {
test("does not capture snapshot if not a tracing scheduler") {
implicit val ec = TestScheduler()
val local1 = Local(0)
val local2 = Local(0)
local2 := 100
val f = local1.bind(100)(Future(local1.get + local2.get))
local1 := 999
local2 := 999
assertEquals(f.value, None)
ec.tick()
assertEquals(f.value, Some(Success(999 * 2)))
}
test("captures locals in simulated async execution") {
val ec = TestScheduler()
implicit val traced = TracingScheduler(ec)
val local1 = Local(0)
val local2 = Local(0)
local2 := 100
val f = local1.bind(100)(Future(local1.get + local2.get))
local1 := 999
local2 := 999
assertEquals(f.value, None)
ec.tick()
assertEquals(f.value, Some(Success(200)))
}
testAsync("captures locals in actual async execution") {
import monix.execution.Scheduler.Implicits.traced
val local1 = Local(0)
val local2 = Local(0)
local2 := 100
val f = local1.bind(100)(Future(local1.get + local2.get))
local1 := 999
local2 := 999
for (r <- f) yield assertEquals(r, 200)
}
test("captures locals in scheduleOnce") {
val ec = TestScheduler()
implicit val traced = TracingScheduler(ec)
val local1 = Local(0)
val local2 = Local(0)
local2 := 100
val f = local1.bind(100)(Future.delayedResult(1.second)(local1.get + local2.get))
local1 := 999
local2 := 999
assertEquals(f.value, None)
ec.tick(1.second)
assertEquals(f.value, Some(Success(200)))
}
def testPeriodicScheduling(schedule: (Scheduler, Long, Long, TimeUnit, Runnable) => Cancelable) = {
val ec = TestScheduler()
implicit val traced = TracingScheduler(ec)
val local1 = Local(0)
val local2 = Local(0)
local2 := 100
val f = local1.bind(100) {
var sum = 0
var count = 0
val p = Promise[Int]()
val sub = SingleAssignCancelable()
sub := schedule(traced, 1, 1, TimeUnit.SECONDS, new Runnable {
def run(): Unit = {
sum += local1.get + local2.get
count += 1
if (count >= 3) {
p.success(sum)
sub.cancel()
}
}
})
p.future
}
local1 := 999
local2 := 999
assertEquals(f.value, None)
ec.tick(3.second)
assertEquals(f.value, Some(Success(200 * 3)))
}
test("captures locals in scheduleAtFixedRate") {
testPeriodicScheduling { (s, initial, delay, unit, r) =>
s.scheduleAtFixedRate(initial, delay, unit, r)
}
}
test("captures locals in scheduleWithFixedDelay") {
testPeriodicScheduling { (s, initial, delay, unit, r) =>
s.scheduleWithFixedDelay(initial, delay, unit, r)
}
}
test("reportFailure") {
val ec = TestScheduler()
val traced = TracingScheduler(ec)
val dummy = DummyException("dummy")
traced.executeAsync(() => throw dummy)
ec.tick()
assertEquals(ec.state.lastReportedError, dummy)
}
test("clockRealTime") {
val ec = TestScheduler()
val traced = TracingScheduler(ec)
assertEquals(traced.clockRealTime(MILLISECONDS), 0)
ec.tick(1.second)
assertEquals(traced.clockRealTime(MILLISECONDS), 1000)
ec.tick(1.second)
assertEquals(traced.clockRealTime(MILLISECONDS), 2000)
}
test("clockMonotonic") {
val ec = TestScheduler()
val traced = TracingScheduler(ec)
assertEquals(traced.clockMonotonic(MILLISECONDS), 0)
ec.tick(1.second)
assertEquals(traced.clockMonotonic(MILLISECONDS), 1000)
ec.tick(1.second)
assertEquals(traced.clockMonotonic(MILLISECONDS), 2000)
}
test("executionModel") {
val ec = TestScheduler()
val traced = TracingScheduler(ec)
assertEquals(traced.executionModel, ec.executionModel)
implicit val traced2 = traced.withExecutionModel(AlwaysAsyncExecution)
assertEquals(traced2.executionModel, AlwaysAsyncExecution)
val f = Future(true)
assertEquals(f.value, None)
ec.tick()
assertEquals(f.value, Some(Success(true)))
}
}
| Wogan/monix | monix-execution/shared/src/test/scala/monix/execution/schedulers/TracingSchedulerSuite.scala | Scala | apache-2.0 | 5,256 |
package scala.lms
package common
import java.io.PrintWriter
import scala.lms.internal.GenericNestedCodegen
import collection.mutable.ArrayBuffer
import scala.reflect.SourceContext
trait SynchronizedArrayBufferOps extends ArrayBufferOps {
/*
object SynchronizedArrayBuffer {
def apply[A:Manifest](xs: Rep[A]*)(implicit pos: SourceContext) = arraybuffer_new(xs)
}
*/
}
trait SynchronizedArrayBufferOpsExp extends SynchronizedArrayBufferOps with ArrayBufferOpsExp {
case class SyncArrayBufferNew[A:Manifest](xs: Seq[Exp[A]]) extends Def[ArrayBuffer[A]] {
val mA = manifest[A]
}
  // all array buffers are synchronized (backward compat). TODO: separate constructor
override def arraybuffer_new[A:Manifest](xs: Seq[Exp[A]])(implicit pos: SourceContext) = reflectMutable(SyncArrayBufferNew(xs))
}
trait BaseGenSynchronizedArrayBufferOps extends BaseGenArrayBufferOps {
val IR: SynchronizedArrayBufferOpsExp
import IR._
}
trait ScalaGenSynchronizedArrayBufferOps extends BaseGenSynchronizedArrayBufferOps with ScalaGenArrayBufferOps {
val IR: SynchronizedArrayBufferOpsExp
import IR._
override def emitNode(sym: Sym[Any], rhs: Def[Any]) = rhs match {
case a@SyncArrayBufferNew(xs) => emitValDef(sym, src"(new scala.collection.mutable.ArrayBuffer[${a.mA}] with scala.collection.mutable.SynchronizedBuffer[${a.mA}]) ++= List(${(xs map {quote}).mkString(",")})")
case _ => super.emitNode(sym, rhs)
}
}
trait CLikeGenSynchronizedArrayBufferOps extends BaseGenSynchronizedArrayBufferOps with CLikeGenArrayBufferOps {
val IR: SynchronizedArrayBufferOpsExp
import IR._
override def emitNode(sym: Sym[Any], rhs: Def[Any]) = {
rhs match {
case _ => super.emitNode(sym, rhs)
}
}
}
trait CudaGenSynchronizedArrayBufferOps extends CudaGenEffect with CLikeGenSynchronizedArrayBufferOps
trait OpenCLGenSynchronizedArrayBufferOps extends OpenCLGenEffect with CLikeGenSynchronizedArrayBufferOps
trait CGenSynchronizedArrayBufferOps extends CGenEffect with CLikeGenSynchronizedArrayBufferOps
| scalan/virtualization-lms-core | src/common/SynchronizedArrayBufferOps.scala | Scala | bsd-3-clause | 2,051 |
/*
* Copyright 2001-2006 Stephen Colebourne
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.joda.time
import java.io.Serializable
import org.joda.time.base.BaseInterval
import org.joda.time.chrono.ISOChronology
import org.joda.time.format.ISODateTimeFormat
import org.joda.time.format.ISOPeriodFormat
/**
* Interval is the standard implementation of an immutable time interval.
* <p>
* A time interval represents a period of time between two instants.
* Intervals are inclusive of the start instant and exclusive of the end.
* The end instant is always greater than or equal to the start instant.
* <p>
* Intervals have a fixed millisecond duration.
* This is the difference between the start and end instants.
* The duration is represented separately by {@link ReadableDuration}.
* As a result, intervals are not comparable.
* To compare the length of two intervals, you should compare their durations.
* <p>
* An interval can also be converted to a {@link ReadablePeriod}.
* This represents the difference between the start and end points in terms of fields
* such as years and days.
* <p>
* Interval is thread-safe and immutable.
*
* @author Brian S O'Neill
* @author Sean Geoghegan
* @author Stephen Colebourne
* @author Julen Parra
* @since 1.0
*/
@SerialVersionUID(4922451897541386752L)
object Interval {
/**
* Parses a {@code Interval} from the specified string.
* <p>
* The String formats are described by {@link ISODateTimeFormat#dateTimeParser()}
* and {@link ISOPeriodFormat#standard()}, and may be 'datetime/datetime',
* 'datetime/period' or 'period/datetime'.
*
* @param str the string to parse, not null
* @since 2.0
*/
def parse(str: String): Interval = {
return new Interval(str)
}
}
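/** A minimal usage sketch (not part of Joda-Time itself): parses an interval from the
  * ISO-8601 "datetime/datetime" form described above. The dates are illustrative only.
  */
object IntervalParseExample {
  def example: Interval = Interval.parse("2004-06-09T00:00:00Z/2004-06-11T00:00:00Z")
}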
@SerialVersionUID(4922451897541386752L)
final class Interval extends BaseInterval with ReadableInterval with Serializable {
/**
* Constructs an interval from a start and end instant with the ISO
* default chronology in the default time zone.
*
* @param startInstant start of this interval, as milliseconds from 1970-01-01T00:00:00Z.
* @param endInstant end of this interval, as milliseconds from 1970-01-01T00:00:00Z.
* @throws IllegalArgumentException if the end is before the start
*/
def this(startInstant: Long, endInstant: Long) {
this()
`super`(startInstant, endInstant, null)
}
/**
* Constructs an interval from a start and end instant with the ISO
* default chronology in the specified time zone.
*
* @param startInstant start of this interval, as milliseconds from 1970-01-01T00:00:00Z.
* @param endInstant end of this interval, as milliseconds from 1970-01-01T00:00:00Z.
* @param zone the time zone to use, null means default zone
* @throws IllegalArgumentException if the end is before the start
* @since 1.5
*/
def this(startInstant: Long, endInstant: Long, zone: DateTimeZone) {
this()
`super`(startInstant, endInstant, ISOChronology.getInstance(zone))
}
/**
* Constructs an interval from a start and end instant with the
* specified chronology.
*
* @param chronology the chronology to use, null is ISO default
* @param startInstant start of this interval, as milliseconds from 1970-01-01T00:00:00Z.
* @param endInstant end of this interval, as milliseconds from 1970-01-01T00:00:00Z.
* @throws IllegalArgumentException if the end is before the start
*/
def this(startInstant: Long, endInstant: Long, chronology: Chronology) {
this()
`super`(startInstant, endInstant, chronology)
}
/**
* Constructs an interval from a start and end instant.
* <p>
* The chronology used is that of the start instant.
*
* @param start start of this interval, null means now
* @param end end of this interval, null means now
* @throws IllegalArgumentException if the end is before the start
*/
def this(start: ReadableInstant, end: ReadableInstant) {
this()
`super`(start, end)
}
/**
* Constructs an interval from a start instant and a duration.
*
* @param start start of this interval, null means now
* @param duration the duration of this interval, null means zero length
* @throws IllegalArgumentException if the end is before the start
* @throws ArithmeticException if the end instant exceeds the capacity of a long
*/
def this(start: ReadableInstant, duration: ReadableDuration) {
this()
`super`(start, duration)
}
/**
* Constructs an interval from a millisecond duration and an end instant.
*
* @param duration the duration of this interval, null means zero length
* @param end end of this interval, null means now
* @throws IllegalArgumentException if the end is before the start
* @throws ArithmeticException if the start instant exceeds the capacity of a long
*/
def this(duration: ReadableDuration, end: ReadableInstant) {
this()
`super`(duration, end)
}
/**
* Constructs an interval from a start instant and a time period.
* <p>
* When forming the interval, the chronology from the instant is used
* if present, otherwise the chronology of the period is used.
*
* @param start start of this interval, null means now
* @param period the period of this interval, null means zero length
* @throws IllegalArgumentException if the end is before the start
* @throws ArithmeticException if the end instant exceeds the capacity of a long
*/
def this(start: ReadableInstant, period: ReadablePeriod) {
this()
`super`(start, period)
}
/**
* Constructs an interval from a time period and an end instant.
* <p>
* When forming the interval, the chronology from the instant is used
* if present, otherwise the chronology of the period is used.
*
* @param period the period of this interval, null means zero length
* @param end end of this interval, null means now
* @throws IllegalArgumentException if the end is before the start
* @throws ArithmeticException if the start instant exceeds the capacity of a long
*/
def this(period: ReadablePeriod, end: ReadableInstant) {
this()
`super`(period, end)
}
/**
* Constructs a time interval by converting or copying from another object.
* <p>
* The recognised object types are defined in
* {@link org.joda.time.convert.ConverterManager ConverterManager} and
* include ReadableInterval and String.
* The String formats are described by {@link ISODateTimeFormat#dateTimeParser()}
* and {@link ISOPeriodFormat#standard()}, and may be 'datetime/datetime',
* 'datetime/period' or 'period/datetime'.
*
* @param interval the time interval to copy
* @throws IllegalArgumentException if the interval is invalid
*/
def this(interval: AnyRef) {
this()
`super`(interval, null)
}
/**
* Constructs a time interval by converting or copying from another object,
* overriding the chronology.
* <p>
* The recognised object types are defined in
* {@link org.joda.time.convert.ConverterManager ConverterManager} and
* include ReadableInterval and String.
* The String formats are described by {@link ISODateTimeFormat#dateTimeParser()}
* and {@link ISOPeriodFormat#standard()}, and may be 'datetime/datetime',
* 'datetime/period' or 'period/datetime'.
*
* @param interval the time interval to copy
* @param chronology the chronology to use, null means ISO default
* @throws IllegalArgumentException if the interval is invalid
*/
def this(interval: AnyRef, chronology: Chronology) {
this()
`super`(interval, chronology)
}
/**
* Get this interval as an immutable <code>Interval</code> object
* by returning <code>this</code>.
*
* @return <code>this</code>
*/
override def toInterval: Interval = {
return this
}
/**
* Gets the overlap between this interval and another interval.
* <p>
* Intervals are inclusive of the start instant and exclusive of the end.
* An interval overlaps another if it shares some common part of the
* datetime continuum. This method returns the amount of the overlap,
* only if the intervals actually do overlap.
* If the intervals do not overlap, then null is returned.
* <p>
* When two intervals are compared the result is one of three states:
* (a) they abut, (b) there is a gap between them, (c) they overlap.
* The abuts state takes precedence over the other two, thus a zero duration
* interval at the start of a larger interval abuts and does not overlap.
* <p>
* The chronology of the returned interval is the same as that of
* this interval (the chronology of the interval parameter is not used).
* Note that the use of the chronology was only correctly implemented
* in version 1.3.
*
* @param interval the interval to examine, null means now
* @return the overlap interval, null if no overlap
* @since 1.1
*/
def overlap(interval: ReadableInterval): Interval = {
    val other: ReadableInterval = DateTimeUtils.getReadableInterval(interval)
    if (!overlaps(other)) {
      return null
    }
    val start: Long = Math.max(getStartMillis, other.getStartMillis)
    val end: Long = Math.min(getEndMillis, other.getEndMillis)
    return new Interval(start, end, getChronology)
}
/**
* Gets the gap between this interval and another interval.
* The other interval can be either before or after this interval.
* <p>
* Intervals are inclusive of the start instant and exclusive of the end.
* An interval has a gap to another interval if there is a non-zero
* duration between them. This method returns the amount of the gap only
* if the intervals do actually have a gap between them.
* If the intervals overlap or abut, then null is returned.
* <p>
* When two intervals are compared the result is one of three states:
* (a) they abut, (b) there is a gap between them, (c) they overlap.
* The abuts state takes precedence over the other two, thus a zero duration
* interval at the start of a larger interval abuts and does not overlap.
* <p>
* The chronology of the returned interval is the same as that of
* this interval (the chronology of the interval parameter is not used).
* Note that the use of the chronology was only correctly implemented
* in version 1.3.
*
* @param interval the interval to examine, null means now
* @return the gap interval, null if no gap
* @since 1.1
*/
def gap(interval: ReadableInterval): Interval = {
    val other: ReadableInterval = DateTimeUtils.getReadableInterval(interval)
    val otherStart: Long = other.getStartMillis
    val otherEnd: Long = other.getEndMillis
val thisStart: Long = getStartMillis
val thisEnd: Long = getEndMillis
if (thisStart > otherEnd) {
return new Interval(otherEnd, thisStart, getChronology)
}
else if (otherStart > thisEnd) {
return new Interval(thisEnd, otherStart, getChronology)
}
else {
return null
}
}
/**
* Does this interval abut with the interval specified.
* <p>
* Intervals are inclusive of the start instant and exclusive of the end.
* An interval abuts if it starts immediately after, or ends immediately
* before this interval without overlap.
* A zero duration interval abuts with itself.
* <p>
* When two intervals are compared the result is one of three states:
* (a) they abut, (b) there is a gap between them, (c) they overlap.
* The abuts state takes precedence over the other two, thus a zero duration
* interval at the start of a larger interval abuts and does not overlap.
* <p>
* For example:
* <pre>
* [09:00 to 10:00) abuts [08:00 to 08:30) = false (completely before)
* [09:00 to 10:00) abuts [08:00 to 09:00) = true
* [09:00 to 10:00) abuts [08:00 to 09:01) = false (overlaps)
*
* [09:00 to 10:00) abuts [09:00 to 09:00) = true
* [09:00 to 10:00) abuts [09:00 to 09:01) = false (overlaps)
*
* [09:00 to 10:00) abuts [10:00 to 10:00) = true
* [09:00 to 10:00) abuts [10:00 to 10:30) = true
*
* [09:00 to 10:00) abuts [10:30 to 11:00) = false (completely after)
*
* [14:00 to 14:00) abuts [14:00 to 14:00) = true
* [14:00 to 14:00) abuts [14:00 to 15:00) = true
* [14:00 to 14:00) abuts [13:00 to 14:00) = true
* </pre>
*
* @param interval the interval to examine, null means now
* @return true if the interval abuts
* @since 1.1
*/
def abuts(interval: ReadableInterval): Boolean = {
if (interval == null) {
val now: Long = DateTimeUtils.currentTimeMillis
return (getStartMillis == now || getEndMillis == now)
}
else {
return (interval.getEndMillis == getStartMillis || getEndMillis == interval.getStartMillis)
}
}
/**
* Creates a new interval with the same start and end, but a different chronology.
*
* @param chronology the chronology to use, null means ISO default
* @return an interval with a different chronology
*/
def withChronology(chronology: Chronology): Interval = {
if (getChronology eq chronology) {
return this
}
return new Interval(getStartMillis, getEndMillis, chronology)
}
/**
* Creates a new interval with the specified start millisecond instant.
*
* @param startInstant the start instant for the new interval
* @return an interval with the end from this interval and the specified start
* @throws IllegalArgumentException if the resulting interval has end before start
*/
def withStartMillis(startInstant: Long): Interval = {
if (startInstant == getStartMillis) {
return this
}
return new Interval(startInstant, getEndMillis, getChronology)
}
/**
* Creates a new interval with the specified start instant.
*
* @param start the start instant for the new interval, null means now
* @return an interval with the end from this interval and the specified start
* @throws IllegalArgumentException if the resulting interval has end before start
*/
def withStart(start: ReadableInstant): Interval = {
val startMillis: Long = DateTimeUtils.getInstantMillis(start)
return withStartMillis(startMillis)
}
/**
   * Creates a new interval with the specified end millisecond instant.
*
* @param endInstant the end instant for the new interval
* @return an interval with the start from this interval and the specified end
* @throws IllegalArgumentException if the resulting interval has end before start
*/
def withEndMillis(endInstant: Long): Interval = {
if (endInstant == getEndMillis) {
return this
}
return new Interval(getStartMillis, endInstant, getChronology)
}
/**
* Creates a new interval with the specified end instant.
*
* @param end the end instant for the new interval, null means now
* @return an interval with the start from this interval and the specified end
* @throws IllegalArgumentException if the resulting interval has end before start
*/
def withEnd(end: ReadableInstant): Interval = {
val endMillis: Long = DateTimeUtils.getInstantMillis(end)
return withEndMillis(endMillis)
}
/**
* Creates a new interval with the specified duration after the start instant.
*
* @param duration the duration to add to the start to get the new end instant, null means zero
* @return an interval with the start from this interval and a calculated end
* @throws IllegalArgumentException if the duration is negative
*/
def withDurationAfterStart(duration: ReadableDuration): Interval = {
val durationMillis: Long = DateTimeUtils.getDurationMillis(duration)
if (durationMillis == toDurationMillis) {
return this
}
val chrono: Chronology = getChronology
val startMillis: Long = getStartMillis
val endMillis: Long = chrono.add(startMillis, durationMillis, 1)
return new Interval(startMillis, endMillis, chrono)
}
/**
* Creates a new interval with the specified duration before the end instant.
*
* @param duration the duration to subtract from the end to get the new start instant, null means zero
* @return an interval with the end from this interval and a calculated start
* @throws IllegalArgumentException if the duration is negative
*/
def withDurationBeforeEnd(duration: ReadableDuration): Interval = {
val durationMillis: Long = DateTimeUtils.getDurationMillis(duration)
if (durationMillis == toDurationMillis) {
return this
}
val chrono: Chronology = getChronology
val endMillis: Long = getEndMillis
val startMillis: Long = chrono.add(endMillis, durationMillis, -1)
return new Interval(startMillis, endMillis, chrono)
}
/**
* Creates a new interval with the specified period after the start instant.
*
* @param period the period to add to the start to get the new end instant, null means zero
* @return an interval with the start from this interval and a calculated end
* @throws IllegalArgumentException if the period is negative
*/
def withPeriodAfterStart(period: ReadablePeriod): Interval = {
if (period == null) {
return withDurationAfterStart(null)
}
val chrono: Chronology = getChronology
val startMillis: Long = getStartMillis
val endMillis: Long = chrono.add(period, startMillis, 1)
return new Interval(startMillis, endMillis, chrono)
}
/**
* Creates a new interval with the specified period before the end instant.
*
* @param period the period to subtract from the end to get the new start instant, null means zero
* @return an interval with the end from this interval and a calculated start
* @throws IllegalArgumentException if the period is negative
*/
def withPeriodBeforeEnd(period: ReadablePeriod): Interval = {
if (period == null) {
return withDurationBeforeEnd(null)
}
val chrono: Chronology = getChronology
val endMillis: Long = getEndMillis
val startMillis: Long = chrono.add(period, endMillis, -1)
return new Interval(startMillis, endMillis, chrono)
}
} | aparo/scalajs-joda | src/main/scala/org/joda/time/Interval.scala | Scala | apache-2.0 | 18,671 |
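// Editor's sketch (hedged, not part of the Interval source above): a minimal illustration of the
// overlap / gap / abuts semantics documented in the class. It assumes the constructors and accessors
// compile as declared in this port; the object name and the millisecond values are made up.
import org.joda.time.Interval

object IntervalSemanticsSketch {
  def main(args: Array[String]): Unit = {
    val hour = 3600000L
    val morning = new Interval(9 * hour, 10 * hour)              // [09:00, 10:00)
    val earlier = new Interval(8 * hour, 9 * hour)               // [08:00, 09:00)
    val later   = new Interval(10 * hour + 1800000L, 11 * hour)  // [10:30, 11:00)

    println(morning.abuts(earlier))   // true: `earlier` ends exactly where `morning` starts
    println(morning.overlap(earlier)) // null: abutting intervals share no common span
    println(morning.gap(later))       // the uncovered span [10:00, 10:30) between the two
  }
}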
package com.rocketfuel.sdbc.base
import cats.effect.Async
import fs2.Stream
import scala.collection.{AbstractIterator, GenTraversableOnce}
/**
* `CloseableIterator` has a similar interface to scala.collection.Iterator,
* but also is Closeable, and combinators return CloseableIterators.
*
* The iterator will close itself when you get the last value,
* but otherwise you'll want to close it manually. For instance, calls
* to `drop` and `take` return a CloseableIterator, but it won't close
* itself when you fully consume it unless it also happens to fully consume
* the original iterator.
*
* @define closes
* This method fully consumes the iterator, and so it closes itself.
* @define doesNotClose
* This method might not consume the iterator, and so you should close it manually.
* @define resultCloses
* If you fully consume the resulting iterator, the resource will be closed.
* @define resultDoesNotClose
* If you fully consume the resulting iterator, the resource might not be
* closed, and so you should close it manually.
*/
class CloseableIterator[+A] private (
private val underlying: Iterator[A],
private[base] val closer: CloseableIterator.CloseTracking
) extends TraversableOnce[A]
with AutoCloseable {
def this(original: Iterator[A], resource: AutoCloseable) {
this(original, CloseableIterator.CloseTracking(original, resource))
}
override def close(): Unit = {
closer.close()
}
/*
Make sure that the iterator is closed at the end whether the user is calling
`hasNext` or not.
*/
private var calledHasNext = false
private var _hasNext = false
def hasNext: Boolean = {
if (!calledHasNext) {
_hasNext = underlying.hasNext
calledHasNext = true
closer.closeIfConsumed()
}
_hasNext
}
def next(): A = {
if (hasNext) {
calledHasNext = false
underlying.next()
} else Iterator.empty.next()
}
/**
* Use this iterator wherever you need a [[scala.Iterator]]. You lose the ability
* to close the resource manually, but if the iterator is consumed it will still
* close itself.
*/
override lazy val toIterator: Iterator[A] = {
new AbstractIterator[A] {
override def hasNext: Boolean = {
CloseableIterator.this.hasNext
}
override def next(): A = {
CloseableIterator.this.next()
}
}
}
override def hasDefiniteSize: Boolean = {
underlying.hasDefiniteSize
}
override def seq: TraversableOnce[A] = this
override def toTraversable: Traversable[A] = toIterator.toTraversable
override def toStream: scala.Stream[A] = toIterator.toStream
def nextOption(): Option[A] = if (hasNext) Some(next()) else None
/**
* An iterator that shares the same close method as the parent. It closes
* only if the parent is consumed.
*/
private def mapped[B](
mappedUnderlying: Iterator[B]
): CloseableIterator[B] =
new CloseableIterator[B](mappedUnderlying, closer)
def map[B](f: A => B): CloseableIterator[B] =
mapped[B](underlying.map(f))
/**
* @note Reuse: $resultCloses
*/
def flatMap[B](f: A => GenTraversableOnce[B]): CloseableIterator[B] =
mapped[B](underlying.flatMap(f))
/**
* @note Reuse: $resultCloses
*/
def filter(p: A => Boolean): CloseableIterator[A] =
mapped(underlying.filter(p))
/**
* @note Reuse: $doesNotClose
*/
def corresponds[B](that: GenTraversableOnce[B])(p: (A, B) => Boolean): Boolean =
toIterator.corresponds(that)(p)
/**
* @note Reuse: $doesNotClose
*/
def corresponds[B](that: CloseableIterator[B])(p: (A, B) => Boolean): Boolean =
toIterator.corresponds(that.toIterator)(p)
/**
* @note Reuse: $resultCloses
*/
def filterNot(p: A => Boolean): CloseableIterator[A] =
mapped(underlying.filterNot(p))
/**
* @note Reuse: $resultCloses
*/
def collect[B](pf: PartialFunction[A, B]): CloseableIterator[B] =
mapped(underlying.collect(pf))
/**
* @note Reuse: $resultCloses
*/
def scanLeft[B](z: B)(op: (B, A) => B): CloseableIterator[B] =
mapped(underlying.scanLeft(z)(op))
/**
* @note Reuse: $resultDoesNotClose
*/
def take(n: Int): CloseableIterator[A] =
mapped(underlying.take(n))
/**
* @note Reuse: $resultCloses
*/
def drop(n: Int): CloseableIterator[A] = {
mapped(underlying.drop(n))
}
/**
* @note Reuse: $resultDoesNotClose
*/
def slice(from: Int, until: Int): CloseableIterator[A] =
mapped(underlying.slice(from, until))
/**
* @note Reuse: $resultDoesNotClose
*/
def takeWhile(p: A => Boolean): CloseableIterator[A] =
mapped(underlying.takeWhile(p))
/**
* @note Consuming the first iterator might close the resource. If not, the second will.
*/
def span(p: A => Boolean): (CloseableIterator[A], CloseableIterator[A]) = {
val (has, afterHasNot) = toIterator.span(p)
(mapped(has), mapped(afterHasNot))
}
/**
* @note Reuse: $resultCloses
*/
def dropWhile(p: A => Boolean): CloseableIterator[A] =
mapped[A](underlying.dropWhile(p))
/**
* @note Reuse: $resultDoesNotClose
*/
def zip[B](that: CloseableIterator[B]): CloseableIterator[(A, B)] =
mapped(underlying.zip(that.toIterator))
/**
* @note Reuse: $resultDoesNotClose
*/
def zip[B](that: Iterator[B]): CloseableIterator[(A, B)] =
mapped(underlying.zip(that))
/**
* @note Reuse: $resultCloses
*/
def zipWithIndex: CloseableIterator[(A, Int)] =
mapped(underlying.zipWithIndex)
/**
* @note Reuse: $resultCloses
*/
def zipAll[B, A1 >: A, B1 >: B](that: Iterator[B], thisElem: A1, thatElem: B1): CloseableIterator[(A1, B1)] =
mapped[(A1, B1)](underlying.zipAll(that, thisElem, thatElem))
/**
* @note Reuse: $resultCloses
*/
def zipAll[B, A1 >: A, B1 >: B](that: CloseableIterator[B], thisElem: A1, thatElem: B1): CloseableIterator[(A1, B1)] =
mapped[(A1, B1)](underlying.zipAll(that.toIterator, thisElem, thatElem))
/**
* @note Reuse: $resultCloses
*/
def grouped[B >: A](size: Int): CloseableIterator[Seq[B]] =
mapped(underlying.grouped[B](size))
/**
* @note Reuse: $resultCloses
*/
def sliding[B >: A](size: Int, step: Int = 1): CloseableIterator[Seq[B]] =
mapped(underlying.sliding(size, step))
/**
* @note Reuse: $closes
*/
override def size: Int = underlying.size
/**
* @note Reuse: $closes
*/
def length: Int = this.size
/**
* @note Reuse: $resultCloses
*/
def duplicate: (CloseableIterator[A], CloseableIterator[A]) = {
val (first, second) = toIterator.duplicate
(mapped(first), mapped(second))
}
/**
* @note Reuse: $doesNotClose
*/
def sameElements(that: Iterator[_]): Boolean =
toIterator.sameElements(that)
/**
* @note Reuse: $doesNotClose
*/
def sameElements(that: CloseableIterator[_]): Boolean =
toIterator.sameElements(that.toIterator)
/**
* @note Reuse: $closes
*/
override def foreach[U](f: (A) => U): Unit = toIterator.foreach(f)
override def isEmpty: Boolean = !hasNext
/**
* @note Reuse: $doesNotClose
*/
override def forall(p: (A) => Boolean): Boolean = toIterator.forall(p)
/**
* @note Reuse: $doesNotClose
*/
override def exists(p: (A) => Boolean): Boolean = toIterator.exists(p)
/**
* @note Reuse: $doesNotClose
*/
override def find(p: (A) => Boolean): Option[A] = toIterator.find(p)
/**
* @note Reuse: $doesNotClose
*/
override def copyToArray[B >: A](xs: Array[B], start: Int, len: Int): Unit = toIterator.copyToArray(xs, start, len)
override def isTraversableAgain: Boolean = underlying.isTraversableAgain
override def toString: String = "<closeable iterator>"
}
object CloseableIterator {
def toStream[F[x], A](i: F[CloseableIterator[A]])(implicit a: Async[F]): fs2.Stream[F, A] = {
fs2.Stream.bracket[F, CloseableIterator[A]](i)(i => a.delay(i.close())).flatMap(i => Stream.fromIterator[F](i.toIterator))
}
private val _empty: CloseableIterator[Nothing] = {
val i = new CloseableIterator(Iterator.empty, CloseTracking(Iterator.empty, new AutoCloseable {
override def close(): Unit = ()
}))
i.close()
i
}
def empty[A]: CloseableIterator[A] = _empty
case class CloseTracking(
original: Iterator[_],
resource: AutoCloseable
) extends AutoCloseable {
override def close(): Unit = {
if (!_isClosed) {
_isClosed = true
resource.close()
}
}
def closeIfConsumed(): Unit = {
if (!original.hasNext) {
close()
}
}
protected var _isClosed: Boolean = false
def isClosed: Boolean = _isClosed
}
}
| rocketfuel/sdbc | base/src/main/scala-2.1x/com/rocketfuel/sdbc/base/CloseableIterator.scala | Scala | bsd-3-clause | 8,759 |
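// Editor's sketch (hedged, not part of the library file above): ties an ordinary Iterator to the
// AutoCloseable that backs it. The file path is hypothetical; scala.io.Source is used only because
// it is both a source of an iterator and a Closeable.
import com.rocketfuel.sdbc.base.CloseableIterator

object CloseableIteratorSketch {
  def main(args: Array[String]): Unit = {
    val source = scala.io.Source.fromFile("/tmp/example.txt") // hypothetical input file
    val lines = new CloseableIterator[String](source.getLines(), source)
    try {
      // Fully consuming the iterator lets it close the source on its own; partially consumed
      // derivatives (take, drop, span) may not, hence the explicit close in `finally`.
      lines.map(_.trim).filter(_.nonEmpty).foreach(println)
    } finally {
      lines.close()
    }
  }
}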
package com.sksamuel.elastic4s.search
import com.sksamuel.elastic4s.ElasticsearchClientUri
import com.sksamuel.elastic4s.http.{ElasticDsl, HttpClient}
import com.sksamuel.elastic4s.testkit.SharedElasticSugar
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy
import org.scalatest.{FlatSpec, Matchers}
class MatchQueryTest
extends FlatSpec
with SharedElasticSugar
with Matchers
with ElasticDsl {
val http = HttpClient(ElasticsearchClientUri("elasticsearch://" + node.ipAndPort))
http.execute {
createIndex("units")
}.await
http.execute {
bulk(
indexInto("units/base") fields("name" -> "candela", "scientist.name" -> "Jules Violle", "scientist.country" -> "France")
).refresh(RefreshPolicy.IMMEDIATE)
}.await
"a match query" should "support selecting nested properties" in {
val resp = http.execute {
search("units") query matchQuery("name", "candela") sourceInclude "scientist.name"
}.await
resp.hits.hits.head.sourceAsMap shouldBe Map("scientist.name" -> "Jules Violle")
}
}
| aroundus-inc/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/search/MatchQueryTest.scala | Scala | apache-2.0 | 1,063 |
package controllers
import play.api.mvc.Controller
import uk.gov.dvla.vehicles.presentation.common.LogFormats.DVLALogger
trait BusinessController extends Controller with DVLALogger {
protected implicit val isPrivateKeeper = false
protected implicit val AllCacheKeys = models.AllCacheKeys
protected implicit val DisposeCacheKeys = models.DisposeCacheKeys
protected implicit val DisposeOnlyCacheKeys = models.DisposeOnlyCacheKeys
} | dvla/vehicles-online | app/controllers/BusinessController.scala | Scala | mit | 441 |
package org.jetbrains.plugins.scala
package lang.parser.parsing
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.expressions.BlockExpr
import org.jetbrains.plugins.scala.lang.parser.parsing.patterns.Pattern
import org.jetbrains.plugins.scala.lang.parser.util.ParserUtils
import org.jetbrains.plugins.scala.lang.parser.{BlockIndentation, ScalaElementType}
/**
* @author kfeodorov
* @since 03.03.14.
*/
object CommonUtils {
def parseInterpolatedString(isPattern: Boolean)(implicit builder: ScalaPsiBuilder): Unit = {
val prefixMarker = builder.mark()
builder.advanceLexer()
val prefixType =
if (isPattern) ScalaElementType.INTERPOLATED_PREFIX_PATTERN_REFERENCE
else ScalaElementType.INTERPOLATED_PREFIX_LITERAL_REFERENCE
prefixMarker.done(prefixType)
val patternArgsMarker = builder.mark()
while (!builder.eof() &&
builder.getTokenType != ScalaTokenTypes.tINTERPOLATED_STRING_END &&
builder.getTokenType != ScalaTokenTypes.tWRONG_LINE_BREAK_IN_STRING
) {
remapRawStringTokens(builder)
if (builder.getTokenType == ScalaTokenTypes.tINTERPOLATED_STRING_INJECTION) {
builder.advanceLexer()
if (isPattern) {
if (builder.getTokenType == ScalaTokenTypes.tIDENTIFIER) {
val idMarker = builder.mark()
builder.advanceLexer()
idMarker.done(ScalaElementType.REFERENCE_PATTERN)
}
else if (builder.getTokenType == ScalaTokenTypes.tLBRACE) {
builder.advanceLexer()
if (!Pattern())
builder.error(ScalaBundle.message("wrong.pattern"))
else if (builder.getTokenType != ScalaTokenTypes.tRBRACE) {
builder.error(ScalaBundle.message("right.brace.expected"))
ParserUtils.parseLoopUntilRBrace(braceReported = true) {}
}
else
builder.advanceLexer()
}
}
else if (!BlockExpr()) {
if (builder.getTokenType == ScalaTokenTypes.tIDENTIFIER) {
val idMarker = builder.mark()
builder.advanceLexer()
idMarker.done(ScalaElementType.REFERENCE_EXPRESSION)
}
else if (builder.getTokenType == ScalaTokenTypes.kTHIS) {
val literalMarker = builder.mark()
builder.advanceLexer()
literalMarker.done(ScalaElementType.THIS_REFERENCE)
}
else if (!builder.getTokenText.startsWith("$"))
builder.error(ScalaBundle.message("bad.interpolated.string.injection"))
}
}
else {
// TODO: are these 2 dead branches? they are not triggered by tests
builder.getTokenType match {
case ScalaTokenTypes.tWRONG_STRING =>
builder.error(ScalaBundle.message("wrong.string.literal"))
case ScalaTokenTypes.tWRONG_LINE_BREAK_IN_STRING =>
builder.error(ScalaBundle.message("wrong.string.literal"))
case _ =>
}
builder.advanceLexer()
}
}
if (isPattern) patternArgsMarker.done(ScalaElementType.PATTERN_ARGS)
else patternArgsMarker.drop()
// FIXME: non-interpolated strings behave a little differently
// e.g. unclosed string literal s"\\" vs "\\"
if (builder.eof() || builder.getTokenType == ScalaTokenTypes.tWRONG_LINE_BREAK_IN_STRING) {
builder.error(ScalaBundle.message("end.of.string.expected"))
}
if (!builder.eof())
builder.advanceLexer()
}
/** see comments to [[ScalaTokenTypes.tINTERPOLATED_RAW_STRING]] and [[ScalaTokenTypes.tINTERPOLATED_MULTILINE_RAW_STRING]] */
def remapRawStringTokens(builder: ScalaPsiBuilder): Unit =
builder.getTokenType match {
case ScalaTokenTypes.tINTERPOLATED_RAW_STRING =>
builder.remapCurrentToken(ScalaTokenTypes.tINTERPOLATED_STRING)
case ScalaTokenTypes.tINTERPOLATED_MULTILINE_RAW_STRING =>
builder.remapCurrentToken(ScalaTokenTypes.tINTERPOLATED_MULTILINE_STRING)
case _ =>
}
def eatAllSemicolons()(implicit builder: ScalaPsiBuilder): Unit = {
while (builder.getTokenType == ScalaTokenTypes.tSEMICOLON) {
builder.advanceLexer()
}
}
def eatAllSemicolons(blockIndentation: BlockIndentation)(implicit builder: ScalaPsiBuilder): Unit = {
while (builder.getTokenType == ScalaTokenTypes.tSEMICOLON) {
blockIndentation.fromHere()
builder.advanceLexer()
}
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/parser/parsing/CommonUtils.scala | Scala | apache-2.0 | 4,553 |
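// Editor's sketch (hedged, not tied to the parser file above): the injection shapes that
// parseInterpolatedString handles inside an interpolated literal — a bare identifier and a `${...}`
// block (a `$this` injection is recognised as well) — written as ordinary Scala such a parser would
// consume. The pattern case assumes Scala 2.13+, where interpolated-string patterns are supported.
object InterpolatedFormsSketch {
  val name = "world"
  val byIdentifier = s"hello $name"     // identifier injection -> REFERENCE_EXPRESSION
  val byBlock      = s"sum = ${1 + 2}"  // block injection, parsed via BlockExpr

  def greeted(x: Any): Boolean = x match {
    case s"hello $who" => who.nonEmpty  // pattern side -> REFERENCE_PATTERN inside PATTERN_ARGS
    case _             => false
  }
}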
package com.typesafe.slick.testkit.tests
import scala.language.higherKinds
import com.typesafe.slick.testkit.util.{JdbcTestDB, AsyncTest}
class MainTest extends AsyncTest[JdbcTestDB] { mainTest =>
import tdb.profile.api._
case class User(id: Int, first: String, last: String)
class Users(tag: Tag) extends Table[(Int, String, Option[String])](tag, "users") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def first = column[String]("first", O SqlType "varchar(64)")
def last = column[Option[String]]("last")
def * = (id, first, last)
def orders = mainTest.orders filter { _.userID === id }
}
lazy val users = TableQuery[Users]
class Orders(tag: Tag) extends Table[(Int, Int, String, Boolean, Option[Boolean])](tag, "orders") {
def userID = column[Int]("userID")
def orderID = column[Int]("orderID", O.PrimaryKey, O.AutoInc)
def product = column[String]("product")
def shipped = column[Boolean]("shipped")
def rebate = column[Option[Boolean]]("rebate")
def * = (userID, orderID, product, shipped, rebate)
}
lazy val orders = TableQuery[Orders]
def test = {
val ddl = users.schema ++ orders.schema
ddl.create.statements.toSeq.length.should(_ >= 2)
users.map(u => (u.first, u.last)).insertStatement
val q1 = (for(u <- users) yield (u.id, u.first, u.last)).sortBy(_._1)
q1.result.statements.toSeq.length.should(_ >= 1)
val q1b = for(u <- users) yield (u.id, u.first.?, u.last,
(Case If u.id < 3 Then "low" If u.id < 6 Then "medium" Else "high"))
q1b.result.statements.toSeq.length.should(_ >= 1)
val q2 = for(u <- users if u.first === "Apu".bind) yield (u.last, u.id)
q2.result.statements.toSeq.length.should(_ >= 1)
val expectedUserTuples = List(
(1,"Homer",Some("Simpson")),
(2,"Marge",Some("Simpson")),
(3,"Apu",Some("Nahasapeemapetilon")),
(4,"Carl",Some("Carlson")),
(5,"Lenny",Some("Leonard")),
(6,"Santa's Little Helper",None),
(7,"Snowball",None)
)
val p1 = db.stream(((for {
_ <- ddl.create
ins1 <- users.map(u => (u.first, u.last)) += ("Homer", Some("Simpson"))
ins2 <- users.map(u => (u.first, u.last)) ++= Seq(
("Marge", Some("Simpson")), ("Apu", Some("Nahasapeemapetilon")), ("Carl", Some("Carlson")), ("Lenny", Some("Leonard")) )
ins3 <- users.map(_.first) ++= Seq("Santa's Little Helper", "Snowball")
total = for(i2 <- ins2; i3 <- ins3) yield ins1 + i2 + i3
/* All test DBs seem to report the actual number of rows. None would also be acceptable: */
_ = total.map(_ shouldBe 7)
r1 <- q1.result
_ = r1 shouldBe expectedUserTuples
} yield ()) andThen q1.result).withPinnedSession)
materialize(p1.mapResult { case (id,f,l) => User(id,f,l.orNull) }).flatMap { allUsers =>
allUsers shouldBe expectedUserTuples.map{ case (id,f,l) => User(id,f,l.orNull) }
db.run(for {
r1b <- q1b.result
_ = r1b shouldBe expectedUserTuples.map {
case (id,f,l) => (id, Some(f), l, if(id < 3) "low" else if(id < 6) "medium" else "high")
}
_ <- q2.result.head.map(_ shouldBe (Some("Nahasapeemapetilon"),3))
} yield allUsers)
}.flatMap { allUsers =>
      //TODO verifiable non-random test
val ordersInserts =
for(u <- allUsers if u.first != "Apu" && u.first != "Snowball"; i <- 1 to 2)
yield orders.map(o => (o.userID, o.product, o.shipped, o.rebate)) += (
u.id, "Gizmo "+((scala.math.random()*10)+1).toInt, i == 2, Some(u.first == "Marge"))
db.run(seq(ordersInserts: _*))
}.flatMap { _ =>
val q3 = for (
u <- users.sortBy(_.first) if u.last.isDefined;
o <- u.orders
) yield (u.first, u.last, o.orderID, o.product, o.shipped, o.rebate)
q3.result.statements.toSeq.length.should(_ >= 1)
// All Orders by Users with a last name by first name:
materialize(db.stream(q3.result)).map(s => s.length shouldBe 8)
}.flatMap { _ =>
val q4 = for {
u <- users
o <- u.orders
if (o.orderID === (for { o2 <- orders filter(o.userID === _.userID) } yield o2.orderID).max)
} yield (u.first, o.orderID)
q4.result.statements.toSeq.length.should(_ >= 1)
def maxOfPer[T <: Table[_], C[_]](c: Query[T, _, C])(m: (T => Rep[Int]), p: (T => Rep[Int])) =
c filter { o => m(o) === (for { o2 <- c if p(o) === p(o2) } yield m(o2)).max }
val q4b = for (
u <- users;
o <- maxOfPer(orders)(_.orderID, _.userID)
if o.userID === u.id
) yield (u.first, o.orderID)
q4b.result.statements.toSeq.length.should(_ >= 1)
val q4d = for (
u <- users if u.first inSetBind List("Homer", "Marge");
o <- orders if o.userID === u.id
) yield (u.first, (LiteralColumn(1) + o.orderID, 1), o.product)
q4d.result.statements.toSeq.length.should(_ >= 1)
val q4e = for (
u <- users if u.first inSetBind List("Homer", "Marge");
o <- orders if o.userID in (u.id, u.id)
) yield (u.first, o.orderID)
q4e.result.statements.toSeq.length.should(_ >= 1)
db.run(for {
r4 <- q4.to[Set].result.named("Latest Order per User")
_ = r4 shouldBe Set(("Homer",2), ("Marge",4), ("Carl",6), ("Lenny",8), ("Santa's Little Helper",10))
r4b <- q4b.to[Set].result.named("Latest Order per User, using maxOfPer")
_ = r4b shouldBe Set(("Homer",2), ("Marge",4), ("Carl",6), ("Lenny",8), ("Santa's Little Helper",10))
_ <- q4d.result.map(r => r.length shouldBe 4)
r4e <- q4e.to[Set].result.named("Orders matched using in(...) syntax")
_ = r4e shouldBe Set(("Homer",1), ("Homer",2), ("Marge",3), ("Marge",4))
} yield ())
}.flatMap { _ =>
val b1 = orders.filter( o => o.shipped && o.shipped ).map( o => o.shipped && o.shipped )
val b2 = orders.filter( o => o.shipped && o.rebate ).map( o => o.shipped && o.rebate )
val b3 = orders.filter( o => o.rebate && o.shipped ).map( o => o.rebate && o.shipped )
val b4 = orders.filter( o => o.rebate && o.rebate ).map( o => o.rebate && o.rebate )
val b5 = orders.filter( o => !o.shipped ).map( o => !o.shipped )
val b6 = orders.filter( o => !o.rebate ).map( o => !o.rebate )
val b7 = orders.map( o => o.shipped === o.shipped )
val b8 = orders.map( o => o.rebate === o.shipped )
val b9 = orders.map( o => o.shipped === o.rebate )
val b10 = orders.map( o => o.rebate === o.rebate )
b1.result.statements.toSeq.length.should(_ >= 1)
b2.result.statements.toSeq.length.should(_ >= 1)
b3.result.statements.toSeq.length.should(_ >= 1)
b4.result.statements.toSeq.length.should(_ >= 1)
b5.result.statements.toSeq.length.should(_ >= 1)
b6.result.statements.toSeq.length.should(_ >= 1)
b7.result.statements.toSeq.length.should(_ >= 1)
b8.result.statements.toSeq.length.should(_ >= 1)
b9.result.statements.toSeq.length.should(_ >= 1)
b10.result.statements.toSeq.length.should(_ >= 1)
val q5 = users filterNot { _.id in orders.map(_.userID) }
q5.result.statements.toSeq.length.should(_ >= 1)
q5.delete.statements.toSeq.length.should(_ >= 1)
val q6 = Query(q5.length)
q6.result.statements.toSeq.length.should(_ >= 1)
db.run(for {
r5 <- q5.to[Set].result.named("Users without Orders")
_ = r5 shouldBe Set((3,"Apu",Some("Nahasapeemapetilon")), (7,"Snowball",None))
deleted <- q5.delete
_ = deleted shouldBe 2
_ <- q6.result.head.map(_ shouldBe 0)
} yield ())
}.flatMap { _ =>
val q7 = Compiled { (s: Rep[String]) => users.filter(_.first === s).map(_.first) }
q7("Homer").updateStatement
val q7b = Compiled { users.filter(_.first === "Homer Jay").map(_.first) }
q7b.updateStatement
db.run(for {
_ <- q7("Homer").update("Homer Jay").map(_ shouldBe 1)
_ <- q7b.update("Homie").map(_ shouldBe 1)
_ <- q7("Marge").map(_.length).result.map(_ shouldBe 1)
_ <- q7("Marge").delete
_ <- q7("Marge").map(_.length).result.map(_ shouldBe 0)
} yield ())
}.flatMap { _ =>
val q8 = for(u <- users if u.last.isEmpty) yield (u.first, u.last)
q8.updateStatement
val q9 = users.length
q9.result.statements.toSeq.length.should(_ >= 1)
val q10 = users.filter(_.last inSetBind Seq()).map(u => (u.first, u.last))
db.run(for {
updated2 <- q8.update("n/a", Some("n/a"))
_ = updated2 shouldBe 1
_ <- q9.result.map(_ shouldBe 4)
_ <- q10.result.map(_ shouldBe Nil)
} yield ())
}
}
}
| slick/slick | slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/MainTest.scala | Scala | bsd-2-clause | 8,692 |
package codesniffer.codefunnel.utils
import javax.sql.DataSource
import com.caibowen.gplume.resource.ClassLoaderInputStreamProvider
import gplume.scala.context.{AppContext}
import gplume.scala.jdbc.DB
/**
* Created by Bowen Cai on 2/24/2016.
*/
object DBUtils {
def quote(param: String): String = {
val b = new scala.StringBuilder(param.length * 3 / 2)
    b.append('\'')
    val _idx1 = param.indexOf('\'')
    if (_idx1 < 0)
      b.append(param)
    else {
      for (c <- param) c match {
        case '\'' => b append '\'' append '\''
        case o => b append o
      }
    }
    b.append('\'').toString()
}
}
| xkommando/CodeSniffer | codefunnel/src/main/scala/codesniffer/codefunnel/utils/DBUtils.scala | Scala | lgpl-3.0 | 659 |
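// Editor's sketch (hedged, not part of the file above): expected behaviour of DBUtils.quote, which
// doubles embedded single quotes and wraps the value — standard SQL string-literal escaping.
import codesniffer.codefunnel.utils.DBUtils

object QuoteSketch {
  def main(args: Array[String]): Unit = {
    println(DBUtils.quote("O'Brien")) // 'O''Brien'
    println(DBUtils.quote("plain"))   // 'plain'
  }
}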
package notebook
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConverters._
import scala.concurrent._
import scala.concurrent.duration._
import akka.actor._
import akka.actor.Deploy
import com.typesafe.config.Config
import kernel.remote.{RemoteActorSystem, RemoteActorProcess}
/**
* A kernel is a remote VM with a set of sub-actors, each of which interacts with local resources (for example, WebSockets).
* The local resource must be fully initialized before we will let messages flow through to the remote actor. This is
 * accomplished by blocking on actor startup until the remote actor system has been spawned and its
 * deployment information is available.
*/
class Kernel(config:Config, system: ActorSystem) {
implicit val executor = system.dispatcher
val router = system.actorOf(Props(new ExecutionManager))
private val remoteDeployPromise = Promise[Deploy]
def remoteDeployFuture = remoteDeployPromise.future
case object ShutdownNow
def shutdown() { router ! ShutdownNow }
class ExecutionManager extends Actor with ActorLogging {
// These get filled in before we ever receive messages
var remoteInfo: RemoteActorSystem = null
override def preStart() {
remoteInfo = Await.result(RemoteActorSystem.spawn(config, system, "kernel"), 1 minutes)
remoteDeployPromise.success(remoteInfo.deploy)
}
override def postStop() {
if (remoteInfo != null)
remoteInfo.shutdownRemote()
}
def receive = {
case ShutdownNow =>
if (remoteInfo != null) {
remoteInfo.shutdownRemote()
}
}
}
}
object KernelManager {
def shutdown() {
kernels.values foreach { _.shutdown() }
}
val kernels = new ConcurrentHashMap[String, Kernel]().asScala
def get(id: String) = kernels.get(id)
def apply(id: String) = kernels(id)
def add(id:String, kernel: Kernel) {
kernels += id -> kernel
}
def remove(id:String) {
kernels -= id
}
}
| vitan/spark-notebook | modules/subprocess/src/main/scala/notebook/Kernel.scala | Scala | apache-2.0 | 1,967 |
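// Editor's sketch (hedged, not part of the file above): the KernelManager registry is a plain
// concurrent map keyed by kernel id. Construction of the Kernel itself (config + ActorSystem) is
// assumed to happen elsewhere in the hosting application; the object and method names are made up.
import notebook.{Kernel, KernelManager}

object KernelRegistrySketch {
  def register(id: String, kernel: Kernel): Unit =
    KernelManager.add(id, kernel)

  def shutdownAndForget(id: String): Unit = {
    KernelManager.get(id).foreach(_.shutdown()) // no-op if the id was never registered
    KernelManager.remove(id)
  }
}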
package org.workcraft.gui.propertyeditor.colour
import java.awt.Color
import org.workcraft.gui.propertyeditor.EditableProperty
import org.workcraft.scala.Expressions._
object ColorProperty {
def create(name: String, property: ModifiableExpression[Color]): Expression[EditableProperty] = property.map(value => {
EditableProperty(name, ColorCellEditor, ColorCellRenderer, value, property.set(_: Color))
})
}
| tuura/workcraft-2.2 | Gui/src/main/scala/org/workcraft/gui/propertyeditor/colour/ColorProperty.scala | Scala | gpl-3.0 | 417 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering
import breeze.linalg.{argmax, argtopk, normalize, sum, DenseMatrix => BDM, DenseVector => BDV}
import breeze.numerics.{exp, lgamma}
import org.apache.hadoop.fs.Path
import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.SparkContext
import org.apache.spark.annotation.Since
import org.apache.spark.api.java.{JavaPairRDD, JavaRDD}
import org.apache.spark.graphx.{Edge, EdgeContext, Graph, VertexId}
import org.apache.spark.mllib.linalg.{Matrices, Matrix, Vector, Vectors}
import org.apache.spark.mllib.util.{Loader, Saveable}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.util.BoundedPriorityQueue
/**
* Latent Dirichlet Allocation (LDA) model.
*
 * This abstraction allows for different underlying representations,
* including local and distributed data structures.
*/
@Since("1.3.0")
abstract class LDAModel private[clustering] extends Saveable {
/** Number of topics */
@Since("1.3.0")
def k: Int
  /** Vocabulary size (number of distinct terms in the vocabulary) */
@Since("1.3.0")
def vocabSize: Int
/**
* Concentration parameter (commonly named "alpha") for the prior placed on documents'
* distributions over topics ("theta").
*
* This is the parameter to a Dirichlet distribution.
*/
@Since("1.5.0")
def docConcentration: Vector
/**
* Concentration parameter (commonly named "beta" or "eta") for the prior placed on topics'
* distributions over terms.
*
* This is the parameter to a symmetric Dirichlet distribution.
*
* @note The topics' distributions over terms are called "beta" in the original LDA paper
* by Blei et al., but are called "phi" in many later papers such as Asuncion et al., 2009.
*/
@Since("1.5.0")
def topicConcentration: Double
/**
* Shape parameter for random initialization of variational parameter gamma.
* Used for variational inference for perplexity and other test-time computations.
*/
protected def gammaShape: Double
/**
* Inferred topics, where each topic is represented by a distribution over terms.
* This is a matrix of size vocabSize x k, where each column is a topic.
* No guarantees are given about the ordering of the topics.
*/
@Since("1.3.0")
def topicsMatrix: Matrix
/**
* Return the topics described by weighted terms.
*
* @param maxTermsPerTopic Maximum number of terms to collect for each topic.
* @return Array over topics. Each topic is represented as a pair of matching arrays:
* (term indices, term weights in topic).
* Each topic's terms are sorted in order of decreasing weight.
*/
@Since("1.3.0")
def describeTopics(maxTermsPerTopic: Int): Array[(Array[Int], Array[Double])]
/**
* Return the topics described by weighted terms.
*
* WARNING: If vocabSize and k are large, this can return a large object!
*
* @return Array over topics. Each topic is represented as a pair of matching arrays:
* (term indices, term weights in topic).
* Each topic's terms are sorted in order of decreasing weight.
*/
@Since("1.3.0")
def describeTopics(): Array[(Array[Int], Array[Double])] = describeTopics(vocabSize)
/* TODO (once LDA can be trained with Strings or given a dictionary)
* Return the topics described by weighted terms.
*
* This is similar to [[describeTopics()]] but returns String values for terms.
* If this model was trained using Strings or was given a dictionary, then this method returns
* terms as text. Otherwise, this method returns terms as term indices.
*
* This limits the number of terms per topic.
* This is approximate; it may not return exactly the top-weighted terms for each topic.
* To get a more precise set of top terms, increase maxTermsPerTopic.
*
* @param maxTermsPerTopic Maximum number of terms to collect for each topic.
* @return Array over topics. Each topic is represented as a pair of matching arrays:
* (terms, term weights in topic) where terms are either the actual term text
* (if available) or the term indices.
* Each topic's terms are sorted in order of decreasing weight.
*/
// def describeTopicsAsStrings(maxTermsPerTopic: Int): Array[(Array[Double], Array[String])]
/* TODO (once LDA can be trained with Strings or given a dictionary)
* Return the topics described by weighted terms.
*
* This is similar to [[describeTopics()]] but returns String values for terms.
* If this model was trained using Strings or was given a dictionary, then this method returns
* terms as text. Otherwise, this method returns terms as term indices.
*
* WARNING: If vocabSize and k are large, this can return a large object!
*
* @return Array over topics. Each topic is represented as a pair of matching arrays:
* (terms, term weights in topic) where terms are either the actual term text
* (if available) or the term indices.
* Each topic's terms are sorted in order of decreasing weight.
*/
// def describeTopicsAsStrings(): Array[(Array[Double], Array[String])] =
// describeTopicsAsStrings(vocabSize)
/* TODO
* Compute the log likelihood of the observed tokens, given the current parameter estimates:
* log P(docs | topics, topic distributions for docs, alpha, eta)
*
* Note:
* - This excludes the prior.
* - Even with the prior, this is NOT the same as the data log likelihood given the
* hyperparameters.
*
* @param documents RDD of documents, which are term (word) count vectors paired with IDs.
* The term count vectors are "bags of words" with a fixed-size vocabulary
* (where the vocabulary size is the length of the vector).
* This must use the same vocabulary (ordering of term counts) as in training.
* Document IDs must be unique and >= 0.
* @return Estimated log likelihood of the data under this model
*/
// def logLikelihood(documents: RDD[(Long, Vector)]): Double
/* TODO
* Compute the estimated topic distribution for each document.
* This is often called 'theta' in the literature.
*
* @param documents RDD of documents, which are term (word) count vectors paired with IDs.
* The term count vectors are "bags of words" with a fixed-size vocabulary
* (where the vocabulary size is the length of the vector).
* This must use the same vocabulary (ordering of term counts) as in training.
* Document IDs must be unique and greater than or equal to 0.
* @return Estimated topic distribution for each document.
* The returned RDD may be zipped with the given RDD, where each returned vector
* is a multinomial distribution over topics.
*/
// def topicDistributions(documents: RDD[(Long, Vector)]): RDD[(Long, Vector)]
}
/**
* Local LDA model.
* This model stores only the inferred topics.
*
* @param topics Inferred topics (vocabSize x k matrix).
*/
@Since("1.3.0")
class LocalLDAModel private[spark] (
@Since("1.3.0") val topics: Matrix,
@Since("1.5.0") override val docConcentration: Vector,
@Since("1.5.0") override val topicConcentration: Double,
override protected[spark] val gammaShape: Double = 100)
extends LDAModel with Serializable {
@Since("1.3.0")
override def k: Int = topics.numCols
@Since("1.3.0")
override def vocabSize: Int = topics.numRows
@Since("1.3.0")
override def topicsMatrix: Matrix = topics
@Since("1.3.0")
override def describeTopics(maxTermsPerTopic: Int): Array[(Array[Int], Array[Double])] = {
val brzTopics = topics.asBreeze.toDenseMatrix
Range(0, k).map { topicIndex =>
val topic = normalize(brzTopics(::, topicIndex), 1.0)
val (termWeights, terms) =
topic.toArray.zipWithIndex.sortBy(-_._1).take(maxTermsPerTopic).unzip
(terms.toArray, termWeights.toArray)
}.toArray
}
override protected def formatVersion = "1.0"
@Since("1.5.0")
override def save(sc: SparkContext, path: String): Unit = {
LocalLDAModel.SaveLoadV1_0.save(sc, path, topicsMatrix, docConcentration, topicConcentration,
gammaShape)
}
// TODO: declare in LDAModel and override once implemented in DistributedLDAModel
/**
* Calculates a lower bound on the log likelihood of the entire corpus.
*
* See Equation (16) in original Online LDA paper.
*
* @param documents test corpus to use for calculating log likelihood
* @return variational lower bound on the log likelihood of the entire corpus
*/
@Since("1.5.0")
def logLikelihood(documents: RDD[(Long, Vector)]): Double = logLikelihoodBound(documents,
docConcentration, topicConcentration, topicsMatrix.asBreeze.toDenseMatrix, gammaShape, k,
vocabSize)
/**
* Java-friendly version of [[logLikelihood]]
*/
@Since("1.5.0")
def logLikelihood(documents: JavaPairRDD[java.lang.Long, Vector]): Double = {
logLikelihood(documents.rdd.asInstanceOf[RDD[(Long, Vector)]])
}
/**
   * Calculate an upper bound on perplexity. (Lower is better.)
* See Equation (16) in original Online LDA paper.
*
* @param documents test corpus to use for calculating perplexity
* @return Variational upper bound on log perplexity per token.
*/
@Since("1.5.0")
def logPerplexity(documents: RDD[(Long, Vector)]): Double = {
val corpusTokenCount = documents
.map { case (_, termCounts) => termCounts.toArray.sum }
.sum()
-logLikelihood(documents) / corpusTokenCount
}
/** Java-friendly version of [[logPerplexity]] */
@Since("1.5.0")
def logPerplexity(documents: JavaPairRDD[java.lang.Long, Vector]): Double = {
logPerplexity(documents.rdd.asInstanceOf[RDD[(Long, Vector)]])
}
/**
   * Estimate the variational likelihood bound from `documents`:
* log p(documents) >= E_q[log p(documents)] - E_q[log q(documents)]
* This bound is derived by decomposing the LDA model to:
* log p(documents) = E_q[log p(documents)] - E_q[log q(documents)] + D(q|p)
* and noting that the KL-divergence D(q|p) >= 0.
*
* See Equation (16) in original Online LDA paper, as well as Appendix A.3 in the JMLR version of
* the original LDA paper.
* @param documents a subset of the test corpus
* @param alpha document-topic Dirichlet prior parameters
* @param eta topic-word Dirichlet prior parameter
* @param lambda parameters for variational q(beta | lambda) topic-word distributions
* @param gammaShape shape parameter for random initialization of variational q(theta | gamma)
* topic mixture distributions
* @param k number of topics
* @param vocabSize number of unique terms in the entire test corpus
*/
private def logLikelihoodBound(
documents: RDD[(Long, Vector)],
alpha: Vector,
eta: Double,
lambda: BDM[Double],
gammaShape: Double,
k: Int,
vocabSize: Long): Double = {
val brzAlpha = alpha.asBreeze.toDenseVector
// transpose because dirichletExpectation normalizes by row and we need to normalize
// by topic (columns of lambda)
val Elogbeta = LDAUtils.dirichletExpectation(lambda.t).t
val ElogbetaBc = documents.sparkContext.broadcast(Elogbeta)
// Sum bound components for each document:
// component for prob(tokens) + component for prob(document-topic distribution)
val corpusPart =
documents.filter(_._2.numNonzeros > 0).map { case (id: Long, termCounts: Vector) =>
val localElogbeta = ElogbetaBc.value
var docBound = 0.0D
val (gammad: BDV[Double], _, _) = OnlineLDAOptimizer.variationalTopicInference(
termCounts, exp(localElogbeta), brzAlpha, gammaShape, k)
val Elogthetad: BDV[Double] = LDAUtils.dirichletExpectation(gammad)
// E[log p(doc | theta, beta)]
termCounts.foreachActive { case (idx, count) =>
docBound += count * LDAUtils.logSumExp(Elogthetad + localElogbeta(idx, ::).t)
}
// E[log p(theta | alpha) - log q(theta | gamma)]
docBound += sum((brzAlpha - gammad) :* Elogthetad)
docBound += sum(lgamma(gammad) - lgamma(brzAlpha))
docBound += lgamma(sum(brzAlpha)) - lgamma(sum(gammad))
docBound
}.sum()
// Bound component for prob(topic-term distributions):
// E[log p(beta | eta) - log q(beta | lambda)]
val sumEta = eta * vocabSize
val topicsPart = sum((eta - lambda) :* Elogbeta) +
sum(lgamma(lambda) - lgamma(eta)) +
sum(lgamma(sumEta) - lgamma(sum(lambda(::, breeze.linalg.*))))
corpusPart + topicsPart
}
/**
* Predicts the topic mixture distribution for each document (often called "theta" in the
* literature). Returns a vector of zeros for an empty document.
*
* This uses a variational approximation following Hoffman et al. (2010), where the approximate
* distribution is called "gamma." Technically, this method returns this approximation "gamma"
* for each document.
* @param documents documents to predict topic mixture distributions for
* @return An RDD of (document ID, topic mixture distribution for document)
*/
@Since("1.3.0")
// TODO: declare in LDAModel and override once implemented in DistributedLDAModel
def topicDistributions(documents: RDD[(Long, Vector)]): RDD[(Long, Vector)] = {
// Double transpose because dirichletExpectation normalizes by row and we need to normalize
// by topic (columns of lambda)
val expElogbeta = exp(LDAUtils.dirichletExpectation(topicsMatrix.asBreeze.toDenseMatrix.t).t)
val expElogbetaBc = documents.sparkContext.broadcast(expElogbeta)
val docConcentrationBrz = this.docConcentration.asBreeze
val gammaShape = this.gammaShape
val k = this.k
documents.map { case (id: Long, termCounts: Vector) =>
if (termCounts.numNonzeros == 0) {
(id, Vectors.zeros(k))
} else {
val (gamma, _, _) = OnlineLDAOptimizer.variationalTopicInference(
termCounts,
expElogbetaBc.value,
docConcentrationBrz,
gammaShape,
k)
(id, Vectors.dense(normalize(gamma, 1.0).toArray))
}
}
}
/** Get a method usable as a UDF for [[topicDistributions()]] */
private[spark] def getTopicDistributionMethod(sc: SparkContext): Vector => Vector = {
val expElogbeta = exp(LDAUtils.dirichletExpectation(topicsMatrix.asBreeze.toDenseMatrix.t).t)
val expElogbetaBc = sc.broadcast(expElogbeta)
val docConcentrationBrz = this.docConcentration.asBreeze
val gammaShape = this.gammaShape
val k = this.k
(termCounts: Vector) =>
if (termCounts.numNonzeros == 0) {
Vectors.zeros(k)
} else {
val (gamma, _, _) = OnlineLDAOptimizer.variationalTopicInference(
termCounts,
expElogbetaBc.value,
docConcentrationBrz,
gammaShape,
k)
Vectors.dense(normalize(gamma, 1.0).toArray)
}
}
/**
* Predicts the topic mixture distribution for a document (often called "theta" in the
* literature). Returns a vector of zeros for an empty document.
*
   * Note: this is intended for quick queries on a single document. For batches of documents,
   * prefer `topicDistributions()` to avoid repeated overhead.
*
* @param document document to predict topic mixture distributions for
* @return topic mixture distribution for the document
*/
@Since("2.0.0")
def topicDistribution(document: Vector): Vector = {
val expElogbeta = exp(LDAUtils.dirichletExpectation(topicsMatrix.asBreeze.toDenseMatrix.t).t)
if (document.numNonzeros == 0) {
Vectors.zeros(this.k)
} else {
val (gamma, _, _) = OnlineLDAOptimizer.variationalTopicInference(
document,
expElogbeta,
this.docConcentration.asBreeze,
gammaShape,
this.k)
Vectors.dense(normalize(gamma, 1.0).toArray)
}
}
/**
* Java-friendly version of [[topicDistributions]]
*/
@Since("1.4.1")
def topicDistributions(
documents: JavaPairRDD[java.lang.Long, Vector]): JavaPairRDD[java.lang.Long, Vector] = {
val distributions = topicDistributions(documents.rdd.asInstanceOf[RDD[(Long, Vector)]])
JavaPairRDD.fromRDD(distributions.asInstanceOf[RDD[(java.lang.Long, Vector)]])
}
}
/**
* Local (non-distributed) model fitted by [[LDA]].
*
* This model stores the inferred topics only; it does not store info about the training dataset.
*/
@Since("1.5.0")
object LocalLDAModel extends Loader[LocalLDAModel] {
private object SaveLoadV1_0 {
val thisFormatVersion = "1.0"
val thisClassName = "org.apache.spark.mllib.clustering.LocalLDAModel"
// Store the distribution of terms of each topic and the column index in topicsMatrix
// as a Row in data.
case class Data(topic: Vector, index: Int)
def save(
sc: SparkContext,
path: String,
topicsMatrix: Matrix,
docConcentration: Vector,
topicConcentration: Double,
gammaShape: Double): Unit = {
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
val k = topicsMatrix.numCols
val metadata = compact(render
(("class" -> thisClassName) ~ ("version" -> thisFormatVersion) ~
("k" -> k) ~ ("vocabSize" -> topicsMatrix.numRows) ~
("docConcentration" -> docConcentration.toArray.toSeq) ~
("topicConcentration" -> topicConcentration) ~
("gammaShape" -> gammaShape)))
sc.parallelize(Seq(metadata), 1).saveAsTextFile(Loader.metadataPath(path))
val topicsDenseMatrix = topicsMatrix.asBreeze.toDenseMatrix
val topics = Range(0, k).map { topicInd =>
Data(Vectors.dense((topicsDenseMatrix(::, topicInd).toArray)), topicInd)
}
spark.createDataFrame(topics).repartition(1).write.parquet(Loader.dataPath(path))
}
def load(
sc: SparkContext,
path: String,
docConcentration: Vector,
topicConcentration: Double,
gammaShape: Double): LocalLDAModel = {
val dataPath = Loader.dataPath(path)
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
val dataFrame = spark.read.parquet(dataPath)
Loader.checkSchema[Data](dataFrame.schema)
val topics = dataFrame.collect()
val vocabSize = topics(0).getAs[Vector](0).size
val k = topics.length
val brzTopics = BDM.zeros[Double](vocabSize, k)
topics.foreach { case Row(vec: Vector, ind: Int) =>
brzTopics(::, ind) := vec.asBreeze
}
val topicsMat = Matrices.fromBreeze(brzTopics)
new LocalLDAModel(topicsMat, docConcentration, topicConcentration, gammaShape)
}
}
@Since("1.5.0")
override def load(sc: SparkContext, path: String): LocalLDAModel = {
val (loadedClassName, loadedVersion, metadata) = Loader.loadMetadata(sc, path)
implicit val formats = DefaultFormats
val expectedK = (metadata \\ "k").extract[Int]
val expectedVocabSize = (metadata \\ "vocabSize").extract[Int]
val docConcentration =
Vectors.dense((metadata \\ "docConcentration").extract[Seq[Double]].toArray)
val topicConcentration = (metadata \\ "topicConcentration").extract[Double]
val gammaShape = (metadata \\ "gammaShape").extract[Double]
val classNameV1_0 = SaveLoadV1_0.thisClassName
val model = (loadedClassName, loadedVersion) match {
case (className, "1.0") if className == classNameV1_0 =>
SaveLoadV1_0.load(sc, path, docConcentration, topicConcentration, gammaShape)
case _ => throw new Exception(
s"LocalLDAModel.load did not recognize model with (className, format version):" +
s"($loadedClassName, $loadedVersion). Supported:\\n" +
s" ($classNameV1_0, 1.0)")
}
val topicsMatrix = model.topicsMatrix
require(expectedK == topicsMatrix.numCols,
s"LocalLDAModel requires $expectedK topics, got ${topicsMatrix.numCols} topics")
require(expectedVocabSize == topicsMatrix.numRows,
s"LocalLDAModel requires $expectedVocabSize terms for each topic, " +
s"but got ${topicsMatrix.numRows}")
model
}
}
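// Editor's sketch (hedged, not part of Spark's source): how a caller might inspect and score a
// LocalLDAModel. `testDocs` is assumed to use the same vocabulary (term-count ordering) as training.
private[clustering] object LocalLDAModelUsageSketch {
  def describeAndScore(model: LocalLDAModel, testDocs: RDD[(Long, Vector)]): Unit = {
    // Top 5 weighted terms per topic, as parallel arrays of term indices and weights.
    model.describeTopics(maxTermsPerTopic = 5).zipWithIndex.foreach {
      case ((terms, weights), topic) =>
        println(s"topic $topic: " + terms.zip(weights).mkString(", "))
    }
    // Variational upper bound on log perplexity per token (lower is better).
    println(s"log perplexity = ${model.logPerplexity(testDocs)}")
  }
}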
/**
* Distributed LDA model.
* This model stores the inferred topics, the full training dataset, and the topic distributions.
*/
@Since("1.3.0")
class DistributedLDAModel private[clustering] (
private[clustering] val graph: Graph[LDA.TopicCounts, LDA.TokenCount],
private[clustering] val globalTopicTotals: LDA.TopicCounts,
@Since("1.3.0") val k: Int,
@Since("1.3.0") val vocabSize: Int,
@Since("1.5.0") override val docConcentration: Vector,
@Since("1.5.0") override val topicConcentration: Double,
private[spark] val iterationTimes: Array[Double],
override protected[clustering] val gammaShape: Double = DistributedLDAModel.defaultGammaShape,
private[spark] val checkpointFiles: Array[String] = Array.empty[String])
extends LDAModel {
import LDA._
/**
* Convert model to a local model.
* The local model stores the inferred topics but not the topic distributions for training
* documents.
*/
@Since("1.3.0")
def toLocal: LocalLDAModel = new LocalLDAModel(topicsMatrix, docConcentration, topicConcentration,
gammaShape)
/**
* Inferred topics, where each topic is represented by a distribution over terms.
* This is a matrix of size vocabSize x k, where each column is a topic.
* No guarantees are given about the ordering of the topics.
*
* WARNING: This matrix is collected from an RDD. Beware memory usage when vocabSize, k are large.
*/
@Since("1.3.0")
override lazy val topicsMatrix: Matrix = {
// Collect row-major topics
val termTopicCounts: Array[(Int, TopicCounts)] =
graph.vertices.filter(_._1 < 0).map { case (termIndex, cnts) =>
(index2term(termIndex), cnts)
}.collect()
// Convert to Matrix
val brzTopics = BDM.zeros[Double](vocabSize, k)
termTopicCounts.foreach { case (term, cnts) =>
var j = 0
while (j < k) {
brzTopics(term, j) = cnts(j)
j += 1
}
}
Matrices.fromBreeze(brzTopics)
}
@Since("1.3.0")
override def describeTopics(maxTermsPerTopic: Int): Array[(Array[Int], Array[Double])] = {
val numTopics = k
// Note: N_k is not needed to find the top terms, but it is needed to normalize weights
// to a distribution over terms.
val N_k: TopicCounts = globalTopicTotals
val topicsInQueues: Array[BoundedPriorityQueue[(Double, Int)]] =
graph.vertices.filter(isTermVertex)
.mapPartitions { termVertices =>
// For this partition, collect the most common terms for each topic in queues:
// queues(topic) = queue of (term weight, term index).
// Term weights are N_{wk} / N_k.
val queues =
Array.fill(numTopics)(new BoundedPriorityQueue[(Double, Int)](maxTermsPerTopic))
for ((termId, n_wk) <- termVertices) {
var topic = 0
while (topic < numTopics) {
queues(topic) += (n_wk(topic) / N_k(topic) -> index2term(termId.toInt))
topic += 1
}
}
Iterator(queues)
}.reduce { (q1, q2) =>
q1.zip(q2).foreach { case (a, b) => a ++= b}
q1
}
topicsInQueues.map { q =>
val (termWeights, terms) = q.toArray.sortBy(-_._1).unzip
(terms.toArray, termWeights.toArray)
}
}
/**
* Return the top documents for each topic
*
* @param maxDocumentsPerTopic Maximum number of documents to collect for each topic.
* @return Array over topics. Each element represent as a pair of matching arrays:
* (IDs for the documents, weights of the topic in these documents).
* For each topic, documents are sorted in order of decreasing topic weights.
*/
@Since("1.5.0")
def topDocumentsPerTopic(maxDocumentsPerTopic: Int): Array[(Array[Long], Array[Double])] = {
val numTopics = k
val topicsInQueues: Array[BoundedPriorityQueue[(Double, Long)]] =
topicDistributions.mapPartitions { docVertices =>
// For this partition, collect the most common docs for each topic in queues:
// queues(topic) = queue of (doc topic, doc ID).
val queues =
Array.fill(numTopics)(new BoundedPriorityQueue[(Double, Long)](maxDocumentsPerTopic))
for ((docId, docTopics) <- docVertices) {
var topic = 0
while (topic < numTopics) {
queues(topic) += (docTopics(topic) -> docId)
topic += 1
}
}
Iterator(queues)
}.treeReduce { (q1, q2) =>
q1.zip(q2).foreach { case (a, b) => a ++= b }
q1
}
topicsInQueues.map { q =>
val (docTopics, docs) = q.toArray.sortBy(-_._1).unzip
(docs.toArray, docTopics.toArray)
}
}
/**
* Return the top topic for each (doc, term) pair. I.e., for each document, what is the most
* likely topic generating each term?
*
* @return RDD of (doc ID, assignment of top topic index for each term),
* where the assignment is specified via a pair of zippable arrays
* (term indices, topic indices). Note that terms will be omitted if not present in
* the document.
*/
@Since("1.5.0")
lazy val topicAssignments: RDD[(Long, Array[Int], Array[Int])] = {
// For reference, compare the below code with the core part of EMLDAOptimizer.next().
val eta = topicConcentration
val W = vocabSize
val alpha = docConcentration(0)
val N_k = globalTopicTotals
val sendMsg: EdgeContext[TopicCounts, TokenCount, (Array[Int], Array[Int])] => Unit =
(edgeContext) => {
// E-STEP: Compute gamma_{wjk} (smoothed topic distributions).
val scaledTopicDistribution: TopicCounts =
computePTopic(edgeContext.srcAttr, edgeContext.dstAttr, N_k, W, eta, alpha)
// For this (doc j, term w), send top topic k to doc vertex.
val topTopic: Int = argmax(scaledTopicDistribution)
val term: Int = index2term(edgeContext.dstId)
edgeContext.sendToSrc((Array(term), Array(topTopic)))
}
val mergeMsg: ((Array[Int], Array[Int]), (Array[Int], Array[Int])) => (Array[Int], Array[Int]) =
(terms_topics0, terms_topics1) => {
(terms_topics0._1 ++ terms_topics1._1, terms_topics0._2 ++ terms_topics1._2)
}
// M-STEP: Aggregation computes new N_{kj}, N_{wk} counts.
val perDocAssignments =
graph.aggregateMessages[(Array[Int], Array[Int])](sendMsg, mergeMsg).filter(isDocumentVertex)
perDocAssignments.map { case (docID: Long, (terms: Array[Int], topics: Array[Int])) =>
// TODO: Avoid zip, which is inefficient.
val (sortedTerms, sortedTopics) = terms.zip(topics).sortBy(_._1).unzip
(docID, sortedTerms.toArray, sortedTopics.toArray)
}
}
/** Java-friendly version of [[topicAssignments]] */
@Since("1.5.0")
lazy val javaTopicAssignments: JavaRDD[(java.lang.Long, Array[Int], Array[Int])] = {
topicAssignments.asInstanceOf[RDD[(java.lang.Long, Array[Int], Array[Int])]].toJavaRDD()
}
// TODO
// override def logLikelihood(documents: RDD[(Long, Vector)]): Double = ???
/**
* Log likelihood of the observed tokens in the training set,
* given the current parameter estimates:
* log P(docs | topics, topic distributions for docs, alpha, eta)
*
* Note:
* - This excludes the prior; for that, use [[logPrior]].
* - Even with [[logPrior]], this is NOT the same as the data log likelihood given the
* hyperparameters.
*/
@Since("1.3.0")
lazy val logLikelihood: Double = {
// TODO: generalize this for asymmetric (non-scalar) alpha
val alpha = this.docConcentration(0) // To avoid closure capture of enclosing object
val eta = this.topicConcentration
assert(eta > 1.0)
assert(alpha > 1.0)
val N_k = globalTopicTotals
val smoothed_N_k: TopicCounts = N_k + (vocabSize * (eta - 1.0))
// Edges: Compute token log probability from phi_{wk}, theta_{kj}.
val sendMsg: EdgeContext[TopicCounts, TokenCount, Double] => Unit = (edgeContext) => {
val N_wj = edgeContext.attr
val smoothed_N_wk: TopicCounts = edgeContext.dstAttr + (eta - 1.0)
val smoothed_N_kj: TopicCounts = edgeContext.srcAttr + (alpha - 1.0)
val phi_wk: TopicCounts = smoothed_N_wk :/ smoothed_N_k
val theta_kj: TopicCounts = normalize(smoothed_N_kj, 1.0)
val tokenLogLikelihood = N_wj * math.log(phi_wk.dot(theta_kj))
edgeContext.sendToDst(tokenLogLikelihood)
}
graph.aggregateMessages[Double](sendMsg, _ + _)
.map(_._2).fold(0.0)(_ + _)
}
/**
* Log probability of the current parameter estimate:
* log P(topics, topic distributions for docs | alpha, eta)
*/
@Since("1.3.0")
lazy val logPrior: Double = {
// TODO: generalize this for asymmetric (non-scalar) alpha
val alpha = this.docConcentration(0) // To avoid closure capture of enclosing object
val eta = this.topicConcentration
// Term vertices: Compute phi_{wk}. Use to compute prior log probability.
// Doc vertex: Compute theta_{kj}. Use to compute prior log probability.
val N_k = globalTopicTotals
val smoothed_N_k: TopicCounts = N_k + (vocabSize * (eta - 1.0))
val seqOp: (Double, (VertexId, TopicCounts)) => Double = {
case (sumPrior: Double, vertex: (VertexId, TopicCounts)) =>
if (isTermVertex(vertex)) {
val N_wk = vertex._2
val smoothed_N_wk: TopicCounts = N_wk + (eta - 1.0)
val phi_wk: TopicCounts = smoothed_N_wk :/ smoothed_N_k
(eta - 1.0) * sum(phi_wk.map(math.log))
} else {
val N_kj = vertex._2
val smoothed_N_kj: TopicCounts = N_kj + (alpha - 1.0)
val theta_kj: TopicCounts = normalize(smoothed_N_kj, 1.0)
(alpha - 1.0) * sum(theta_kj.map(math.log))
}
}
graph.vertices.aggregate(0.0)(seqOp, _ + _)
}
/**
* For each document in the training set, return the distribution over topics for that document
* ("theta_doc").
*
* @return RDD of (document ID, topic distribution) pairs
*/
@Since("1.3.0")
def topicDistributions: RDD[(Long, Vector)] = {
graph.vertices.filter(LDA.isDocumentVertex).map { case (docID, topicCounts) =>
(docID.toLong, Vectors.fromBreeze(normalize(topicCounts, 1.0)))
}
}
/**
* Java-friendly version of [[topicDistributions]]
*/
@Since("1.4.1")
def javaTopicDistributions: JavaPairRDD[java.lang.Long, Vector] = {
JavaPairRDD.fromRDD(topicDistributions.asInstanceOf[RDD[(java.lang.Long, Vector)]])
}
/**
* For each document, return the top k weighted topics for that document and their weights.
* @return RDD of (doc ID, topic indices, topic weights)
*/
@Since("1.5.0")
def topTopicsPerDocument(k: Int): RDD[(Long, Array[Int], Array[Double])] = {
graph.vertices.filter(LDA.isDocumentVertex).map { case (docID, topicCounts) =>
// TODO: Remove work-around for the breeze bug.
// https://github.com/scalanlp/breeze/issues/561
val topIndices = if (k == topicCounts.length) {
Seq.range(0, k)
} else {
argtopk(topicCounts, k)
}
val sumCounts = sum(topicCounts)
val weights = if (sumCounts != 0) {
topicCounts(topIndices) / sumCounts
} else {
topicCounts(topIndices)
}
(docID.toLong, topIndices.toArray, weights.toArray)
}
}
/**
* Java-friendly version of [[topTopicsPerDocument]]
*/
@Since("1.5.0")
def javaTopTopicsPerDocument(k: Int): JavaRDD[(java.lang.Long, Array[Int], Array[Double])] = {
val topics = topTopicsPerDocument(k)
topics.asInstanceOf[RDD[(java.lang.Long, Array[Int], Array[Double])]].toJavaRDD()
}
// TODO:
// override def topicDistributions(documents: RDD[(Long, Vector)]): RDD[(Long, Vector)] = ???
override protected def formatVersion = "1.0"
@Since("1.5.0")
override def save(sc: SparkContext, path: String): Unit = {
// Note: This intentionally does not save checkpointFiles.
DistributedLDAModel.SaveLoadV1_0.save(
sc, path, graph, globalTopicTotals, k, vocabSize, docConcentration, topicConcentration,
iterationTimes, gammaShape)
}
}
/**
* Distributed model fitted by [[LDA]].
* This type of model is currently only produced by Expectation-Maximization (EM).
*
* This model stores the inferred topics, the full training dataset, and the topic distribution
* for each training document.
*/
@Since("1.5.0")
object DistributedLDAModel extends Loader[DistributedLDAModel] {
/**
* The [[DistributedLDAModel]] constructor's default arguments assume gammaShape = 100
* to ensure equivalence in LDAModel.toLocal conversion.
*/
private[clustering] val defaultGammaShape: Double = 100
private object SaveLoadV1_0 {
val thisFormatVersion = "1.0"
val thisClassName = "org.apache.spark.mllib.clustering.DistributedLDAModel"
// Store globalTopicTotals as a Vector.
case class Data(globalTopicTotals: Vector)
// Store each term and document vertex with an id and the topicWeights.
case class VertexData(id: Long, topicWeights: Vector)
// Store each edge with the source id, destination id and tokenCounts.
case class EdgeData(srcId: Long, dstId: Long, tokenCounts: Double)
def save(
sc: SparkContext,
path: String,
graph: Graph[LDA.TopicCounts, LDA.TokenCount],
globalTopicTotals: LDA.TopicCounts,
k: Int,
vocabSize: Int,
docConcentration: Vector,
topicConcentration: Double,
iterationTimes: Array[Double],
gammaShape: Double): Unit = {
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
val metadata = compact(render
(("class" -> thisClassName) ~ ("version" -> thisFormatVersion) ~
("k" -> k) ~ ("vocabSize" -> vocabSize) ~
("docConcentration" -> docConcentration.toArray.toSeq) ~
("topicConcentration" -> topicConcentration) ~
("iterationTimes" -> iterationTimes.toSeq) ~
("gammaShape" -> gammaShape)))
sc.parallelize(Seq(metadata), 1).saveAsTextFile(Loader.metadataPath(path))
val newPath = new Path(Loader.dataPath(path), "globalTopicTotals").toUri.toString
spark.createDataFrame(Seq(Data(Vectors.fromBreeze(globalTopicTotals)))).write.parquet(newPath)
val verticesPath = new Path(Loader.dataPath(path), "topicCounts").toUri.toString
spark.createDataFrame(graph.vertices.map { case (ind, vertex) =>
VertexData(ind, Vectors.fromBreeze(vertex))
}).write.parquet(verticesPath)
val edgesPath = new Path(Loader.dataPath(path), "tokenCounts").toUri.toString
spark.createDataFrame(graph.edges.map { case Edge(srcId, dstId, prop) =>
EdgeData(srcId, dstId, prop)
}).write.parquet(edgesPath)
}
def load(
sc: SparkContext,
path: String,
vocabSize: Int,
docConcentration: Vector,
topicConcentration: Double,
iterationTimes: Array[Double],
gammaShape: Double): DistributedLDAModel = {
val dataPath = new Path(Loader.dataPath(path), "globalTopicTotals").toUri.toString
val vertexDataPath = new Path(Loader.dataPath(path), "topicCounts").toUri.toString
val edgeDataPath = new Path(Loader.dataPath(path), "tokenCounts").toUri.toString
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
val dataFrame = spark.read.parquet(dataPath)
val vertexDataFrame = spark.read.parquet(vertexDataPath)
val edgeDataFrame = spark.read.parquet(edgeDataPath)
Loader.checkSchema[Data](dataFrame.schema)
Loader.checkSchema[VertexData](vertexDataFrame.schema)
Loader.checkSchema[EdgeData](edgeDataFrame.schema)
val globalTopicTotals: LDA.TopicCounts =
dataFrame.first().getAs[Vector](0).asBreeze.toDenseVector
val vertices: RDD[(VertexId, LDA.TopicCounts)] = vertexDataFrame.rdd.map {
case Row(ind: Long, vec: Vector) => (ind, vec.asBreeze.toDenseVector)
}
val edges: RDD[Edge[LDA.TokenCount]] = edgeDataFrame.rdd.map {
case Row(srcId: Long, dstId: Long, prop: Double) => Edge(srcId, dstId, prop)
}
val graph: Graph[LDA.TopicCounts, LDA.TokenCount] = Graph(vertices, edges)
new DistributedLDAModel(graph, globalTopicTotals, globalTopicTotals.length, vocabSize,
docConcentration, topicConcentration, iterationTimes, gammaShape)
}
}
@Since("1.5.0")
override def load(sc: SparkContext, path: String): DistributedLDAModel = {
val (loadedClassName, loadedVersion, metadata) = Loader.loadMetadata(sc, path)
implicit val formats = DefaultFormats
val expectedK = (metadata \ "k").extract[Int]
val vocabSize = (metadata \ "vocabSize").extract[Int]
val docConcentration =
Vectors.dense((metadata \ "docConcentration").extract[Seq[Double]].toArray)
val topicConcentration = (metadata \ "topicConcentration").extract[Double]
val iterationTimes = (metadata \ "iterationTimes").extract[Seq[Double]]
val gammaShape = (metadata \ "gammaShape").extract[Double]
val classNameV1_0 = SaveLoadV1_0.thisClassName
val model = (loadedClassName, loadedVersion) match {
case (className, "1.0") if className == classNameV1_0 =>
DistributedLDAModel.SaveLoadV1_0.load(sc, path, vocabSize, docConcentration,
topicConcentration, iterationTimes.toArray, gammaShape)
case _ => throw new Exception(
s"DistributedLDAModel.load did not recognize model with (className, format version):" +
s"($loadedClassName, $loadedVersion). Supported: ($classNameV1_0, 1.0)")
}
require(model.vocabSize == vocabSize,
s"DistributedLDAModel requires $vocabSize vocabSize, got ${model.vocabSize} vocabSize")
require(model.docConcentration == docConcentration,
s"DistributedLDAModel requires $docConcentration docConcentration, " +
s"got ${model.docConcentration} docConcentration")
require(model.topicConcentration == topicConcentration,
s"DistributedLDAModel requires $topicConcentration topicConcentration, " +
s"got ${model.topicConcentration} topicConcentration")
require(expectedK == model.k,
s"DistributedLDAModel requires $expectedK topics, got ${model.k} topics")
model
}
}
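// ---------------------------------------------------------------------------------------------
// Hedged usage sketch (added for illustration; not part of the original Spark source). It shows
// how the two model flavours above are typically obtained, converted and persisted. The
// SparkContext, the (docId, termCountVector) corpus and the output paths are assumptions made
// purely for this example; the object name is hypothetical.
// ---------------------------------------------------------------------------------------------
private[clustering] object LDAModelUsageSketch {
  import org.apache.spark.SparkContext
  import org.apache.spark.mllib.linalg.Vector
  import org.apache.spark.rdd.RDD

  def run(sc: SparkContext, corpus: RDD[(Long, Vector)]): Unit = {
    // The default (EM) optimizer produces a DistributedLDAModel.
    val distributed = new LDA()
      .setK(10)
      .setMaxIterations(50)
      .run(corpus)
      .asInstanceOf[DistributedLDAModel]
    // Keep only the inferred topics for cheap scoring of new documents.
    val local: LocalLDAModel = distributed.toLocal
    // Both flavours write metadata plus Parquet data under the given directory.
    distributed.save(sc, "/tmp/lda-distributed") // placeholder path
    local.save(sc, "/tmp/lda-local") // placeholder path
  }
}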
| Panos-Bletsos/spark-cost-model-optimizer | mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala | Scala | apache-2.0 | 39,428 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions.{And, CaseWhen, Expression, If, IsNotNull, Literal, Or, Rand}
import org.apache.spark.sql.catalyst.expressions.Literal.{FalseLiteral, TrueLiteral}
import org.apache.spark.sql.catalyst.plans.{Inner, PlanTest}
import org.apache.spark.sql.catalyst.plans.logical.{DeleteFromTable, LocalRelation, LogicalPlan, UpdateTable}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
import org.apache.spark.sql.types.{BooleanType, IntegerType}
class SimplifyConditionalsInPredicateSuite extends PlanTest {
object Optimize extends RuleExecutor[LogicalPlan] {
val batches =
Batch("SimplifyConditionalsInPredicate", FixedPoint(10),
NullPropagation,
ConstantFolding,
BooleanSimplification,
SimplifyConditionals,
SimplifyConditionalsInPredicate) :: Nil
}
private val testRelation =
LocalRelation('i.int, 'b.boolean, 'a.array(IntegerType), 'm.map(IntegerType, IntegerType))
private val anotherTestRelation = LocalRelation('d.int)
test("IF(cond, trueVal, false) => AND(cond, trueVal)") {
val originalCond = If(
UnresolvedAttribute("i") > Literal(10),
UnresolvedAttribute("b"),
FalseLiteral)
val expectedCond = And(
UnresolvedAttribute("i") > Literal(10),
UnresolvedAttribute("b"))
testFilter(originalCond, expectedCond = expectedCond)
testJoin(originalCond, expectedCond = expectedCond)
testDelete(originalCond, expectedCond = expectedCond)
testUpdate(originalCond, expectedCond = expectedCond)
testProjection(originalCond, expectedExpr = originalCond)
}
test("IF(cond, trueVal, true) => OR(NOT(cond), trueVal)") {
val originalCond = If(
UnresolvedAttribute("i") > Literal(10),
UnresolvedAttribute("b"),
TrueLiteral)
val expectedCond = Or(
UnresolvedAttribute("i") <= Literal(10),
UnresolvedAttribute("b"))
testFilter(originalCond, expectedCond = expectedCond)
testJoin(originalCond, expectedCond = expectedCond)
testDelete(originalCond, expectedCond = expectedCond)
testUpdate(originalCond, expectedCond = expectedCond)
testProjection(originalCond, expectedExpr = originalCond)
}
test("IF(cond, false, falseVal) => AND(NOT(cond), elseVal)") {
val originalCond = If(
UnresolvedAttribute("i") > Literal(10),
FalseLiteral,
UnresolvedAttribute("b"))
val expectedCond = And(
UnresolvedAttribute("i") <= Literal(10),
UnresolvedAttribute("b"))
testFilter(originalCond, expectedCond = expectedCond)
testJoin(originalCond, expectedCond = expectedCond)
testDelete(originalCond, expectedCond = expectedCond)
testUpdate(originalCond, expectedCond = expectedCond)
testProjection(originalCond, expectedExpr = originalCond)
}
test("IF(cond, true, falseVal) => OR(cond, elseVal)") {
val originalCond = If(
UnresolvedAttribute("i") > Literal(10),
TrueLiteral,
UnresolvedAttribute("b"))
val expectedCond = Or(
UnresolvedAttribute("i") > Literal(10),
UnresolvedAttribute("b"))
testFilter(originalCond, expectedCond = expectedCond)
testJoin(originalCond, expectedCond = expectedCond)
testDelete(originalCond, expectedCond = expectedCond)
testUpdate(originalCond, expectedCond = expectedCond)
testProjection(originalCond, expectedExpr = originalCond)
}
test("CASE WHEN cond THEN trueVal ELSE false END => AND(cond, trueVal)") {
Seq(Some(FalseLiteral), None, Some(Literal(null, BooleanType))).foreach { elseExp =>
val originalCond = CaseWhen(
Seq((UnresolvedAttribute("i") > Literal(10), UnresolvedAttribute("b"))),
elseExp)
val expectedCond = And(
UnresolvedAttribute("i") > Literal(10),
UnresolvedAttribute("b"))
testFilter(originalCond, expectedCond = expectedCond)
testJoin(originalCond, expectedCond = expectedCond)
testDelete(originalCond, expectedCond = expectedCond)
testUpdate(originalCond, expectedCond = expectedCond)
testProjection(originalCond, expectedExpr = originalCond)
}
}
test("CASE WHEN cond THEN trueVal ELSE true END => OR(NOT(cond), trueVal)") {
val originalCond = CaseWhen(
Seq((UnresolvedAttribute("i") > Literal(10), UnresolvedAttribute("b"))),
TrueLiteral)
val expectedCond = Or(
UnresolvedAttribute("i") <= Literal(10),
UnresolvedAttribute("b"))
testFilter(originalCond, expectedCond = expectedCond)
testJoin(originalCond, expectedCond = expectedCond)
testDelete(originalCond, expectedCond = expectedCond)
testUpdate(originalCond, expectedCond = expectedCond)
testProjection(originalCond, expectedExpr = originalCond)
}
test("CASE WHEN cond THEN false ELSE elseVal END => AND(NOT(cond), elseVal)") {
val originalCond = CaseWhen(
Seq((UnresolvedAttribute("i") > Literal(10), FalseLiteral)),
UnresolvedAttribute("b"))
val expectedCond = And(
UnresolvedAttribute("i") <= Literal(10),
UnresolvedAttribute("b"))
testFilter(originalCond, expectedCond = expectedCond)
testJoin(originalCond, expectedCond = expectedCond)
testDelete(originalCond, expectedCond = expectedCond)
testUpdate(originalCond, expectedCond = expectedCond)
testProjection(originalCond, expectedExpr = originalCond)
}
test("CASE WHEN cond THEN false END => false") {
val originalCond = CaseWhen(
Seq((UnresolvedAttribute("i") > Literal(10), FalseLiteral)))
testFilter(originalCond, expectedCond = FalseLiteral)
testJoin(originalCond, expectedCond = FalseLiteral)
testDelete(originalCond, expectedCond = FalseLiteral)
testUpdate(originalCond, expectedCond = FalseLiteral)
testProjection(originalCond, expectedExpr = originalCond)
}
test("CASE WHEN non-deterministic-cond THEN false END") {
val originalCond =
CaseWhen(Seq((UnresolvedAttribute("i") > Rand(0), FalseLiteral)))
val expectedCond = And(UnresolvedAttribute("i") > Rand(0), FalseLiteral)
// nondeterministic expressions are only allowed in Project, Filter, Aggregate or Window,
// so only the Filter and Project cases are exercised here
testFilter(originalCond, expectedCond = FalseLiteral)
testProjection(originalCond, expectedExpr = originalCond)
}
test("CASE WHEN cond THEN true ELSE elseVal END => OR(cond, elseVal)") {
val originalCond = CaseWhen(
Seq((UnresolvedAttribute("i") > Literal(10), TrueLiteral)),
UnresolvedAttribute("b"))
val expectedCond = Or(
UnresolvedAttribute("i") > Literal(10),
UnresolvedAttribute("b"))
testFilter(originalCond, expectedCond = expectedCond)
testJoin(originalCond, expectedCond = expectedCond)
testDelete(originalCond, expectedCond = expectedCond)
testUpdate(originalCond, expectedCond = expectedCond)
testProjection(originalCond, expectedExpr = originalCond)
}
test("CASE WHEN cond THEN true END => cond") {
val originalCond = CaseWhen(
Seq((UnresolvedAttribute("i") > Literal(10), TrueLiteral)))
val expectedCond = UnresolvedAttribute("i") > Literal(10)
testFilter(originalCond, expectedCond = expectedCond)
testJoin(originalCond, expectedCond = expectedCond)
testDelete(originalCond, expectedCond = expectedCond)
testUpdate(originalCond, expectedCond = expectedCond)
testProjection(originalCond, expectedExpr = originalCond)
}
test("Simplify conditional in conditions of CaseWhen inside another CaseWhen") {
val nestedCaseWhen = CaseWhen(
Seq((UnresolvedAttribute("i") > Literal(10)) -> UnresolvedAttribute("b")),
FalseLiteral)
val originalCond = CaseWhen(Seq(IsNotNull(nestedCaseWhen) -> FalseLiteral))
val expectedCond = FalseLiteral
testFilter(originalCond, expectedCond = expectedCond)
testJoin(originalCond, expectedCond = expectedCond)
testDelete(originalCond, expectedCond = expectedCond)
testUpdate(originalCond, expectedCond = expectedCond)
testProjection(originalCond, expectedExpr = originalCond)
}
test("Not expected type - SimplifyConditionalsInPredicate") {
val e = intercept[AnalysisException] {
testFilter(originalCond = Literal(null, IntegerType), expectedCond = FalseLiteral)
}.getMessage
assert(e.contains("'CAST(NULL AS INT)' of type int is not a boolean"))
}
private def testFilter(originalCond: Expression, expectedCond: Expression): Unit = {
test((rel, exp) => rel.where(exp), originalCond, expectedCond)
}
private def testJoin(originalCond: Expression, expectedCond: Expression): Unit = {
test((rel, exp) => rel.join(anotherTestRelation, Inner, Some(exp)), originalCond, expectedCond)
}
private def testProjection(originalExpr: Expression, expectedExpr: Expression): Unit = {
test((rel, exp) => rel.select(exp), originalExpr, expectedExpr)
}
private def testDelete(originalCond: Expression, expectedCond: Expression): Unit = {
test((rel, expr) => DeleteFromTable(rel, Some(expr)), originalCond, expectedCond)
}
private def testUpdate(originalCond: Expression, expectedCond: Expression): Unit = {
test((rel, expr) => UpdateTable(rel, Seq.empty, Some(expr)), originalCond, expectedCond)
}
private def test(
func: (LogicalPlan, Expression) => LogicalPlan,
originalExpr: Expression,
expectedExpr: Expression): Unit = {
val originalPlan = func(testRelation, originalExpr).analyze
val optimizedPlan = Optimize.execute(originalPlan)
val expectedPlan = func(testRelation, expectedExpr).analyze
comparePlans(optimizedPlan, expectedPlan)
}
}
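// Hedged illustration (not part of the original suite): the rule exercised above rewrites a
// conditional that is used directly as a predicate into a plain boolean expression. A DataFrame
// filter written (hypothetically) as
//   df.filter(when($"i" > 10, $"b").otherwise(false))
// corresponds to the Filter(CASE WHEN i > 10 THEN b ELSE false END) plan tested here and ends up
// equivalent to
//   df.filter($"i" > 10 && $"b")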
| maropu/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SimplifyConditionalsInPredicateSuite.scala | Scala | apache-2.0 | 10,608 |
/*
active-learning-scala: Active Learning library for Scala
Copyright (c) 2014 Davi Pereira dos Santos
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package al.strategies
import ml.Pattern
import ml.classifiers.Learner
import ml.models.Model
case class Entropy(learner: Learner, pool: Seq[Pattern], debug: Boolean = false)
extends StrategyWithLearner with EntropyMeasure {
override val toString = "Entropy"
val abr = "Ent"
val id = 4
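// Uncertainty sampling: query the unlabeled pattern whose predicted class distribution has the
// highest Shannon entropy; entropy(...) is provided by the EntropyMeasure mixin.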
protected def next(current_model: Model, unlabeled: Seq[Pattern], labeled: Seq[Pattern]) = {
val selected = unlabeled maxBy {
pa => entropy(current_model.distribution(pa))
}
selected
}
} | active-learning/active-learning-scala | src/main/scala/al/strategies/Entropy.scala | Scala | gpl-2.0 | 1,257 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.table
import org.apache.flink.api.common.time.Time
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.scala._
import org.apache.flink.table.api.Types
import org.apache.flink.table.api.internal.TableEnvironmentInternal
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.runtime.utils.JavaUserDefinedAggFunctions.{CountDistinct, DataViewTestAgg, WeightedAvg}
import org.apache.flink.table.planner.runtime.utils.StreamingWithStateTestBase.StateBackendMode
import org.apache.flink.table.planner.runtime.utils.TestData._
import org.apache.flink.table.planner.runtime.utils.{JavaUserDefinedAggFunctions, StreamingWithStateTestBase, TestingRetractSink, TestingUpsertTableSink}
import org.apache.flink.table.planner.utils.CountMinMax
import org.apache.flink.types.Row
import org.junit.Assert.assertEquals
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.{Before, Test}
import scala.collection.mutable
/**
* Tests of groupby (without window) aggregations
*/
@RunWith(classOf[Parameterized])
class AggregateITCase(mode: StateBackendMode) extends StreamingWithStateTestBase(mode) {
@Before
override def before(): Unit = {
super.before()
tEnv.getConfig.setIdleStateRetentionTime(Time.hours(1), Time.hours(2))
}
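// Shared pattern in most tests below: build a Table from a test source, apply a groupBy/select
// aggregation, emit it as a retract stream into a TestingRetractSink and compare the sorted
// accumulated results with the expected rows.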
@Test
def testDistinctUDAGG(): Unit = {
val testAgg = new DataViewTestAgg
val t = failingDataSource(tupleData5).toTable(tEnv, 'a, 'b, 'c, 'd, 'e)
.groupBy('e)
.select('e, testAgg.distinct('d, 'e))
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink)
env.execute()
val expected = mutable.MutableList("1,10", "2,21", "3,12")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testDistinctUDAGGMixedWithNonDistinctUsage(): Unit = {
val testAgg = new WeightedAvg
val t = failingDataSource(tupleData5).toTable(tEnv, 'a, 'b, 'c, 'd, 'e)
.groupBy('e)
.select('e, testAgg.distinct('a, 'a), testAgg('a, 'a))
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink)
env.execute()
val expected = mutable.MutableList("1,3,3", "2,3,4", "3,4,4")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testDistinctAggregate(): Unit = {
val data = new mutable.MutableList[(Int, Int, String)]
data.+=((1, 1, "A"))
data.+=((2, 2, "B"))
data.+=((2, 2, "B"))
data.+=((4, 3, "C"))
data.+=((5, 3, "C"))
data.+=((4, 3, "C"))
data.+=((7, 3, "B"))
data.+=((1, 4, "A"))
data.+=((9, 4, "D"))
data.+=((4, 1, "A"))
data.+=((3, 2, "B"))
val testAgg = new WeightedAvg
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
.groupBy('c)
.select('c, 'a.count.distinct, 'a.sum.distinct,
testAgg.distinct('a, 'b), testAgg.distinct('b, 'a), testAgg('a, 'b))
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink)
env.execute()
val expected = mutable.MutableList("A,2,5,1,1,1", "B,3,12,4,2,3", "C,2,9,4,3,4", "D,1,9,9,4,9")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testDistinctAggregateMixedWithNonDistinct(): Unit = {
val t = failingDataSource(tupleData5).toTable(tEnv, 'a, 'b, 'c, 'd, 'e)
.groupBy('e)
.select('e, 'a.count.distinct, 'b.count)
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink)
env.execute()
val expected = mutable.MutableList("1,4,5", "2,4,7", "3,2,3")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
// @Test
// def testSimpleLogical(): Unit = {
// val t = failingDataSource(smallTupleData3).toTable(tEnv, 'a, 'b, 'c)
// .select('c.firstValue, 'c.lastValue, 'c.LISTAGG("#"))
//
// val sink = new TestingRetractSink()
// t.toRetractStream[Row].addSink(sink)
// env.execute()
//
// val expected = mutable.MutableList("Hi,Hello world,Hi#Hello#Hello world")
// assertEquals(expected.sorted, sink.getRetractResults.sorted)
// }
@Test
def testDistinct(): Unit = {
val t = failingDataSource(tupleData3).toTable(tEnv, 'a, 'b, 'c)
.select('b, nullOf(Types.LONG)).distinct()
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink)
env.execute()
val expected = mutable.MutableList("1,null", "2,null", "3,null", "4,null", "5,null", "6,null")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testDistinctAfterAggregate(): Unit = {
val t = failingDataSource(tupleData5).toTable(tEnv, 'a, 'b, 'c, 'd, 'e)
.groupBy('e).select('e, 'a.count).distinct()
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink)
env.execute()
val expected = mutable.MutableList("1,5", "2,7", "3,3")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testNonKeyedGroupAggregate(): Unit = {
val t = failingDataSource(tupleData3).toTable(tEnv, 'a, 'b, 'c)
.select('a.sum, 'b.sum)
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink).setParallelism(1)
env.execute()
val expected = List("231,91")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testGroupAggregate(): Unit = {
val t = failingDataSource(tupleData3).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('b, 'a.sum)
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1", "2,5", "3,15", "4,34", "5,65", "6,111")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testDoubleGroupAggregation(): Unit = {
val t = failingDataSource(tupleData3).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('a.count as 'cnt, 'b)
.groupBy('cnt)
.select('cnt, 'b.count as 'freq, 'b.min as 'min, 'b.max as 'max)
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1,1,1", "2,1,2,2", "3,1,3,3", "4,1,4,4", "5,1,5,5", "6,1,6,6")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testDoubleGroupMaxMinAggregation(): Unit = {
val t = failingDataSource(tupleData5).toTable(tEnv, 'a, 'b, 'c, 'd, 'e)
.groupBy('a, 'e)
.select('a, 'e, 'b.max as 'f, 'b.min as 'g)
.groupBy('a)
.select('a, 'f.max, 'g.min)
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1,1", "2,3,2", "3,6,4", "4,10,7", "5,15,11")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testGroupAggregateWithExpression(): Unit = {
val t = failingDataSource(tupleData5).toTable(tEnv, 'a, 'b, 'c, 'd, 'e)
.groupBy('e, 'b % 3)
.select('c.min, 'e, 'a.avg, 'd.count)
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink)
env.execute()
val expected = mutable.MutableList(
s"0,1,1,1", s"7,1,4,2", s"2,1,3,2",
s"3,2,3,3", s"1,2,3,3", s"14,2,5,1",
s"12,3,5,1", s"5,3,4,2")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testCollect(): Unit = {
val t = failingDataSource(tupleData3).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('b, 'a.collect)
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink).setParallelism(1)
env.execute()
val expected = List(
"1,{1=1}",
"2,{2=1, 3=1}",
"3,{4=1, 5=1, 6=1}",
"4,{8=1, 9=1, 10=1, 7=1}",
"5,{11=1, 12=1, 13=1, 14=1, 15=1}",
"6,{16=1, 17=1, 18=1, 19=1, 20=1, 21=1}")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testGroupAggregateWithStateBackend(): Unit = {
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "A"))
data.+=((2, 2L, "B"))
data.+=((3, 2L, "B"))
data.+=((4, 3L, "C"))
data.+=((5, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((7, 4L, "B"))
data.+=((8, 4L, "A"))
data.+=((9, 4L, "D"))
data.+=((10, 4L, "E"))
data.+=((11, 5L, "A"))
data.+=((12, 5L, "B"))
val distinct = new CountDistinct
val testAgg = new DataViewTestAgg
val t = env.fromCollection(data).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('b, distinct('c), testAgg('c, 'b))
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1,2", "2,1,5", "3,1,10", "4,4,20", "5,2,12")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
// verify agg close is called
assert(JavaUserDefinedAggFunctions.isCloseCalled)
}
@Test
def testRemoveDuplicateRecordsWithUpsertSink(): Unit = {
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "A"))
data.+=((2, 2L, "B"))
data.+=((3, 2L, "B"))
data.+=((4, 3L, "C"))
data.+=((5, 3L, "C"))
val t = env.fromCollection(data).toTable(tEnv, 'a, 'b, 'c)
.groupBy('c)
.select('c, 'b.max)
val tableSink = new TestingUpsertTableSink(Array(0)).configure(
Array[String]("c", "bMax"), Array[TypeInformation[_]](Types.STRING, Types.LONG))
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal("testSink", tableSink)
execInsertTableAndWaitResult(t, "testSink")
val expected = List("A,1", "B,2", "C,3")
assertEquals(expected.sorted, tableSink.getUpsertResults.sorted)
}
@Test
def testNonGroupedAggregate(): Unit = {
val testAgg = new CountMinMax
val t = failingDataSource(tupleData3).toTable(tEnv, 'a, 'b, 'c)
.aggregate(testAgg('a))
.select('f0, 'f1, 'f2)
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink).setParallelism(1)
env.execute()
val expected = List("21,1,21")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testAggregate(): Unit = {
val testAgg = new CountMinMax
val t = failingDataSource(tupleData3).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.aggregate(testAgg('a))
.select('b, 'f0, 'f1, 'f2)
val sink = new TestingRetractSink()
t.toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1,1,1", "2,2,2,3", "3,3,4,6", "4,4,7,10", "5,5,11,15", "6,6,16,21")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testGroupAggregateWithDataView(): Unit = {
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "A"))
data.+=((2, 2L, "B"))
data.+=((3, 2L, "B"))
data.+=((4, 3L, "C"))
data.+=((5, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((7, 4L, "B"))
data.+=((8, 4L, "A"))
data.+=((9, 4L, "D"))
data.+=((10, 4L, "E"))
data.+=((11, 5L, "A"))
data.+=((12, 5L, "B"))
val distinct = new CountDistinct
val testAgg = new DataViewTestAgg
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('b, distinct('c), testAgg('c, 'b))
val sink = new TestingRetractSink
t.toRetractStream[Row].addSink(sink).setParallelism(1)
env.execute()
val expected = List("1,1,2", "2,1,5", "3,1,10", "4,4,20", "5,2,12")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
// verify agg close is called
assert(JavaUserDefinedAggFunctions.isCloseCalled)
}
@Test
def testMaxRetractOptimize(): Unit = {
val t = failingDataSource(tupleData3).toTable(tEnv, 'a, 'b, 'c)
val results = t
.groupBy('b, 'c)
.select('b, 'c, 'a.max as 'a)
.groupBy('b)
.select('b, 'a.max)
.toRetractStream[Row]
val sink = new TestingRetractSink
results.addSink(sink).setParallelism(1)
env.execute()
val expected = mutable.MutableList("1,1", "2,3", "3,6", "4,10", "5,15", "6,21")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/stream/table/AggregateITCase.scala | Scala | apache-2.0 | 12,874 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.charts.stats
import scala.annotation.tailrec
import io.gatling.charts.stats.buffers._
import io.gatling.commons.shared.unstable.model.stats.Group
import io.gatling.commons.stats.OK
import io.gatling.core.config.GatlingConfiguration
private class ResultsHolder(val minTimestamp: Long, val maxTimestamp: Long, val buckets: Array[Int])(implicit configuration: GatlingConfiguration)
extends GeneralStatsBuffers(math.ceil((maxTimestamp - minTimestamp) / 1000.0).toInt)
with Buckets
with RunTimes
with NamesBuffers
with RequestsPerSecBuffers
with ResponseTimeRangeBuffers
with SessionDeltaPerSecBuffers
with ResponsesPerSecBuffers
with ErrorsBuffers
with RequestPercentilesBuffers
with GroupPercentilesBuffers {
def addUserRecord(record: UserRecord): Unit = {
addSessionBuffers(record)
addScenarioName(record)
}
def addGroupRecord(record: GroupRecord): Unit = {
addGroupName(record)
updateGroupGeneralStatsBuffers(record)
updateGroupPercentilesBuffers(record)
updateGroupResponseTimeRangeBuffer(record)
}
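// Registers the group itself and, recursively, every ancestor group so that name buffers exist
// for the whole hierarchy even when only leaf-level records are seen.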
@tailrec
private def addAllParentGroups(group: Group): Unit = {
addGroupName(GroupRecord(group, 0, 0, OK, 0, 0))
group.hierarchy.reverse match {
case _ :: tail if tail.nonEmpty => addAllParentGroups(Group(tail.reverse))
case _ =>
}
}
def addRequestRecord(record: RequestRecord): Unit =
if (!record.incoming) {
record.group.foreach(addAllParentGroups)
updateRequestsPerSecBuffers(record)
updateResponsesPerSecBuffers(record)
addRequestName(record)
updateErrorBuffers(record)
updateRequestGeneralStatsBuffers(record)
updateResponseTimeRangeBuffer(record)
updateRequestPercentilesBuffers(record)
}
def addErrorRecord(record: ErrorRecord): Unit = {
updateGlobalError(record.message)
}
}
| gatling/gatling | gatling-charts/src/main/scala/io/gatling/charts/stats/ResultsHolder.scala | Scala | apache-2.0 | 2,525 |
/**
* This file is part of agora-board.
* Copyright (C) 2016 Agora Voting SL <[email protected]>
* agora-board is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License.
* agora-board is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public License
* along with agora-board. If not, see <http://www.gnu.org/licenses/>.
**/
package models
import play.api.libs.json.JsValue
import scala.collection.Seq
import scala.concurrent.{Future, Promise}
/**
* This trait defines the Public Bulletin Board `Backend` operations interface.
*/
trait BoardBackend {
/**
* `Post` operation, add a post to the board
*/
def Post(request: PostRequest): Future[BoardAttributes]
/**
* `Get` operation, query the board to get a set of posts
*/
def Get(request: GetRequest): Future[Seq[Post]]
/**
* `Subscribe` operation
*/
def Subscribe(request: SubscribeRequest): Future[SuccessfulSubscribe]
/**
* `Accumulate` operation
*/
def Accumulate(request: AccumulateRequest): Future[JsValue]
/**
* `Unsubscribe` operation
*/
def Unsubscribe(request: UnsubscribeRequest): Future[Unit]
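// Hedged illustration (not part of the original source): a concrete backend implements these
// operations and callers compose the returned Futures, e.g. (hypothetical class name)
//   class InMemoryBackend extends BoardBackend {
//     def Post(request: PostRequest): Future[BoardAttributes] = ...
//     // remaining operations analogous
//   }
//   backend.Post(request).map(attributes => ...)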
} | agoravoting/agora-board | app/models/Backend.scala | Scala | agpl-3.0 | 1,513 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import com.esotericsoftware.kryo.{Kryo, Serializer}
import com.esotericsoftware.kryo.io.{Input, Output}
import org.apache.spark.SparkConf
import org.apache.spark.internal.config.Kryo._
import org.apache.spark.serializer.KryoRegistrator
import org.apache.spark.sql.test.SharedSparkSession
/**
* Test suite to test Kryo custom registrators.
*/
class DatasetSerializerRegistratorSuite extends QueryTest with SharedSparkSession {
import testImplicits._
override protected def sparkConf: SparkConf = {
// Make sure we use the KryoRegistrator
super.sparkConf.set(KRYO_USER_REGISTRATORS, Seq(TestRegistrator().getClass.getCanonicalName))
}
test("Kryo registrator") {
implicit val kryoEncoder = Encoders.kryo[KryoData]
val ds = Seq(KryoData(1), KryoData(2)).toDS()
assert(ds.collect().toSet == Set(KryoData(0), KryoData(0)))
}
}
/** Used to test user provided registrator. */
class TestRegistrator extends KryoRegistrator {
override def registerClasses(kryo: Kryo): Unit =
kryo.register(classOf[KryoData], new ZeroKryoDataSerializer())
}
object TestRegistrator {
def apply(): TestRegistrator = new TestRegistrator()
}
/**
* A `Serializer` that takes a [[KryoData]] and serializes it as KryoData(0).
*/
class ZeroKryoDataSerializer extends Serializer[KryoData] {
override def write(kryo: Kryo, output: Output, t: KryoData): Unit = {
output.writeInt(0)
}
override def read(kryo: Kryo, input: Input, aClass: Class[KryoData]): KryoData = {
KryoData(input.readInt())
}
}
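// Hedged note (illustration only): outside of tests the same registrator is typically wired in
// through the configuration key behind KRYO_USER_REGISTRATORS, e.g.
//   spark-submit --conf spark.kryo.registrator=com.example.TestRegistrator ...
// (the class name here is a placeholder) so executors register the custom serializer before
// touching Kryo-encoded Dataset payloads.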
| mahak/spark | sql/core/src/test/scala/org/apache/spark/sql/DatasetSerializerRegistratorSuite.scala | Scala | apache-2.0 | 2,358 |
package quisp.highcharts
import quisp._
import quisp.enums.HcSeriesType
import spray.json.{JsValue, JsonWriter}
import java.awt.Color
import javax.jws.WebMethod
/**
* Generic chart configuration
* @author rodneykinney
*/
case class Chart(
chart: ChartOptions = ChartOptions(),
colors: Seq[Color] = null,
exporting: ExportOptions = ExportOptions(),
legend: Legend = Legend(),
series: IndexedSeq[Series] = Vector(),
subtitle: ChartTitle = null,
plotOptions: PlotSpecificSettings = null,
title: ChartTitle = ChartTitle(),
labels: FloatingLabels = null,
xAxis: IndexedSeq[Axis] = Vector(Axis()),
yAxis: IndexedSeq[Axis] = Vector(Axis()),
additionalFields: Map[String, JsValue] = Map())
extends ExtensibleJsObject {
def html = {
import spray.json._
import HighchartsJson._
val json = scala.xml.Unparsed(this.toJson.toString)
val containerId = json.hashCode.toHexString
<div id={s"container$containerId"}></div>
<script type="text/javascript">
$ (function()
{{$(
{s"'#container$containerId'"}
).highcharts(
{json}
);}}
);
</script>
}
}
class ConfigurableGenericChart(var config: Chart,
val display: ChartDisplay[ConfigurableChart[Chart], Int])
extends ChartAPI[ConfigurableGenericChart]
trait ChartAPI[T <: UpdatableChart[T, Chart]]
extends UpdatableChart[T, Chart] with ExtensibleJsObjectAPI {
@WebMethod(action = "Options for the i-th X Axis (if multiple axes present")
def getXAxis(idx: Int) = {
val axis: Axis = config.xAxis(idx)
axis.api { a =>
update(config.copy(xAxis = config.xAxis.updated(idx, a)))
}
}
@WebMethod(action = "Options for the X Axis")
def xAxis: AxisAPI[T] = getXAxis(0)
@WebMethod(action = "Options for the i-th Y Axis (if multiple axes present")
def getYAxis(idx: Int) = {
val axis: Axis = config.yAxis(idx)
axis.api { a =>
update(config.copy(yAxis = config.yAxis.updated(idx, a)))
}
}
@WebMethod(action = "Options for the Y Axis")
def yAxis = getYAxis(0)
@WebMethod
def addXAxis(axis: Axis = Axis()) = update(config.copy(xAxis = config.xAxis :+ axis))
@WebMethod
def addYAxis(axis: Axis = Axis()) = update(config.copy(yAxis = config.yAxis :+ axis))
@WebMethod(action = "Settings that apply to all data series on this chart")
def defaultSettings = {
val oldPlotOptions = Option(config.plotOptions).getOrElse(PlotSpecificSettings())
val series = Option(oldPlotOptions.series).getOrElse(SeriesSettings())
series.api(s => update(config.copy(plotOptions = oldPlotOptions.copy(series = s))))
}
@WebMethod(action = "Size, borders, margins, etc.")
def layout = config.chart.api(c => update(config.copy(chart = c)))
@WebMethod(action = "Legend layout")
def legend = config.legend.api(c => update(config.copy(legend = c)))
@WebMethod(action = "Export to png, pdf, etc.")
def exporting = config.exporting.api(e => update(config.copy(exporting = e)))
@WebMethod(action = "Data series attributes")
def series(idx: Int) = config.series(idx).api(s => update(config.copy(series = config.series.updated(idx, s))))
@WebMethod(action = "Add new data series")
def addSeries(xyData: SeriesData) = update {
val oldSeries = config.series
val seriesType = if (oldSeries.size > 0) oldSeries(0).`type` else HcSeriesType.line
config.copy(series =
oldSeries :+ Series(data = xyData.points, `type` = seriesType))
}
@WebMethod(action = "Title options")
def title = config.title.api(t => update(config.copy(title = t)))
@WebMethod(action = "Default colors for data series")
def colors(x: Seq[Color]) = update(config.copy(colors = x))
@WebMethod(action = "Add Text Label at (x,y) with CSS style")
def addFloatingLabel(x: Int, y: Int, text: String, style: Map[String, String] = Map()) = {
val oldLabels = Option(config.labels).getOrElse(FloatingLabels(Seq()))
var fullStyle = style
fullStyle += ("left" -> s"${x}px")
fullStyle += ("top" -> s"${y}px")
update(config.copy(labels = FloatingLabels(oldLabels.items :+ FloatingLabel(text, fullStyle))))
}
@WebMethod(action = "Add additional values to the JSON object")
def additionalField[V: JsonWriter](name: String, value: V)
= update(config.copy(additionalFields = config.additionalFields + (name -> implicitly[JsonWriter[V]].write(value))))
}
case class ExportOptions(enabled: Boolean = true,
additionalFields: Map[String, JsValue] = Map()) extends ExtensibleJsObject {
def api[T](update: ExportOptions => T) = new ExportOptionsAPI(this, update)
}
class ExportOptionsAPI[T](e: ExportOptions, update: ExportOptions => T) extends ExtensibleJsObjectAPI {
@WebMethod(action = "Enable export control widget")
def enabled(x: Boolean) = update(e.copy(enabled = x))
@WebMethod(action = "Add additional values to the JSON object")
def additionalField[V: JsonWriter](name: String, value: V)
= update(e.copy(additionalFields = e.additionalFields + (name -> implicitly[JsonWriter[V]].write(value))))
}
case class FloatingLabels(items: Seq[FloatingLabel])
case class FloatingLabel(html: String, style: Map[String, String])
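// Hedged usage sketch (not part of the original source): given some handle `chart` exposing
// ChartAPI (obtained elsewhere in quisp), the configuration above is driven fluently, e.g.
//   chart
//     .addFloatingLabel(10, 20, "peak load", Map("color" -> "#FF0000"))
//     .colors(Seq(Color.RED, Color.BLUE))
//     .exporting.enabled(false)
// Every call returns the updated chart, so a full configuration reads as one chained expression.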
| rodneykinney/quisp | src/main/scala/quisp/highcharts/Chart.scala | Scala | apache-2.0 | 5,230 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Input}
case class CP87Input(value: Option[Int]) extends CtBoxIdentifier(name = "First year allowance claimed") with CtOptionalInteger with Input
object CP87Input {
def apply(int: Int): CP87Input = CP87Input(Some(int))
}
| keithhall/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP87Input.scala | Scala | apache-2.0 | 931 |
/*
* This file is part of Apparat.
*
* Copyright (C) 2010 Joa Ebert
* http://www.joa-ebert.com/
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package apparat.tools.reducer
import apparat.tools._
import apparat.utils._
import apparat.swf._
import java.awt.image.{BufferedImage => JBufferedImage}
import javax.imageio.{IIOImage => JIIOImage}
import javax.imageio.{ImageIO => JImageIO}
import javax.imageio.{ImageWriteParam => JImageWriteParam}
import java.util.zip.{Inflater => JInflater}
import java.util.zip.{Deflater => JDeflater}
import apparat.actors.Futures._
import apparat.abc.Abc
import apparat.abc.analysis.AbcConstantPoolBuilder
import java.io.{File => JFile, FileOutputStream => JFileOutputStream, ByteArrayOutputStream => JByteArrayOutputStream, ByteArrayInputStream => JByteArrayInputStream}
import apparat.bytecode.optimization.BlockMerge
object Reducer {
def main(args: Array[String]): Unit = ApparatApplication(new ReducerTool, args)
class ReducerTool extends ApparatTool {
var deblock = 0.0f
var quality = 0.99f
var input: JFile = _
var output: JFile = _
var mergeABC: Boolean = false
var sortCPool: Boolean = false
var lzma: Boolean = false
var matryoshkaType: Int = MatryoshkaType.QUIET
var customMatryoshka: Option[JFile] = None
var mergeCF: Boolean = false
override def name: String = "Reducer"
override def help: String = """ -i [file] Input file
-o [file] Output file (optional)
-d [float] Strength of deblocking filter (optional)
-q [float] Quality from 0.0 to 1.0 (optional)
-m [true|false] Merge ABC files
-s [true|false] Sort constant pool (only if -m is specified)
-l [true|false] Use LZMA compression
-t [quiet|preloader|custom] Matryoshka type (default: quiet)
-f [file] Custom matryoshka SWF wrapper (required if -t custom)
-b [true|false] Merge control flow if possible (experimental)"""
override def configure(config: ApparatConfiguration): Unit = configure(ReducerConfigurationFactory fromConfiguration config)
def configure(config: ReducerConfiguration): Unit = {
input = config.input
output = config.output
quality = config.quality
deblock = config.deblock
mergeABC = config.mergeABC
sortCPool = config.sortCPool
lzma = config.lzma
matryoshkaType = config.matryoshkaType
customMatryoshka = config.matryoshka
mergeCF = config.mergeCF
}
override def run() = {
SwfTags.tagFactory = (kind: Int) => kind match {
case SwfTags.DefineBitsLossless2 => Some(new DefineBitsLossless2)
case SwfTags.FileAttributes => Some(new FileAttributes)
case SwfTags.DoABC if mergeABC || mergeCF => Some(new DoABC)
case SwfTags.DoABC1 if mergeABC || mergeCF => Some(new DoABC)
case SwfTags.DefineBinaryData => Some(new DefineBinaryData)
case SwfTags.ScriptLimits => Some(new ScriptLimits)
case SwfTags.SetBackgroundColor => Some(new SetBackgroundColor)
case _ => None
}
val source = input
val target = output
val l0 = source length
val cont = TagContainer fromFile source
cont.tags = cont.tags filterNot (tag => tag.kind == SwfTags.Metadata || tag.kind == SwfTags.ProductInfo)
cont mapTags reduce
if(mergeCF) {
log.info("Merging identical control flow ...")
cont foreachTag {
case doABC: DoABC => {
Abc.using(doABC) {
abc => {
for {
method <- abc.methods
body <- method.body
bytecode <- body.bytecode
} {
body.bytecode = Some(BlockMerge(bytecode)._2)
}
}
}
}
}
}
if(mergeABC) {
log.info("Merging ABC files ...")
var buffer: Option[Abc] = None
var result = List.empty[SwfTag]
var i = 0
//
// Note: We cannot use foreachTag or mapTags since the
// order is not gauranteed.
//
for(tag <- cont.tags) {
tag match {
case doABC: DoABC => {
val abc = Abc fromDoABC doABC
abc.loadBytecode()
buffer = buffer match {
case Some(b) => Some(b + abc)
case None => Some(abc)
}
}
case o => {
buffer match {
case Some(b) => {
val doABC = new DoABC()
doABC.flags = 1
doABC.name = "apparat.googlecode.com"
b.bytecodeAvailable = true
if(sortCPool) {
log.info("Rebuilding constant pool ...")
b.cpool = AbcConstantPoolBuilder using b
}
b.saveBytecode()
//Removed IdenticalMethodSort due to Issue 34.
//IdenticalMethodSort(b)
b write doABC
result = o :: doABC :: result
}
case None => result = o :: result
}
buffer = None
}
}
}
cont.tags = result.reverse
}
if(lzma) {
log.info("Creating LZMA compressed file.")
cont.strategy match {
case Some(swfStrategy: SwfStrategy) => matryoshkaType match {
case MatryoshkaType.NONE => IO.using(new JFileOutputStream(target)) {
_ write (swfStrategy.swf getOrElse error("No SWF loaded.")).toByteArray
}
case _ => {
//
// Create a Matryoshka
//
val matryoshka = new MatryoshkaInjector(swfStrategy.swf getOrElse error("No SWF loaded."),
matryoshkaType, customMatryoshka)
val outputStream = new JFileOutputStream(target)
outputStream write matryoshka.toByteArray
outputStream.flush()
outputStream.close()
}
}
case other => {
log.warning("LZMA works only with SWF files. You cannot compress a SWC/ABC.")
cont write target
}
}
} else {
cont write target
}
val delta = l0 - (target length)
log.info("Compression ratio: %.2f%%", ((delta).asInstanceOf[Float] / l0.asInstanceOf[Float]) * 100.0f)
log.info("Total bytes: %d", delta)
}
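// Tag-level reductions applied through `cont mapTags reduce` above: 32-bit lossless bitmaps larger
// than 1024 pixels are re-encoded as (alpha-)JPEG when the result is smaller, and FileAttributes
// is rewritten with hasMetadata = false since Metadata tags were already stripped.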
private def reduce: PartialFunction[SwfTag, SwfTag] = {
case dbl2: DefineBitsLossless2 => {
if (5 == dbl2.bitmapFormat && (dbl2.bitmapWidth * dbl2.bitmapHeight) > 1024) {
lossless2jpg(dbl2)
} else {
dbl2
}
}
case fileAttributes: FileAttributes => {
val result = new FileAttributes()
result.actionScript3 = fileAttributes.actionScript3
result.hasMetadata = false
result.useDirectBlit = fileAttributes.useDirectBlit
result.useGPU = fileAttributes.useGPU
result.useNetwork = fileAttributes.useNetwork
result
}
}
private def lossless2jpg(tag: DefineBitsLossless2) = {
val width = tag.bitmapWidth
val height = tag.bitmapHeight
val inflater = new JInflater();
val lossless = new Array[Byte]((width * height) << 2)
val alphaData = new Array[Byte](width * height)
var needsAlpha = false
// decompress zlib data
inflater setInput tag.zlibBitmapData
var offset = -1
while (0 != offset && !inflater.finished) {
offset = inflater inflate lossless
if (0 == offset && inflater.needsInput) {
error("Need more input.")
}
}
// create buffered image
// fill alpha data
val buffer = new JBufferedImage(width, height, JBufferedImage.TYPE_INT_ARGB)
for (y <- 0 until height; x <- 0 until width) {
val index = (x << 2) + (y << 2) * width
val alpha = lossless(index) & 0xff
val red = lossless(index + 1) & 0xff
val green = lossless(index + 2) & 0xff
val blue = lossless(index + 3) & 0xff
if (0xff != alpha) {
needsAlpha = true
}
// useless to go from premultiplied to normal
//
//if(alpha > 0 && alpha < 0xff) {
// val alphaMultiplier = 255.0f / alpha
// red = clamp(red * alphaMultiplier)
// green = clamp(green * alphaMultiplier)
// blue = clamp(blue * alphaMultiplier)
//}
alphaData(x + y * width) = lossless(index)
buffer.setRGB(x, y, (0xff << 0x18) | (red << 0x10) | (green << 0x08) | blue)
}
// compress alpha data
val deflater = new JDeflater(JDeflater.BEST_COMPRESSION)
deflater setInput alphaData
deflater.finish()
val compressBuffer = new Array[Byte](0x400)
var numBytesCompressed = 0
val alphaOutput = new JByteArrayOutputStream()
do {
numBytesCompressed = deflater deflate compressBuffer
alphaOutput write (compressBuffer, 0, numBytesCompressed)
} while (0 != numBytesCompressed)
alphaOutput.flush()
alphaOutput.close()
// create jpg
val writer = JImageIO getImageWritersByFormatName ("jpg") next ()
val imageOutput = new JByteArrayOutputStream()
writer setOutput JImageIO.createImageOutputStream(imageOutput)
val writeParam = writer.getDefaultWriteParam()
writeParam setCompressionMode JImageWriteParam.MODE_EXPLICIT
writeParam setCompressionQuality quality
writer write (null, new JIIOImage(buffer.getData(), null, null), writeParam)
imageOutput.flush()
imageOutput.close()
writer.dispose()
// create tag
val newTag: SwfTag with KnownLengthTag with DefineTag = if (needsAlpha) {
if (0.0f == deblock) {
val dbj3 = new DefineBitsJPEG3()
dbj3.alphaData = alphaOutput.toByteArray()
dbj3.imageData = imageOutput.toByteArray()
dbj3
} else {
val dbj4 = new DefineBitsJPEG4()
dbj4.alphaData = alphaOutput.toByteArray()
dbj4.imageData = imageOutput.toByteArray()
dbj4.deblock = deblock
dbj4
}
} else {
val dbj2 = new DefineBitsJPEG2()
dbj2.imageData = imageOutput.toByteArray()
dbj2
}
if (newTag.length < tag.length) {
log.info("Compressed character %d.", tag.characterID)
newTag.characterID = tag.characterID
newTag
} else {
tag
}
}
private def clamp(value: Float): Int = value match {
case x if x < 0 => 0
case x if x > 255 => 255
case x => x.asInstanceOf[Int]
}
}
}
| joa/apparat | apparat-core/src/main/scala/apparat/tools/reducer/Reducer.scala | Scala | lgpl-2.1 | 10,430 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import java.io.{ File, InputStream, OutputStream }
import java.util.{ UUID, Properties }
import cascading.scheme.Scheme
import cascading.scheme.local.{ TextLine => CLTextLine, TextDelimited => CLTextDelimited }
import cascading.scheme.hadoop.{
TextLine => CHTextLine,
TextDelimited => CHTextDelimited,
SequenceFile => CHSequenceFile
}
import cascading.tap.hadoop.Hfs
import cascading.tap.MultiSourceTap
import cascading.tap.SinkMode
import cascading.tap.Tap
import cascading.tap.local.FileTap
import cascading.tuple.Fields
import com.etsy.cascading.tap.local.LocalTap
import com.twitter.algebird.{ MapAlgebra, OrVal }
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{ FileStatus, PathFilter, Path }
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapred.OutputCollector
import org.apache.hadoop.mapred.RecordReader
import scala.util.{ Try, Success, Failure }
/**
* A base class for sources that take a scheme trait.
*/
abstract class SchemedSource extends Source {
/** The scheme to use if the source is local. */
def localScheme: Scheme[Properties, InputStream, OutputStream, _, _] =
throw ModeException("Cascading local mode not supported for: " + toString)
/** The scheme to use if the source is on hdfs. */
def hdfsScheme: Scheme[JobConf, RecordReader[_, _], OutputCollector[_, _], _, _] =
throw ModeException("Cascading Hadoop mode not supported for: " + toString)
// The mode to use for output taps determining how conflicts with existing output are handled.
val sinkMode: SinkMode = SinkMode.REPLACE
}
trait HfsTapProvider {
def createHfsTap(scheme: Scheme[JobConf, RecordReader[_, _], OutputCollector[_, _], _, _],
path: String,
sinkMode: SinkMode): Hfs =
new Hfs(scheme, path, sinkMode)
}
private[scalding] object CastFileTap {
// The scala compiler has problems with the generics in Cascading
def apply(tap: FileTap): Tap[JobConf, RecordReader[_, _], OutputCollector[_, _]] =
tap.asInstanceOf[Tap[JobConf, RecordReader[_, _], OutputCollector[_, _]]]
}
/**
* A trait which provides a method to create a local tap.
*/
trait LocalSourceOverride extends SchemedSource {
/** A path to use for the local tap. */
def localPaths: Iterable[String]
// By default, we write to the last path for local paths
def localWritePath = localPaths.last
/**
* Creates a local tap.
*
* @param sinkMode The mode for handling output conflicts.
   * @return A tap.
*/
def createLocalTap(sinkMode: SinkMode): Tap[JobConf, _, _] = {
val taps = localPaths.map {
p: String =>
CastFileTap(new FileTap(localScheme, p, sinkMode))
}.toList
taps match {
case Nil => throw new InvalidSourceException("LocalPaths is empty")
case oneTap :: Nil => oneTap
case many => new ScaldingMultiSourceTap(many)
}
}
}
object HiddenFileFilter extends PathFilter {
def accept(p: Path) = {
val name = p.getName
!name.startsWith("_") && !name.startsWith(".")
}
}
object SuccessFileFilter extends PathFilter {
def accept(p: Path) = { p.getName == "_SUCCESS" }
}
object AcceptAllPathFilter extends PathFilter {
def accept(p: Path) = true
}
object FileSource {
def glob(glob: String, conf: Configuration, filter: PathFilter = AcceptAllPathFilter): Iterable[FileStatus] = {
val path = new Path(glob)
Option(path.getFileSystem(conf).globStatus(path, filter)).map {
_.toIterable // convert java Array to scala Iterable
}.getOrElse {
Iterable.empty
}
}
/**
   * @return whether globPath contains non-hidden files
*/
def globHasNonHiddenPaths(globPath: String, conf: Configuration): Boolean = {
!glob(globPath, conf, HiddenFileFilter).isEmpty
}
/**
   * @return whether every directory matched by globPath contains a _SUCCESS file
*/
def globHasSuccessFile(globPath: String, conf: Configuration): Boolean = allGlobFilesWithSuccess(globPath, conf, hiddenFilter = false)
/**
* Determines whether each file in the glob has a _SUCCESS sibling file in the same directory
* @param globPath path to check
* @param conf Hadoop Configuration to create FileSystem
   * @param hiddenFilter if true, only directories containing non-hidden files are considered
   * @return true if at least one directory matches and every matched directory contains a _SUCCESS file
*/
def allGlobFilesWithSuccess(globPath: String, conf: Configuration, hiddenFilter: Boolean): Boolean = {
// Produce tuples (dirName, hasSuccess, hasNonHidden) keyed by dir
//
val usedDirs = glob(globPath, conf, AcceptAllPathFilter)
.map { fileStatus: FileStatus =>
// stringify Path for Semigroup
val dir =
if (fileStatus.isDirectory)
fileStatus.getPath.toString
else
fileStatus.getPath.getParent.toString
// HiddenFileFilter should better be called non-hidden but it borrows its name from the
// private field name in hadoop FileInputFormat
//
dir -> (dir,
OrVal(SuccessFileFilter.accept(fileStatus.getPath) && fileStatus.isFile),
OrVal(HiddenFileFilter.accept(fileStatus.getPath)))
}
// OR by key
val uniqueUsedDirs = MapAlgebra.sumByKey(usedDirs)
.filter { case (_, (_, _, hasNonHidden)) => (!hiddenFilter || hasNonHidden.get) }
// there is at least one valid path, and all paths have success
//
uniqueUsedDirs.nonEmpty && uniqueUsedDirs.forall {
case (_, (_, hasSuccess, _)) => hasSuccess.get
}
}
}
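
// Hedged usage sketch (illustration only, not part of the original file): probing a glob with
// the helpers above. The object name and glob string are placeholders.
private[scalding] object FileSourceUsageSketch {
  def sanityCheck(glob: String, conf: Configuration): Boolean =
    // true when at least one visible file matches and every matched directory carries a _SUCCESS marker
    FileSource.globHasNonHiddenPaths(glob, conf) &&
      FileSource.allGlobFilesWithSuccess(glob, conf, hiddenFilter = true)
}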
/**
* This is a base class for File-based sources
*/
abstract class FileSource extends SchemedSource with LocalSourceOverride with HfsTapProvider {
/**
* Determines if a path is 'valid' for this source. In strict mode all paths must be valid.
* In non-strict mode, all invalid paths will be filtered out.
*
* Subclasses can override this to validate paths.
*
* The default implementation is a quick sanity check to look for missing or empty directories.
* It is necessary but not sufficient -- there are cases where this will return true but there is
* in fact missing data.
*
* TODO: consider writing a more in-depth version of this method in [[TimePathedSource]] that looks for
* TODO: missing days / hours etc.
*/
protected def pathIsGood(p: String, conf: Configuration) = FileSource.globHasNonHiddenPaths(p, conf)
def hdfsPaths: Iterable[String]
// By default, we write to the LAST path returned by hdfsPaths
def hdfsWritePath = hdfsPaths.last
override def createTap(readOrWrite: AccessMode)(implicit mode: Mode): Tap[_, _, _] = {
mode match {
// TODO support strict in Local
case Local(_) => {
readOrWrite match {
case Read => createLocalTap(sinkMode)
case Write => new FileTap(localScheme, localWritePath, sinkMode)
}
}
case hdfsMode @ Hdfs(_, _) => readOrWrite match {
case Read => createHdfsReadTap(hdfsMode)
case Write => CastHfsTap(createHfsTap(hdfsScheme, hdfsWritePath, sinkMode))
}
case _ => {
val tryTtp = Try(TestTapFactory(this, hdfsScheme, sinkMode)).map {
// these java types are invariant, so we cast here
_.createTap(readOrWrite)
.asInstanceOf[Tap[Any, Any, Any]]
}.orElse {
Try(TestTapFactory(this, localScheme.getSourceFields, sinkMode)).map {
_.createTap(readOrWrite)
.asInstanceOf[Tap[Any, Any, Any]]
}
}
tryTtp match {
case Success(s) => s
case Failure(e) => throw new java.lang.IllegalArgumentException(s"Failed to create tap for: $toString, with error: ${e.getMessage}", e)
}
}
}
}
// This is only called when Mode.sourceStrictness is true
protected def hdfsReadPathsAreGood(conf: Configuration) = {
hdfsPaths.forall { pathIsGood(_, conf) }
}
/*
* This throws InvalidSourceException if:
* 1) we are in sourceStrictness mode and all sources are not present.
* 2) we are not in the above, but some source has no input whatsoever
* TODO this only does something for HDFS now. Maybe we should do the same for LocalMode
*/
override def validateTaps(mode: Mode): Unit = {
mode match {
case Hdfs(strict, conf) => {
if (strict && (!hdfsReadPathsAreGood(conf))) {
throw new InvalidSourceException(
"[" + this.toString + "] Data is missing from one or more paths in: " +
hdfsPaths.toString)
} else if (!hdfsPaths.exists { pathIsGood(_, conf) }) {
//Check that there is at least one good path:
throw new InvalidSourceException(
"[" + this.toString + "] No good paths in: " + hdfsPaths.toString)
}
}
case Local(strict) => {
val files = localPaths.map{ p => new java.io.File(p) }
if (strict && !files.forall(_.exists)) {
throw new InvalidSourceException(
"[" + this.toString + s"] Data is missing from: ${localPaths.filterNot { p => new java.io.File(p).exists }}")
} else if (!files.exists(_.exists)) {
throw new InvalidSourceException(
"[" + this.toString + "] No good paths in: " + hdfsPaths.toString)
}
}
case _ => ()
}
}
/*
* Get all the set of valid paths based on source strictness.
*/
protected def goodHdfsPaths(hdfsMode: Hdfs) = {
hdfsMode match {
//we check later that all the paths are good
case Hdfs(true, _) => hdfsPaths
// If there are no matching paths, this is still an error, we need at least something:
case Hdfs(false, conf) => hdfsPaths.filter{ pathIsGood(_, conf) }
}
}
protected def createHdfsReadTap(hdfsMode: Hdfs): Tap[JobConf, _, _] = {
val taps: List[Tap[JobConf, RecordReader[_, _], OutputCollector[_, _]]] =
goodHdfsPaths(hdfsMode)
.toList.map { path => CastHfsTap(createHfsTap(hdfsScheme, path, sinkMode)) }
taps.size match {
case 0 => {
// This case is going to result in an error, but we don't want to throw until
// validateTaps. Return an InvalidSource here so the Job constructor does not fail.
        // In the worst case, if the flow plan is misconfigured,
        // openForRead on mappers should fail when using this tap.
new InvalidSourceTap(hdfsPaths)
}
case 1 => taps.head
case _ => new ScaldingMultiSourceTap(taps)
}
}
}
class ScaldingMultiSourceTap(taps: Seq[Tap[JobConf, RecordReader[_, _], OutputCollector[_, _]]])
extends MultiSourceTap[Tap[JobConf, RecordReader[_, _], OutputCollector[_, _]], JobConf, RecordReader[_, _]](taps: _*) {
private final val randomId = UUID.randomUUID.toString
override def getIdentifier() = randomId
override def hashCode: Int = randomId.hashCode
}
/**
* The fields here are ('offset, 'line)
*/
trait TextSourceScheme extends SchemedSource {
// The text-encoding to use when writing out the lines (default is UTF-8).
val textEncoding: String = CHTextLine.DEFAULT_CHARSET
override def localScheme = new CLTextLine(new Fields("offset", "line"), Fields.ALL, textEncoding)
override def hdfsScheme = HadoopSchemeInstance(new CHTextLine(CHTextLine.DEFAULT_SOURCE_FIELDS, textEncoding))
}
trait TextLineScheme extends TextSourceScheme with SingleMappable[String] {
//In textline, 0 is the byte position, the actual text string is in column 1
override def sourceFields = Dsl.intFields(Seq(1))
}
/**
* Mix this in for delimited schemes such as TSV or one-separated values
* By default, TSV is given
*/
trait DelimitedScheme extends SchemedSource {
//override these as needed:
val fields = Fields.ALL
  //This is passed directly to cascading where null is interpreted as string
val types: Array[Class[_]] = null
val separator = "\\t"
val skipHeader = false
val writeHeader = false
val quote: String = null
// Whether to throw an exception or not if the number of fields does not match an expected number.
// If set to false, missing fields will be set to null.
val strict = true
// Whether to throw an exception if a field cannot be coerced to the right type.
// If set to false, then fields that cannot be coerced will be set to null.
val safe = true
//These should not be changed:
override def localScheme = new CLTextDelimited(fields, skipHeader, writeHeader, separator, strict, quote, types, safe)
override def hdfsScheme = {
assert(
types == null || fields.size == types.size,
"Fields [" + fields + "] of different size than types array [" + types.mkString(",") + "]")
HadoopSchemeInstance(new CHTextDelimited(fields, null, skipHeader, writeHeader, separator, strict, quote, types, safe))
}
}
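
// Hedged illustration (not in the original source): a pipe-separated source assembled from the
// pieces above; the name "Psv" is an assumption.
case class Psv(p: String, override val sinkMode: SinkMode = SinkMode.REPLACE)
  extends FixedPathSource(p) with DelimitedScheme {
  // Only the separator differs from the TSV defaults.
  override val separator = "|"
}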
trait SequenceFileScheme extends SchemedSource {
//override these as needed:
val fields = Fields.ALL
// TODO Cascading doesn't support local mode yet
override def hdfsScheme = HadoopSchemeInstance(new CHSequenceFile(fields))
}
/**
* Ensures that a _SUCCESS file is present in every directory included by a glob,
* as well as the requirements of [[FileSource.pathIsGood]]. The set of directories to check for
* _SUCCESS
* is determined by examining the list of all paths returned by globPaths and adding parent
* directories of the non-hidden files encountered.
* pathIsGood should still be considered just a best-effort test. As an illustration the following
 * layout with an in-flight job is accepted for the glob dir*&#47;*:
* <pre>
* dir1/_temporary
* dir2/file1
* dir2/_SUCCESS
* </pre>
*
 * Similarly if dir1 is physically empty pathIsGood is still true for dir*&#47;* above
*
* On the other hand it will reject an empty output directory of a finished job:
* <pre>
* dir1/_SUCCESS
* </pre>
*
*/
trait SuccessFileSource extends FileSource {
override protected def pathIsGood(p: String, conf: Configuration) =
FileSource.allGlobFilesWithSuccess(p, conf, true)
}
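
// Hedged illustration (not in the original source): a TSV source that additionally requires a
// _SUCCESS marker in every matched directory; the name is an assumption.
case class TsvWithSuccessFile(p: String)
  extends FixedPathSource(p) with DelimitedScheme with SuccessFileSource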
/**
* Use this class to add support for Cascading local mode via the Hadoop tap.
* Put another way, this runs a Hadoop tap outside of Hadoop in the Cascading local mode
*/
trait LocalTapSource extends LocalSourceOverride {
override def createLocalTap(sinkMode: SinkMode): Tap[JobConf, _, _] = {
val taps = localPaths.map { p =>
new LocalTap(p, hdfsScheme, sinkMode).asInstanceOf[Tap[JobConf, RecordReader[_, _], OutputCollector[_, _]]]
}.toSeq
taps match {
case Nil => throw new InvalidSourceException("LocalPaths is empty")
case oneTap :: Nil => oneTap
case many => new ScaldingMultiSourceTap(many)
}
}
}
abstract class FixedPathSource(path: String*) extends FileSource {
def localPaths = path.toList
def hdfsPaths = path.toList
// `toString` is used by equals in JobTest, which causes
// problems due to unstable collection type of `path`
override def toString = getClass.getName + path.mkString("(", ",", ")")
override def hdfsWritePath = stripTrailing(super.hdfsWritePath)
override def hashCode = toString.hashCode
override def equals(that: Any): Boolean = (that != null) && (that.toString == toString)
/**
* Similar in behavior to {@link TimePathedSource.writePathFor}.
* Strip out the trailing slash star.
*/
protected def stripTrailing(path: String): String = {
assert(path != "*", "Path must not be *")
assert(path != "/*", "Path must not be /*")
if (path.takeRight(2) == "/*") {
path.dropRight(2)
} else {
path
}
}
}
/**
* Tab separated value source
*/
case class Tsv(p: String, override val fields: Fields = Fields.ALL,
override val skipHeader: Boolean = false, override val writeHeader: Boolean = false,
override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p) with DelimitedScheme
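
// Hedged usage sketch (not in the original source): a minimal fields-API job copying one Tsv to
// another; the class name and argument keys are assumptions.
class CopyTsvJob(args: Args) extends Job(args) {
  Tsv(args("input"))
    .read
    .write(Tsv(args("output")))
}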
/**
* Allows the use of multiple Tsv input paths. The Tsv files will
* be process through your flow as if they are a single pipe. Tsv
* files must have the same schema.
* For more details on how multiple files are handled check the
* cascading docs.
*/
case class MultipleTsvFiles(p: Seq[String], override val fields: Fields = Fields.ALL,
override val skipHeader: Boolean = false, override val writeHeader: Boolean = false) extends FixedPathSource(p: _*)
with DelimitedScheme
/**
 * Comma-separated value source,
 * with fields separated by commas and wrapped in quotes.
*/
case class Csv(p: String,
override val separator: String = ",",
override val fields: Fields = Fields.ALL,
override val skipHeader: Boolean = false,
override val writeHeader: Boolean = false,
override val quote: String = "\\"",
override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p) with DelimitedScheme
/**
 * One-separated values: fields delimited by the Ctrl-A (0x01) character, commonly used by Pig
*/
case class Osv(p: String, f: Fields = Fields.ALL,
override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p)
with DelimitedScheme {
override val fields = f
override val separator = "\\u0001"
}
object TextLine {
// Default encoding is UTF-8
val defaultTextEncoding: String = CHTextLine.DEFAULT_CHARSET
val defaultSinkMode: SinkMode = SinkMode.REPLACE
def apply(p: String, sm: SinkMode = defaultSinkMode, textEncoding: String = defaultTextEncoding): TextLine =
new TextLine(p, sm, textEncoding)
}
class TextLine(p: String, override val sinkMode: SinkMode, override val textEncoding: String) extends FixedPathSource(p) with TextLineScheme {
// For some Java interop
def this(p: String) = this(p, TextLine.defaultSinkMode, TextLine.defaultTextEncoding)
}
/**
* Alternate typed TextLine source that keeps both 'offset and 'line fields.
*/
class OffsetTextLine(filepath: String,
override val sinkMode: SinkMode,
override val textEncoding: String)
extends FixedPathSource(filepath) with Mappable[(Long, String)] with TextSourceScheme {
override def converter[U >: (Long, String)] =
TupleConverter.asSuperConverter[(Long, String), U](TupleConverter.of[(Long, String)])
}
/**
* Alternate typed TextLine source that keeps both 'offset and 'line fields.
*/
object OffsetTextLine {
// Default encoding is UTF-8
val defaultTextEncoding: String = CHTextLine.DEFAULT_CHARSET
val defaultSinkMode: SinkMode = SinkMode.REPLACE
def apply(p: String, sm: SinkMode = defaultSinkMode, textEncoding: String = defaultTextEncoding): OffsetTextLine =
new OffsetTextLine(p, sm, textEncoding)
}
case class SequenceFile(p: String, f: Fields = Fields.ALL, override val sinkMode: SinkMode = SinkMode.REPLACE)
extends FixedPathSource(p) with SequenceFileScheme with LocalTapSource {
override val fields = f
}
case class MultipleSequenceFiles(p: String*) extends FixedPathSource(p: _*) with SequenceFileScheme with LocalTapSource
case class MultipleTextLineFiles(p: String*) extends FixedPathSource(p: _*) with TextLineScheme
/**
* Delimited files source
* allowing to override separator and quotation characters and header configuration
*/
case class MultipleDelimitedFiles(f: Fields,
override val separator: String,
override val quote: String,
override val skipHeader: Boolean,
override val writeHeader: Boolean,
p: String*) extends FixedPathSource(p: _*) with DelimitedScheme {
override val fields = f
}
| tglstory/scalding | scalding-core/src/main/scala/com/twitter/scalding/FileSource.scala | Scala | apache-2.0 | 19,768 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.examples.fixture.funspec.sharing
import java.util.concurrent.ConcurrentHashMap
import org.scalatest._
import DbServer._
import java.util.UUID.randomUUID
object DbServer { // Simulating a database server
type Db = StringBuffer
private val databases = new ConcurrentHashMap[String, Db]
def createDb(name: String): Db = {
val db = new StringBuffer
databases.put(name, db)
db
}
def removeDb(name: String) {
databases.remove(name)
}
}
trait DbFixture { this: fixture.Suite =>
type FixtureParam = Db
// Allow clients to populate the database after
// it is created
def populateDb(db: Db) {}
def withFixture(test: OneArgTest): Outcome = {
val dbName = randomUUID.toString
val db = createDb(dbName) // create the fixture
try {
populateDb(db) // setup the fixture
withFixture(test.toNoArgTest(db)) // "loan" the fixture to the test
}
finally removeDb(dbName) // clean up the fixture
}
}
class ExampleSpec extends fixture.FunSpec with DbFixture {
override def populateDb(db: Db) { // setup the fixture
db.append("ScalaTest is ")
}
describe("Testing") {
it("should be easy") { db =>
db.append("easy!")
assert(db.toString === "ScalaTest is easy!")
}
it("should be fun") { db =>
db.append("fun!")
assert(db.toString === "ScalaTest is fun!")
}
}
// This test doesn't need a Db
describe("Test code") {
it("should be clear") { () =>
val buf = new StringBuffer
buf.append("ScalaTest code is ")
buf.append("clear!")
assert(buf.toString === "ScalaTest code is clear!")
}
}
}
| travisbrown/scalatest | examples/src/main/scala/org/scalatest/examples/fixture/funspec/sharing/ExampleSpec.scala | Scala | apache-2.0 | 2,258 |
/* Copyright 2012 Typesafe (http://www.typesafe.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package twirl.compiler
import org.specs2.mutable.Specification
import java.io.File
import twirl.api._
object TemplateUtilsSpec extends Specification {
"Templates" should {
"provide a HASH util" in {
Hash("itShouldWork".getBytes, "") must be_==("31c0c4e0e142fe9b605fff44528fedb3dd8ae254")
}
"provide a Format API" in {
"HTML for example" in {
import twirl.compiler.Helper.{Html, HtmlFormat}
val html = HtmlFormat.raw("<h1>") + HtmlFormat.escape("Hello <world>") + HtmlFormat.raw("</h1>")
html.toString must be_==("<h1>Hello <world></h1>")
}
"Text for example" in {
case class Text(text: String) extends Appendable[Text] {
val buffer = new StringBuilder(text)
def +(other: Text) = {
buffer.append(other.buffer)
this
}
override def toString = buffer.toString
}
object TextFormat extends Format[Text] {
def raw(text: String) = Text(text)
def escape(text: String) = Text(text)
}
val text = TextFormat.raw("<h1>") + TextFormat.escape("Hello <world>") + TextFormat.raw("</h1>")
text.toString must be_==("<h1>Hello <world></h1>")
}
"generate proper packages from the directory structure" in {
val baseDir = new File("twirl-compiler/src/test/templates/")
def haveTemplateName(templateName: String*) = be_==(templateName) ^^ { fileName: String =>
TwirlCompiler.generatedFile(
template = new File(baseDir, fileName),
sourceDirectory = baseDir,
generatedDirectory = new File("generated-templates")
)._1.toSeq
}
"on the template dir root" in {
"simple.scala.html" must haveTemplateName("html", "simple")
}
"one level deep" in {
"example/simple.scala.html" must haveTemplateName("example", "html", "simple")
}
"several levels deep" in {
"com/example/simple.scala.html" must haveTemplateName("com", "example", "html", "simple")
}
}
}
}
} | spray/twirl | twirl-compiler/src/test/scala/twirl/compiler/TemplateUtilsSpec.scala | Scala | apache-2.0 | 2,729 |
package org.genivi.sota.http
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.server.{AuthorizationFailedRejection, Directive0, Directive1, Directives, Rejection}
import akka.http.scaladsl.server.Directives._
import cats.data.Xor
import com.advancedtelematic.akka.http.jwt.InvalidScopeRejection
import com.advancedtelematic.jws.CompactSerialization
import com.advancedtelematic.jwt.{JsonWebToken, Scope, Subject}
import io.circe.parser._
import io.circe.Decoder
import org.genivi.sota.data.Namespace
case class AuthedNamespaceScope(namespace: Namespace, scope: Option[Scope] = None) {
type ScopeItem = String
def hasScope(sc: ScopeItem) : Boolean = scope.isEmpty || scope.get.underlying.contains(sc)
def hasScopeReadonly(sc: ScopeItem) : Boolean = hasScope(sc) || hasScope(sc + ".readonly")
def oauthScope(scope: ScopeItem): Directive0 = {
if (hasScope(scope)) pass
else reject(InvalidScopeRejection(scope), AuthorizationFailedRejection)
}
def oauthScopeReadonly(scope: ScopeItem): Directive0 = {
if (hasScopeReadonly(scope)) pass
else reject(InvalidScopeRejection(scope), AuthorizationFailedRejection)
}
}
object AuthedNamespaceScope {
import scala.language.implicitConversions
implicit def toNamespace(ns: AuthedNamespaceScope): Namespace = ns.namespace
val namespacePrefix = "namespace."
def apply(token: IdToken) : AuthedNamespaceScope = {
AuthedNamespaceScope(Namespace(token.sub.underlying))
}
def apply(token: JsonWebToken) : AuthedNamespaceScope = {
val nsSet = token.scope.underlying.collect {
case x if x.startsWith(namespacePrefix) => x.substring(namespacePrefix.length)
}
if (nsSet.size == 1) {
AuthedNamespaceScope(Namespace(nsSet.toVector(0)), Some(token.scope))
} else {
AuthedNamespaceScope(Namespace(token.subject.underlying), Some(token.scope))
}
}
}
/**
* Type class defining an extraction of namespace information from a token of type `T`
* @tparam T type of a token
*/
trait NsFromToken[T] {
def toNamespaceScope(token: T): AuthedNamespaceScope
}
object NsFromToken {
implicit val NsFromIdToken = new NsFromToken[IdToken] {
override def toNamespaceScope(token: IdToken) = AuthedNamespaceScope(token)
}
implicit val NsFromJwt = new NsFromToken[JsonWebToken] {
override def toNamespaceScope(token: JsonWebToken) = AuthedNamespaceScope(token)
}
def parseToken[T: NsFromToken](serializedToken: String)
(implicit decoder: Decoder[T]): Xor[String, T] =
for {
serialized <- CompactSerialization.parse(serializedToken)
token <- decode[T](serialized.encodedPayload.stringData()).leftMap(_.getMessage)
} yield token
}
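
// Hedged usage sketch (not part of the original file): turning a serialized identity token into
// an authenticated namespace scope via the type class above; the object name is an assumption.
private[http] object NsFromTokenUsageSketch {
  def namespaceOf(serialized: String): Xor[String, AuthedNamespaceScope] =
    NsFromToken.parseToken[IdToken](serialized)
      .map(implicitly[NsFromToken[IdToken]].toNamespaceScope)
}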
/**
* Identity token
* @param sub Subject claim
*/
final case class IdToken(sub: Subject)
object IdToken {
import io.circe.generic.semiauto._
import org.genivi.sota.marshalling.CirceInstances._
implicit val DecoderInstance: Decoder[IdToken] = deriveDecoder[IdToken]
}
object AuthNamespaceDirectives {
import Directives._
private[this] def badNamespaceRejection(msg: String): Rejection = AuthorizationFailedRejection
def authNamespace[T](ns0: Option[Namespace])
(implicit nsFromToken: NsFromToken[T], decoder: Decoder[T]): Directive1[AuthedNamespaceScope] =
extractCredentials flatMap { creds =>
val maybeNamespace = creds match {
case Some(OAuth2BearerToken(serializedToken)) =>
NsFromToken.parseToken[T](serializedToken).flatMap{ token =>
val authedNs = nsFromToken.toNamespaceScope(token)
ns0 match {
case Some(ns) if ns == authedNs.namespace => Xor.right(authedNs)
case Some(ns) if authedNs.hasScope(AuthedNamespaceScope.namespacePrefix + ns) =>
Xor.right(AuthedNamespaceScope(ns, authedNs.scope))
case Some(ns) => Xor.Left("The oauth token does not accept the given namespace")
case None => Xor.right(authedNs)
}
}
case _ => Xor.Left("No oauth token provided to extract namespace")
}
maybeNamespace match {
case Xor.Right(t) => provide(t)
case Xor.Left(msg) =>
extractLog flatMap { l =>
l.info(s"Could not extract namespace: $msg")
reject(badNamespaceRejection(msg))
}
}
}
}
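
// Hedged usage sketch (not part of the original file): guarding a route with the directive
// above plus a scope check; the scope name and route body are assumptions.
//
//   import AuthNamespaceDirectives._
//   val route = authNamespace[JsonWebToken](None) { ns =>
//     ns.oauthScopeReadonly("vehicles") {
//       complete(s"authenticated namespace: ${ns.namespace}")
//     }
//   }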
| PDXostc/rvi_sota_server | common/src/main/scala/org/genivi/sota/http/AuthNamespaceDirectives.scala | Scala | mpl-2.0 | 4,362 |
/*
* Copyright (c) 2013 University of Massachusetts Amherst
* Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*/
package edu.umass.cs.iesl.bibmogrify.reader
import edu.umass.cs.iesl.scalacommons.DateUtils._
import edu.umass.cs.iesl.bibmogrify.model._
import edu.umass.cs.iesl.bibmogrify.model.Authorities._
import com.typesafe.scalalogging.slf4j.Logging
import edu.umass.cs.iesl.bibmogrify.pipeline.Transformer
import edu.umass.cs.iesl.bibmogrify.{NamedInputStream, NamedPlugin, BibMogrifyException}
import xml.{Elem, NodeSeq, Node}
import edu.umass.cs.iesl.scalacommons.{NonemptyString, XMLIgnoreDTD}
import edu.umass.cs.iesl.scalacommons.StringUtils._
import edu.umass.cs.iesl.namejuggler.PersonNameWithDerivations
object PatentST36Reader extends Transformer[NamedInputStream, StructuredPatent] with Logging with NamedPlugin {
val name = "st36"
val fromType = "NamedInputStream"
val toType = "StructuredPatent"
/*
private def parseLanguage(s: String): Option[Language] = {
// ** since these are standardized, they should go in some more general place
s match {
case "en" => English
case "eng" => English
case "de" => German
case "cn" => Chinese
case "jp" => Japanese
case "ru" => Russian
case l => {
logger.warn("Ignoring unknown language: " + l);
None
}
}
}*/
private def parseIdentifierAndDate(d: Option[Node], ct: EventType): (Option[Identifier], Option[CitationEvent]) = {
(d map ((c: Node) => {
val id = BasicIdentifier((c \\ "doc-number").text,
Some(new BasicIdentifierAuthority(("patent-" + (c \\ "country").text.trim + "-" + ct.shortName).n)))
val date = parseDate(c)
val event = new BasicCitationEvent(date, ct)
(id, Some(event))
})).getOrElse((None, None))
}
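  // Dates arrive as compact eight-digit strings, e.g. "20070312" -> year 2007, month 03, day 12,
  // captured by the three groups below.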
private val parseDateR = "(....)(..)(..)".r
private def parseDate(c: Node): Option[BasicPartialDate] = {
val dateString: String = (c \\ "date").text.trim
dateString match {
case "" => None
case d => {
val parseDateR(yearS, monthS, dayS) = d
val year: Option[Int] = Some(yearS.toInt)
// val dayS: Option[String] = (doc \\ "day").text
// val day: Option[Int] = dayS.map(_.toInt)
Some(BasicPartialDate(year, parseMonthOneBased(monthS), Some(dayS.toInt)))
}
}
}
def inferBodyTypeFromHeading(s: String, defaultType: BodyTextSectionType): BodyTextSectionType = {
s match {
case q: String if q.toLowerCase.contains("field") => TechnicalField
case q: String if q.toLowerCase.contains("background") => IntroductionAndBackground
case q: String if q.toLowerCase.contains("introduction") => IntroductionAndBackground
case q: String if q.toLowerCase.contains("summary") => Summary
case q: String if q.toLowerCase.contains("figure") => FigureCaptions
case q: String if q.toLowerCase.contains("drawing") => FigureCaptions
case q: String => defaultType
//** could add more here if needed
}
}
def parse(inLocation: Location, doc: Node): StructuredPatent = {
import ReaderUtils._
def splitBodyText(n: Node, defaultType: BodyTextSectionType): Seq[BodyTextSection] = {
val init: List[BodyTextSection] = List[BodyTextSection](new BasicBodyTextSection(GeneralBodyText, "", None))
      // start the fold with a general block, which may end up empty if we immediately get a heading
val result = n.child.foldLeft(init)((accum: List[BodyTextSection], n: Node) => {
n match {
case np: Elem if np.label == "heading" => {
new BasicBodyTextSection(inferBodyTypeFromHeading(np.text.trim, defaultType), None, np.text.trim) ::
accum
}
case np: Elem if np.label == "p" => (accum.head ++ np.text.trim) :: accum.tail
case np => accum //ignore
}
})
result.filterNot(_.text.isEmpty).reverse
// "background of the invention" or "background art" or just "background"
//"detailed description of the invention" or "disclosure of the invention" or just "description"
//"summary"
//"brief description of the figures"
}
/**
* Descriptions may contain paragraphs and headers, and/or subsections like "summary" which in turn contain paragraphs and headers. Here we just
* return everything.
* @param n
* @return
*/
def parseDescription(n: Node): Seq[BodyTextSection] = {
/*
<xs:element ref="summary" />
<xs:element ref="related-apps" />
<xs:element ref="govt-interest" />
<xs:element ref="detailed-desc" />
<xs:element ref="technical-field" />
<xs:element ref="background-art" />
<xs:element ref="disclosure" />
<xs:element ref="description-of-drawings" />
<xs:element ref="best-mode" />
<xs:element ref="mode-for-invention" />
<xs:element ref="industrial-applicability" />
<xs:element ref="sequence-list-text" />
*/
val descriptionElements = Map("summary" -> Summary, "govt-interest" -> GeneralBodyText, "detailed-desc" -> GeneralBodyText,
"technical-field" -> TechnicalField, "background-art" -> IntroductionAndBackground, "disclosure" -> GeneralBodyText,
"description-of-drawings" -> FigureCaptions, "best-mode" -> GeneralBodyText,
"mode-for-inventions" -> GeneralBodyText,
"industrial-applicability" -> GeneralBodyText)
val result = descriptionElements.toSeq.flatMap {
case (k, v) => (n \\ k) flatMap (c => splitBodyText(c, v))
}
val nt = splitBodyText(n, GeneralBodyText)
Seq(result, nt).flatten
}
def getBodyText: Seq[BodyTextSection] = {
// ignore "invention-title" here
val desc = (doc \\ "description") flatMap parseDescription
val claims = (doc \\ "claims").text match {
case "" => None
case t => Some(new BasicBodyTextSection(Claims, t, None))
}
desc ++ Seq(claims).flatten
}
// IDs and dates are confounded in the source data; we separate and regroup them
def getIdentifiersAndDates: (Seq[Identifier], Seq[CitationEvent]) = {
val (pubId, pubEvent) = parseIdentifierAndDate((doc \\ "bibliographic-data" \\ "publication-reference" \\ "document-id").headOption,
Published) // assume exactly one
val (recId, recEvent) = parseIdentifierAndDate((doc \\ "bibliographic-data" \\ "application-reference" \\ "document-id").headOption,
Received) // assume exactly one
// ignore dates-of-public-availability for now
val ids = Seq(pubId, recId).flatten
val events = Seq(pubEvent, recEvent).flatten
(ids, events)
}
// ** legal info, e.g. new owner
def parseKeywordGroup(seq: NodeSeq, auth: KeywordAuthority): Seq[Keyword] = {
seq flatMap ((n: Node) => {
val word: Option[NonemptyString] = (n \\ "text").stripTags //** ignoring lots of structured data in here
word.map(x => new BasicKeyword(x, Some(auth)))
})
}
val c = new StructuredPatent() {
//override val doctype: Option[DocType] = Patent
override val locations = Seq(inLocation)
override val title: Option[NonemptyString] = (doc \\ "bibliographic-data" \\ "invention-title").stripTags
override val (identifiers, dates) = getIdentifiersAndDates
val abstracts = (doc \\ "abstract")
val abstractsByLanguage = abstracts groupBy {
n: Node => {
val langName = (n \\ "@lang").text
val lang = Language.get(langName)
if (lang == None) {
logger.warn("Unknown language: " + langName)
}
lang
}
}
// override val abstractLanguages: Seq[Option[Language]] = abstractsByLanguage.keys.toSeq
override val abstractText: Iterable[TextWithLanguage] = {
for ((lang, abs) <- abstractsByLanguage) {
if (abs.length != 1) logger.error(abs.length + " abstracts for language " + lang.getOrElse("None"))
}
val withoutHeaders: Map[Option[Language], Option[NonemptyString]] = abstractsByLanguage.map {
case (l, n) => (l, (n \\ "p").stripTags
.opt) // exclude headers,
// as these are likely general
// uninformative things like "background"
}
withoutHeaders.flatMap {
case (l, Some(n)) => Some(TextWithLanguage(l, n))
case _ => None
}
//val englishAbstracts: Option[NodeSeq] = abstractsByLanguage.get(Some(English))
//val s = englishAbstracts.map(ns => Some(ns.text.trim)).getOrElse(abstractsByLanguage.get(None).map(_.text.trim))
//s
}
override val sourceLanguage = Language.get((doc \\ "bibliographic-data" \\ "language-of-filing").text.trim)
override val language = Language.get((doc \\ "bibliographic-data" \\ "language-of-publication").text.trim)
def parsePatentCitationGroup(seq: NodeSeq): Seq[StructuredPatent] = {
seq map ((n: Node) => {
val (id, event) = parseIdentifierAndDate(Some(n), Published) // ** Hmm: do priority claims refer to the filing date?
new StructuredPatent {
override val identifiers = Seq(id).flatten
override val dates = Seq(event).flatten
}
})
}
def parseNonPatentCitationGroup(seq: NodeSeq): Seq[String] = {
seq map ((n: Node) => (n \\ "text").text)
}
def parseFamily(seq: NodeSeq): Seq[StructuredPatent] = {
(seq \\ "family-member") map ((n: Node) => {
val d = (n \\ "document-id").headOption
val (id, pubEvent) = parseIdentifierAndDate(d, Published)
val recEvent: Option[CitationEvent] = d.map(r => (r \\ "application-date").headOption
.map(q => new BasicCitationEvent(parseDate(q), Received))).getOrElse(None)
new StructuredPatent {
override val identifiers = Seq(id).flatten
override val dates = Seq(pubEvent, recEvent).flatten
}
})
}
private val inventorNodes = (doc \\\\ "inventors" \\ "inventor") ++ (doc \\\\ "applicants" \\ "applicant" filter { _ \\\\ "@app-type" exists (_.text == "applicant-inventor") })
override val authors = inventorNodes.map {
inventorNode =>
new AuthorInRole(new Person() {
override val name = "%s %s".format((inventorNode \\\\ "first-name").text, (inventorNode \\\\ "last-name").text).opt.map {
PersonNameWithDerivations(_)
}
// this was giving me grief so I changed it (empty iterator on PersonName.combineGivenNames)
/*Some(new PersonNameWithDerivations {
override val firstName = (inventorNode \\ "first-name").text.opt
override val surNames = (inventorNode \\ "last-name").text.opt.toSet
}) */
}, Nil)
}
override val keywords = {
val ipc = parseKeywordGroup(doc \\\\ "classification-ipc", IpcKeywordAuthority)
val ipcr = parseKeywordGroup(doc \\\\ "classification-ipcr", IpcrKeywordAuthority)
val ecla = parseKeywordGroup(doc \\\\ "classification-ecla", EclaKeywordAuthority)
val fterm = parseKeywordGroup(doc \\\\ "classification-f-term", FtermKeywordAuthority)
val nationalNodes: NodeSeq = doc \\\\ "classification-national"
val nationalKeywords = for {c <- nationalNodes
country <- (c \\ "country").text.opt // if country isn't given the node is dropped
auth <- new BasicKeywordAuthority(country)}
yield parseKeywordGroup(c, auth)
var result = Set(ipc, ipcr, ecla, fterm).flatten ++ nationalKeywords.flatten
result
}
override val priorityClaims = parsePatentCitationGroup(doc \\ "bibliographic-data" \\ "priority-claims" \\ "priority-claim")
override val patentCitations = parsePatentCitationGroup(doc \\\\ "bibliographic-data" \\\\ "patcit") ++
parsePatentCitationGroup(doc \\\\ "description" \\\\ "patcit")
override val nonPatentCitations = parseNonPatentCitationGroup(doc \\\\ "bibliographic-data" \\\\ "nplcit") ++
parseNonPatentCitationGroup(doc \\\\ "description" \\\\ "nplcit")
lazy val forwardCitations = parsePatentCitationGroup(doc \\\\ "bibliographic-data" \\\\ "fwdcit") ++
parsePatentCitationGroup(doc \\\\ "description" \\\\ "fwdcit")
override val references = patentCitations //++ nonPatentCitations
override val searchReportPatentCitations = parsePatentCitationGroup(doc \\\\ "srep-citations" \\\\ "patcit")
override val searchReportNonPatentCitations = parseNonPatentCitationGroup(doc \\\\ "srep-citations" \\\\ "nplcit")
override val mainFamily = parseFamily(doc \\ "bibliographic-data" \\ "patent-family" \\ "main-family")
override val completeFamily = parseFamily(doc \\ "bibliographic-data" \\ "patent-family" \\ "complete-family")
override val bodyText = getBodyText
}
c
}
def parseDroppingErrors(inLocation: Location, doc: Node): Option[StructuredPatent] = {
try {
val c = parse(inLocation, doc)
Some(c)
}
catch {
case e: BibMogrifyException => logger.error(e.getMessage)
None
}
}
/* def apply(s: InputStream): TraversableOnce[CitationMention] =
{
//val xmlloader = new XMLFilenameOnlyMappingDTDLoader(Map("wo-patent-document-v1-3.dtd" -> new InputSource(getClass.getResource
("/wo-patent-document-v1-3.dtd").getPath)))
val xmlloader = XMLIgnoreDTD
// always one per file
parseDroppingErrors(xmlloader.load(s))
//XmlUtils.firstLevelNodes(s).flatMap(node => (node \\\\ "wopatent-document").flatMap(parseDroppingErrors(_)))
}*/
def apply(nis: NamedInputStream): TraversableOnce[StructuredPatent] = {
//val xml = scala.xml.XML.load(f)
// val xml = XMLIgnoreDTD.load(f) // can't, because we need the entity declarations
//XMLMapDTD.setGlobalXMLCatalogDir(getClass.getResource("/dblp.dtd").getPath)
//val xmlloader = new XMLFilenameOnlyMappingDTDLoader(Map("dblp.dtd" -> new InputSource(getClass.getResource("/dblp.dtd").getPath)))
// val xml = xmlloader.load(f)
//XmlUtils.firstLevelNodes(s).flatMap(node => (node \\\\ "REC").flatMap(parseDroppingErrors(_)))
val s = nis.getInputStream
val inLocation = new BasicStringLocation(nis.name, Nil)
try {
XMLIgnoreDTD.load(s).flatMap(parseDroppingErrors(inLocation, _))
}
catch {
case e => {
logger.error("Failed to parse " + nis.name, e);
Nil
}
}
finally {
s.close()
}
}
}
// todo refactor / cleanup. Just cut and paste for now.
object PatentST36AuthorsReader extends Transformer[NamedInputStream, StructuredPatent] with Logging with NamedPlugin {
val name = "st36authors"
val fromType = "NamedInputStream"
val toType = "StructuredPatent"
def apply(nis: NamedInputStream): TraversableOnce[StructuredPatent] = {
//val xml = scala.xml.XML.load(f)
// val xml = XMLIgnoreDTD.load(f) // can't, because we need the entity declarations
//XMLMapDTD.setGlobalXMLCatalogDir(getClass.getResource("/dblp.dtd").getPath)
//val xmlloader = new XMLFilenameOnlyMappingDTDLoader(Map("dblp.dtd" -> new InputSource(getClass.getResource("/dblp.dtd").getPath)))
// val xml = xmlloader.load(f)
//XmlUtils.firstLevelNodes(s).flatMap(node => (node \\\\ "REC").flatMap(parseDroppingErrors(_)))
val s = nis.getInputStream
val inLocation = new BasicStringLocation(nis.name, Nil)
try {
XMLIgnoreDTD.load(s).flatMap(parseDroppingErrors(inLocation, _))
}
catch {
case e => {
logger.error("Failed to parse " + nis.name, e);
Nil
}
}
finally {
s.close()
}
}
def parseDroppingErrors(inLocation: Location, doc: Node): Option[StructuredPatent] = {
try {
val c = parse(inLocation, doc)
Some(c)
}
catch {
case e: BibMogrifyException => logger.error(e.getMessage)
None
}
}
private def parseIdentifierAndDate(d: Option[Node], ct: EventType): (Option[Identifier], Option[CitationEvent]) = {
(d map ((c: Node) => {
val id = BasicIdentifier((c \\ "doc-number").text,
Some(new BasicIdentifierAuthority(("patent-" + (c \\ "country").text.trim + "-" + ct.shortName).n)))
val date = parseDate(c)
val event = new BasicCitationEvent(date, ct)
(id, Some(event))
})).getOrElse((None, None))
}
private val parseDateR = "(....)(..)(..)".r
private def parseDate(c: Node): Option[BasicPartialDate] = {
val dateString: String = (c \\ "date").text.trim
dateString match {
case "" => None
case d => {
val parseDateR(yearS, monthS, dayS) = d
val year: Option[Int] = Some(yearS.toInt)
// val dayS: Option[String] = (doc \\ "day").text
// val day: Option[Int] = dayS.map(_.toInt)
Some(BasicPartialDate(year, parseMonthOneBased(monthS), Some(dayS.toInt)))
}
}
}
def parse(inLocation: Location, doc: Node): StructuredPatent = {
// IDs and dates are confounded in the source data; we separate and regroup them
def getIdentifiersAndDates: (Seq[Identifier], Seq[CitationEvent]) = {
val (pubId, pubEvent) = parseIdentifierAndDate((doc \\ "bibliographic-data" \\ "publication-reference" \\ "document-id").headOption,
Published) // assume exactly one
val (recId, recEvent) = parseIdentifierAndDate((doc \\ "bibliographic-data" \\ "application-reference" \\ "document-id").headOption,
Received) // assume exactly one
// ignore dates-of-public-availability for now
val ids = Seq(pubId, recId).flatten
val events = Seq(pubEvent, recEvent).flatten
(ids, events)
}
val c = new StructuredPatent() {
override val (identifiers, dates) = getIdentifiersAndDates
private val inventorNodes = (doc \\\\ "inventors" \\ "inventor") ++ (doc \\\\ "applicants" \\ "applicant" filter { _ \\\\ "@app-type" exists (_.text == "applicant-inventor") })
override val authors = inventorNodes.map {
inventorNode =>
new AuthorInRole(new Person() {
override val name = "%s %s".format((inventorNode \\\\ "first-name").text, (inventorNode \\\\ "last-name").text).opt.map {
PersonNameWithDerivations(_)
}
// this was giving me grief so I changed it (empty iterator on PersonName.combineGivenNames)
/*Some(new PersonNameWithDerivations {
override val firstName = (inventorNode \\ "first-name").text.opt
override val surNames = (inventorNode \\ "last-name").text.opt.toSet
}) */
}, Nil)
}
}
c
}
}
| iesl/bibmogrify | src/main/scala/edu/umass/cs/iesl/bibmogrify/reader/PatentST36Reader.scala | Scala | apache-2.0 | 19,281 |
// Copyright 2014,2015,2016,2017,2018,2019,2020 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package commbank.grimlock.scalding.examples
import commbank.grimlock.framework.{ Cell, Locate }
import commbank.grimlock.framework.content.Content
import commbank.grimlock.framework.encoding.{ DateCodec, StringCodec }
import commbank.grimlock.framework.environment.implicits._
import commbank.grimlock.framework.extract.ExtractWithDimension
import commbank.grimlock.framework.metadata.{ Dictionary, NominalSchema, NumericType }
import commbank.grimlock.framework.pairwise.Upper
import commbank.grimlock.framework.position.{ Along, Over, Coordinates2 }
import commbank.grimlock.framework.transform.Transformer
import commbank.grimlock.library.aggregate.{ Entropy, Sums }
import commbank.grimlock.library.pairwise.{ Concatenate, Plus }
import commbank.grimlock.library.squash.PreservingMinimumPosition
import commbank.grimlock.scalding.Persist
import commbank.grimlock.scalding.environment.Context
import commbank.grimlock.scalding.environment.implicits._
import com.twitter.scalding.{ Args, Job }
import scala.io.Source
import shapeless.{ HList, HNil }
import shapeless.nat.{ _0, _1, _2 }
// Simple bucketing implementation. For numerical values it generates categorical values by
// rounding the value up to the nearest integer (ceiling). All other values are passed through.
case class CeilingBucketing[P <: HList]() extends Transformer[P, P] {
def present(cell: Cell[P]): TraversableOnce[Cell[P]] = {
val con = (cell.content.classification.isOfType(NumericType), cell.content.value.as[Double]) match {
case (true, Some(d)) => Content(NominalSchema[Long](), math.ceil(d).toLong)
case _ => cell.content
}
List(Cell(cell.position, con))
}
}
class MutualInformation(args: Args) extends Job(args) {
// Define implicit context.
implicit val ctx = Context()
import ctx.encoder
// Path to data files, output folder
val path = args.getOrElse("path", "../../data")
val output = "scalding"
// Read in the dictionary (ignoring errors).
val (dictionary, _) = Dictionary.load(Source.fromFile(s"${path}/exampleDictionary.txt"), "|")
// Read the data.
// 1/ Read the data using the supplied dictionary. This returns a 3D matrix (instance x feature x date).
// 2/ Proceed with only the data (ignoring errors).
// 3/ Squash the 3rd dimension, keeping values with minimum (earlier) coordinates. The result is a 2D matrix
// (instance x feature).
// 4/ Bucket all continuous variables by rounding them.
val data = ctx
.read(
s"${path}/exampleMutual.txt",
Persist.textLoader,
Cell.shortStringParser(StringCodec :: StringCodec :: DateCodec() :: HNil, dictionary, _1, "|")
)
.data
.squash(_2, PreservingMinimumPosition())
.transform(CeilingBucketing())
// Define extractor for extracting count from histogram count map.
val extractor = ExtractWithDimension[Coordinates2[String, String], _0, Content]
.andThenPresent(_.value.as[Double])
// Compute histogram on the data.
val mhist = data
.histogram(Along(_0))(Locate.AppendContentString, false)
// Compute count of histogram elements.
val mcount = mhist
.summarise(Over(_0))(Sums())
.gather()
// Compute sum of marginal entropy
// 1/ Compute the marginal entropy over the features.
// 2/ Compute pairwise sum of marginal entropies for all upper triangular values.
val marginal = mhist
.summariseWithValue(Over(_0))(mcount, Entropy(extractor).andThenRelocate(_.position.append("marginal").toOption))
.pair(Over(_0))(Upper, Plus(Locate.PrependPairwiseSelectedStringToRemainder(Over(_0), "%s,%s", false, "|")))
// Compute histogram on pairwise data.
// 1/ Generate pairwise values for all upper triangular values.
// 2/ Compute histogram on pairwise values.
val jhist = data
.pair(Over(_1))(
Upper,
Concatenate(Locate.PrependPairwiseSelectedStringToRemainder(Over(_1), "%s,%s", false, "|"))
)
.histogram(Along(_1))(Locate.AppendContentString, false)
// Compute count of histogram elements.
val jcount = jhist
.summarise(Over(_0))(Sums())
.gather()
// Compute joint entropy
val joint = jhist
.summariseWithValue(Over(_0))(
jcount,
Entropy(extractor, negate = true).andThenRelocate(_.position.append("joint").toOption)
)
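  // For a feature pair (X, Y), mutual information is I(X;Y) = H(X) + H(Y) - H(X,Y).
  // `marginal` carries H(X) + H(Y) per pair and `joint` carries -H(X,Y) (entropy negated above),
  // so summing the two collections per pair below yields I(X;Y).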
// Generate mutual information
// 1/ Sum marginal and negated joint entropy
// 2/ Persist mutual information.
(marginal ++ joint)
.summarise(Over(_0))(Sums())
.saveAsText(ctx, s"./demo.${output}/mi.out", Cell.toShortString(true, "|"))
.toUnit
}
| CommBank/grimlock | grimlock-examples/src/main/scala/commbank/grimlock/scalding/MutualInformation.scala | Scala | apache-2.0 | 5,148 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js sbt plugin **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package scala.scalajs.sbtplugin.testing
import sbt.testing.{Event => SbtEvent, _}
class Events(taskDef: TaskDef) {
abstract class Event(val status: Status,
val throwable: OptionalThrowable = new OptionalThrowable) extends SbtEvent {
val fullyQualifiedName = taskDef.fullyQualifiedName
val fingerprint = taskDef.fingerprint
val selector = taskDef.selectors.headOption.getOrElse(new SuiteSelector)
val duration = -1L
}
case class Error(exception: Throwable) extends Event(
Status.Error, new OptionalThrowable(exception))
case class Failure(exception: Throwable) extends Event(
Status.Failure, new OptionalThrowable(exception))
case object Succeeded extends Event(Status.Success)
case object Skipped extends Event(Status.Skipped)
case object Pending extends Event(Status.Pending)
case object Ignored extends Event(Status.Ignored)
case object Canceled extends Event(Status.Canceled)
}
| swhgoon/scala-js | sbt-plugin/src/main/scala/scala/scalajs/sbtplugin/testing/Events.scala | Scala | bsd-3-clause | 1,466 |
package model.auth
import be.objectify.deadbolt.scala.models.{Permission, Role, Subject}
case class Student(identifier: String, authToken: String) extends Subject {
val roles: List[Role] = List(StudentRole)
val permissions: List[Permission] = List(StudentPermission)
}
object StudentRole extends Role {
val name: String = "student"
}
object StudentPermission extends Permission {
val value: String = "student"
}
| BandOf3/assignment-system-web | app/model/auth/Student.scala | Scala | mit | 425 |
// Copyright (C) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in project root for information.
package com.microsoft.ml.spark
import com.microsoft.ml.spark.ColumnOptions.ColumnOptions
import com.microsoft.ml.spark.DataOptions.DataOptions
import org.apache.spark.mllib.random.RandomDataGenerator
import org.apache.spark.sql.Row
import scala.util.Random
/** Combines an array of row generators into a single row generator.
  * @param generators the row generators whose values are merged into each generated row
*/
class RandomRowGeneratorCombiner(generators: Array[RandomMMLGenerator[Row]]) extends RandomMMLGenerator[Row] {
override def nextValue(): Row = Row.merge(generators.map(generator => generator.nextValue()): _*)
override def copy(): RandomRowGeneratorCombiner = new RandomRowGeneratorCombiner(generators)
}
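
// Hedged usage sketch (not part of the original file): combining per-column generators into one
// seeded row generator; `someColumn` stands in for a real ColumnOptions value.
//
//   val rows = new RandomRowGeneratorCombiner(Array(
//     new RandomRowGenerator(someColumn, DataOptions.Int),
//     new RandomRowGenerator(someColumn, DataOptions.String)))
//   rows.setSeed(42L)
//   val row: Row = rows.nextValue()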
/** Randomly generates a row given the set space of data, column options.
* @param col The column generation options specifying the column type to generate.
* @param data The data generation options specifying the data to generate.
*/
class RandomRowGenerator(col: ColumnOptions, data: DataOptions) extends RandomMMLGenerator[Row] {
override def nextValue(): Row = {
if (data == DataOptions.Boolean)
Row(random.nextBoolean)
else if (data == DataOptions.Byte)
Row(random.nextByte)
else if (data == DataOptions.Double)
Row(random.nextDouble)
else if (data == DataOptions.Int)
Row(random.nextInt)
else if (data == DataOptions.Short)
Row(random.nextShort)
else if (data == DataOptions.String)
Row(random.nextString)
else if (data == DataOptions.Date)
Row(random.nextDate)
else if (data == DataOptions.Timestamp)
Row(random.nextTimestamp)
else throw new Exception("Selected type not supported: " + data)
}
override def copy(): RandomRowGenerator = new RandomRowGenerator(col, data)
}
/** Base abstract class for random generation of data.
* @tparam T The data to generate.
*/
abstract class RandomMMLGenerator[T] extends RandomDataGenerator[T] {
var seed: Long = 0
var random: GenerateDataType = new GenerateDataType(new Random(seed))
override def setSeed(seed: Long): Unit = {
random = new GenerateDataType(new Random(seed))
this.seed = seed
}
}
| rastala/mmlspark | src/core/test/datagen/src/main/scala/GenerateRow.scala | Scala | mit | 2,275 |
////////////////////////////////////////////////////////////////////////////////
// //
// OpenSolid is a generic library for the representation and manipulation //
// of geometric objects such as points, curves, surfaces, and volumes. //
// //
// Copyright 2007-2015 by Ian Mackenzie //
// [email protected] //
// //
// This Source Code Form is subject to the terms of the Mozilla Public //
// License, v. 2.0. If a copy of the MPL was not distributed with this file, //
// you can obtain one at http://mozilla.org/MPL/2.0/. //
// //
////////////////////////////////////////////////////////////////////////////////
package org.opensolid.core
import org.opensolid.core.IntervalGenerators._
import org.scalacheck._
trait VectorBounds2dGenerators {
val finiteVectorBounds2d: Gen[VectorBounds2d] =
for {
x <- finiteInterval
y <- finiteInterval
} yield VectorBounds2d(x, y)
val validVectorBounds2d: Gen[VectorBounds2d] =
for {
x <- validInterval
y <- validInterval
} yield VectorBounds2d(x, y)
val anyVectorBounds2d: Gen[VectorBounds2d] =
for {
x <- anyInterval
y <- anyInterval
} yield VectorBounds2d(x, y)
implicit val arbitraryVectorBounds2d: Arbitrary[VectorBounds2d] = Arbitrary(anyVectorBounds2d)
}
object VectorBounds2dGenerators extends VectorBounds2dGenerators
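
// Hedged usage sketch (not part of the original file): feeding the generators into a ScalaCheck
// property; the x/y accessors on VectorBounds2d are an assumption here.
//
//   import org.scalacheck.Prop.forAll
//   val roundTrip = forAll(finiteVectorBounds2d) { bounds =>
//     VectorBounds2d(bounds.x, bounds.y) == bounds
//   }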
| ianmackenzie/opensolid-core | src/test/scala/org/opensolid/core/VectorBox2dGenerators.scala | Scala | mpl-2.0 | 1,778 |
package avrohugger
package format
package standard
import treehugger.forest._
import definitions._
import treehuggerDSL._
import org.apache.avro.Schema
import scala.collection.JavaConversions._
object StandardObjectTree {
def toObjectDef(
classStore: ClassStore,
schema: Schema) = {
// register new type
val classSymbol = RootClass.newClass(schema.getName + ".Value")
classStore.accept(schema, classSymbol)
OBJECTDEF(schema.getName) withParents("Enumeration") := BLOCK(
TYPEVAR(schema.getName) := REF("Value"),
VAL(schema.getEnumSymbols.mkString(", ")) := REF("Value")
)
}
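  /* Illustrative example (not part of the original source): for an Avro enum
   * schema such as {"type": "enum", "name": "Suit", "symbols": ["HEARTS", "SPADES"]}
   * the tree built above renders roughly as:
   *
   *   object Suit extends Enumeration {
   *     type Suit = Value
   *     val HEARTS, SPADES = Value
   *   }
   */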
}
| ppearcy/avrohugger | avrohugger-core/src/main/scala/format/standard/StandardObjectTree.scala | Scala | apache-2.0 | 635 |
package org.embulk.parser.firebase_avro.define.root.user_dim.user_properties.value
case class Value(string_value: Option[String] = None,
int_value: Option[Long] = None,
float_value: Option[Double] = None,
double_value: Option[Double] = None)
| smdmts/embulk-parser-firebase_avro | src/main/scala/org/embulk/parser/firebase_avro/define/root/user_dim/user_properties/value/Value.scala | Scala | mit | 294 |
package wakfutcp.client.util
import java.net.{InetAddress, InetSocketAddress}
import java.security.KeyFactory
import java.security.spec.X509EncodedKeySpec
import javax.crypto.Cipher
import akka.actor.typed.scaladsl._
import akka.actor.typed.{ActorRef, Behavior, Terminated}
import akka.actor.{ActorSystem, PoisonPill}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Tcp
import akka.util.ByteString
import wakfutcp.client.util.Auth.Credentials
import wakfutcp.protocol.ServerMessage
import wakfutcp.protocol.common.{Proxy, Version, WorldInfo}
import wakfutcp.protocol.messages.client.ClientDispatchAuthenticationMessage.CredentialData
import wakfutcp.protocol.messages.client._
import wakfutcp.protocol.messages.server._
import wakfutcp.protocol.util.extensions.client._
import wakfutcp.protocol.util.stream
import wakfutcp.protocol.util.stream.Recv
object GameSession {
def behavior(
address: InetSocketAddress,
credentials: Credentials,
version: Version,
onConnect: (ActorRef[GameSession.Message], ActorRef[ByteString]) => Behavior[stream.Message[ServerMessage]])(
selectWorld: Seq[(Proxy, WorldInfo)] => Proxy)(
implicit system: ActorSystem,
materializer: ActorMaterializer
): Behavior[Message] = Behaviors.setup { ctx =>
val authHandler = stream.withConnection(Auth.behavior(ctx.self, _, credentials, version, selectWorld))
ctx.system.log.debug("Connecting to auth server at {}", address)
Tcp()
.outgoingConnection(address)
.join(stream.client.flow(ctx.spawn(authHandler, "Auth")))
.run()
Behaviors.immutable { (ctx, msg) =>
msg match {
case ConnectWorld(worldServerAddress, token) =>
ctx.system.log.debug("Connecting to world server at {}", worldServerAddress)
val worldHandler =
stream.withConnection { conn =>
World.behavior(ctx.self, conn, token, version, onConnect(ctx.self, conn))
}
Tcp()
.outgoingConnection(worldServerAddress)
.join(stream.client.flow(ctx.spawn(worldHandler, "World")))
.run()
Behaviors.same
case Shutdown =>
Behaviors.stopped
}
}
}
sealed trait Message
final case class ConnectWorld(address: InetSocketAddress, token: String) extends Message
case object Shutdown extends Message
}
object Auth {
def behavior(
session: ActorRef[GameSession.Message],
connection: ActorRef[ByteString],
credentials: Credentials,
version: Version,
selectWorld: Seq[(Proxy, WorldInfo)] => Proxy
): Behavior[stream.Message[ServerMessage]] = Behaviors.immutable { (ctx, msg) =>
msg match {
case Recv(ClientIpMessage(bytes)) =>
ctx.system.log.debug("Received client user IP ({})", InetAddress.getByAddress(bytes))
connection !! ClientVersionMessage(Version.WithBuild(version, "-1"))
Behaviors.same
case Recv(ClientVersionResultMessage(success, required)) =>
ctx.system.log.debug("Version check successful: {}, required: {}", success, required)
connection !! ClientPublicKeyRequestMessage(8)
Behaviors.same
case Recv(ClientPublicKeyMessage(salt, pubKey)) =>
ctx.system.log.debug("Received public key with salt {}", salt)
val cert = new X509EncodedKeySpec(pubKey)
val keyFactory = KeyFactory.getInstance("RSA")
val cipher = Cipher.getInstance("RSA")
cipher.init(Cipher.ENCRYPT_MODE, keyFactory.generatePublic(cert))
connection !!
ClientDispatchAuthenticationMessage.create(
CredentialData(salt, credentials.login, credentials.password), cipher)
Behaviors.same
case Recv(ClientDispatchAuthenticationResultMessage(result, _)) =>
ctx.system.log.debug("Public key authentication result: {}", result)
connection !! ClientProxiesRequestMessage()
Behaviors.same
case Recv(ClientProxiesResultMessage(proxies, worlds)) =>
ctx.system.log.debug("Received client proxies list")
val choice = selectWorld(proxies.zip(worlds))
connection !! AuthenticationTokenRequestMessage(choice.id, 0)
authenticateWorld(session, connection, choice)
case other =>
ctx.system.log.debug("Unhandled auth server message: {}", other)
Behaviors.same
}
}
private def authenticateWorld(
session: ActorRef[GameSession.Message],
connection: ActorRef[ByteString],
proxy: Proxy
): Behavior[stream.Message[ServerMessage]] = Behaviors.immutable[stream.Message[ServerMessage]] { (ctx, msg) =>
msg match {
case Recv(AuthenticationTokenResultMessage.Success(token)) =>
ctx.system.log.debug("Received authentication token: {}", token)
val address = new InetSocketAddress(proxy.server.address, proxy.server.ports.head)
session ! GameSession.ConnectWorld(address, token)
connection.upcast[Any] ! PoisonPill
Behaviors.same
case Recv(AuthenticationTokenResultMessage.Failure) =>
ctx.system.log.error("Token authentication failed")
Behaviors.stopped
case other =>
ctx.system.log.debug("Unhandled auth server message: {}", other)
Behaviors.same
}
}.onSignal {
case (_, Terminated(`connection`)) =>
Behaviors.stopped
}
final case class Credentials(login: String, password: String)
}
object World {
def behavior(
session: ActorRef[GameSession.Message],
connection: ActorRef[ByteString],
token: String,
version: Version,
onConnect: Behavior[stream.Message[ServerMessage]]
): Behavior[stream.Message[ServerMessage]] = Behaviors.immutable { (ctx, msg) =>
msg match {
case Recv(ClientIpMessage(bytes)) =>
ctx.system.log.debug("Received client IP from game server: {}", InetAddress.getByAddress(bytes))
connection !! ClientVersionMessage(Version.WithBuild(version, "-1"))
Behaviors.same
case Recv(ClientVersionResultMessage(success, required)) =>
ctx.system.log.debug("Version check successful: {}, required: {}", success, required)
connection !! ClientAuthenticationTokenMessage(token)
Behaviors.same
case Recv(result: ClientAuthenticationResultsMessage) =>
ctx.system.log.debug("Client authentication result: {}", result)
Behaviors.same
case Recv(result: WorldSelectionResultMessage) =>
ctx.system.log.debug("World selection result: {}", result)
onConnect
case other =>
ctx.system.log.debug("Unhandled message during world auth: {}", other)
Behaviors.same
}
}
}
| OpenWakfu/wakfutcp | protocol/src/main/scala/wakfutcp/client/util/GameSession.scala | Scala | mit | 6,619 |
package ssscs
import java.util.Date
case class ArticleInfo(url: String, title: String, date: Date)
| raincole/ssscs | src/main/scala/ssscs/ArticleInfo.scala | Scala | mit | 101 |
package org.jetbrains.plugins.scala.caches.stats
trait TrackedCacheType {
type Cache
def id: String
def name: String
def alwaysTrack: Boolean
def capabilities: CacheCapabilities[Cache]
def tracked: collection.Seq[Cache]
def cachedEntityCount: Int
def clear(): Unit
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/caches/stats/TrackedCacheType.scala | Scala | apache-2.0 | 285 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.commons.text.inflector
import java.util.Collections
import java.util.Locale
import java.util.regex.Matcher
import java.util.regex.Pattern
import RuleBasedPluralizer._
object RuleBasedPluralizer {
class IdentityPluralizer extends Pluralizer {
def pluralize(word: String): String = word
def pluralize(word: String, number: Int): String = word
}
private val IDENTITY_PLURALIZER = new IdentityPluralizer()
  private var pattern: Pattern = Pattern.compile("\\A(\\s*)(.+?)(\\s*)\\Z")
  private var pluPattern1: Pattern = Pattern.compile("^\\p{Lu}+$")
  private var pluPattern2: Pattern = Pattern.compile("^\\p{Lu}.*")
}
/**
* RuleBasedPluralizer class.
*
* @author chaostone
*/
class RuleBasedPluralizer(var rules: List[Rule], var locale: Locale, var fallbackPluralizer: Pluralizer)
extends Pluralizer {
/**
* Constructs a pluralizer with an empty list of rules. Use the setters to configure.
*/
def this() = {
this(List.empty, Locale.getDefault, null)
}
/**
   * Constructs a pluralizer that applies a list of rules in order, falling back to an identity
   * {@link Pluralizer} if none of them match. This is useful to build your own {@link Pluralizer} from scratch.
*
* @param rules the rules to apply in order
* @param locale the locale specifying the language of the pluralizer
*/
def this(rules: List[Rule], locale: Locale) = {
this(rules, locale, IDENTITY_PLURALIZER)
}
/**
* Converts a noun or pronoun to its plural form.
* This method is equivalent to calling <code>pluralize(word, 2)</code>.
* The return value is not defined if this method is passed a plural form.
*/
def pluralize(word: String): String = pluralize(word, 2)
/**
* Converts a noun or pronoun to its plural form for the given number of instances. If
* <code>number</code> is 1, <code>word</code> is returned unchanged.
* The return value is not defined if this method is passed a plural form.
*/
def pluralize(word: String, number: Int): String = {
if (number == 1)
return word
val matcher = pattern.matcher(word)
if (matcher.matches()) {
val pre = matcher.group(1)
val trimmedWord = matcher.group(2)
val post = matcher.group(3)
val plural = pluralizeInternal(trimmedWord)
if (plural == null)
return fallbackPluralizer.pluralize(word, number)
return pre + postProcess(trimmedWord, plural) + post
}
word
}
/**
* Goes through the rules in turn until a match is found at which point the rule is applied and
* the result returned. If no rule matches, returns <code>null</code>.
*
* @param word a singular noun
* @return the plural form of the noun, or <code>null</code> if no rule matches
*/
protected def pluralizeInternal(word: String): String =
rules.find(_.applies(word)).map(_.apply(word)).getOrElse(null)
/**
* <p>
* Apply processing to <code>pluralizedWord</code>. This implementation ensures the case of the
* plural is consistent with the case of the input word.
* </p>
* <p>
* If <code>trimmedWord</code> is all uppercase, then <code>pluralizedWord</code> is uppercased.
* If <code>trimmedWord</code> is titlecase, then <code>pluralizedWord</code> is titlecased.
* </p>
*
* @param trimmedWord the input word, with leading and trailing whitespace removed
* @param pluralizedWord the pluralized word
* @return the <code>pluralizedWord</code> after processing
*/
protected def postProcess(trimmedWord: String, pluralizedWord: String): String = {
if (pluPattern1.matcher(trimmedWord).matches())
return pluralizedWord.toUpperCase(locale)
else if (pluPattern2.matcher(trimmedWord).matches())
return pluralizedWord.substring(0, 1).toUpperCase(locale) + pluralizedWord.substring(1)
pluralizedWord
}
}
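/** Illustrative sketch (not part of the original source). It assumes Rule
  * exposes the applies/apply pair used by pluralizeInternal above; the single
  * "add an s" rule below is purely hypothetical.
  */
object RuleBasedPluralizerSketch {
  private val addS: Rule = new Rule {
    def applies(word: String): Boolean = !word.endsWith("s")
    def apply(word: String): String = word + "s"
  }
  // Falls back to the identity pluralizer when no rule applies.
  val english = new RuleBasedPluralizer(List(addS), Locale.ENGLISH)
  // english.pluralize("book") => "books"; english.pluralize("book", 1) => "book"
}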
| beangle/commons | text/src/main/scala/org/beangle/commons/text/inflector/RuleBasedPluralizer.scala | Scala | lgpl-3.0 | 4,564 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
* file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
* to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package kafka.api
import kafka.zk.ZooKeeperTestHarness
import org.junit.{After, Before}
trait SaslTestHarness extends ZooKeeperTestHarness with SaslSetup {
protected val zkSaslEnabled: Boolean
@Before
override def setUp() {
if (zkSaslEnabled)
startSasl(Both)
else
startSasl(KafkaSasl)
super.setUp
}
@After
override def tearDown() {
super.tearDown
closeSasl()
}
}
| samaitra/kafka | core/src/test/scala/integration/kafka/api/SaslTestHarness.scala | Scala | apache-2.0 | 1,210 |
package fpinscala.parallelism
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{ExecutorService, Executors, ThreadFactory, Future => JFuture}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.prop.PropertyChecks
import org.scalatest.{BeforeAndAfter, EitherValues, FlatSpec, Matchers}
import scala.concurrent.{Future => SFuture}
/**
* Created by benen on 03/08/17.
*/
class NonBlockingParSpec extends FlatSpec with Matchers with BeforeAndAfter with EitherValues {
val asyncThreadCount = new AtomicInteger
val threadFactory: ThreadFactory =
(r: Runnable) => {
asyncThreadCount.incrementAndGet
Executors.defaultThreadFactory.newThread(r)
}
var executorService: ExecutorService = _
before {
asyncThreadCount.set(0)
// note that sequence() will not work if we do not provide enough parallel threads! (see exercise 7.9)
executorService = Executors.newCachedThreadPool(threadFactory)
}
behavior of "7.10 error handling"
it should "register callbacks correctly" in {
//given
val succeeding = Nonblocking.Par.delay[Int](5)
//When
val result = Nonblocking.Par.run(executorService)(succeeding)
// then
// result.right.value shouldBe 5
}
it should "recover from errors correctly" in {
//given
val failing = Nonblocking.Par.delay[Int]{throw new RuntimeException("Failing") }
//When
val result = Nonblocking.Par.run(executorService)(failing)
// then
// result.left.value shouldBe a[RuntimeException]
}
}
| benen/fpinscala | exercises/src/test/scala/fpinscala/parallelism/NonBlockingParSpec.scala | Scala | mit | 1,566 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.akkastream.task
import org.apache.gearpump.Message
import org.apache.gearpump.akkastream.task.Zip2Task.ZipFunction
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.TaskContext
class Zip2Task[A1, A2, OUT](context: TaskContext, userConf : UserConfig)
extends GraphTask(context, userConf) {
val zip = userConf.
getValue[ZipFunction[A1, A2, OUT]](Zip2Task.ZIP2_FUNCTION)(context.system).get.zip
var a1: Option[A1] = None
var a2: Option[A2] = None
override def onNext(msg: Message) : Unit = {
val message = msg.value
val time = msg.timestamp
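    // The first message received is cached as the A1 side; every subsequent
    // message is treated as the A2 side and zipped with the cached A1 value
    // before the combined result is emitted downstream.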
a1 match {
case Some(x) =>
a2 = Some(message.asInstanceOf[A2])
a1.foreach(v1 => {
a2.foreach(v2 => {
val out = zip(v1, v2)
context.output(Message(out.asInstanceOf[OUT], time))
})
})
case None =>
a1 = Some(message.asInstanceOf[A1])
}
}
}
object Zip2Task {
case class ZipFunction[A1, A2, OUT](val zip: (A1, A2) => OUT) extends Serializable
val ZIP2_FUNCTION = "org.apache.gearpump.akkastream.task.zip2.function"
}
| manuzhang/incubator-gearpump | experiments/akkastream/src/main/scala/org/apache/gearpump/akkastream/task/Zip2Task.scala | Scala | apache-2.0 | 1,955 |
/*
* Copyright 2015 LG CNS.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouter.server.account;
import java.io.File
import javax.xml.parsers.DocumentBuilder
import javax.xml.parsers.DocumentBuilderFactory
import org.w3c.dom.Document
import org.w3c.dom.Element
import org.w3c.dom.Node
import org.w3c.dom.NodeList
import scouter.server.util.XmlUtil
import scouter.lang.Account
import scouter.util.StringKeyLinkedMap
import scouter.util.ArrayUtil
import scouter.server.util.EnumerScala
object AccountFileHandler {
val TAG_ACCOUNTS = "Accounts";
val TAG_ACCOUNT = "Account";
val TAG_EMAIL = "Email";
val ATTR_ID = "id";
val ATTR_PASS = "pass";
val ATTR_GROUP = "group";
def parse(file: File): StringKeyLinkedMap[Account] = {
val accountMap = new StringKeyLinkedMap[Account]();
val docBuilderFactory = DocumentBuilderFactory.newInstance();
val docBuilder = docBuilderFactory.newDocumentBuilder();
val doc = docBuilder.parse(file);
doc.getDocumentElement().normalize();
val accountList = doc.getElementsByTagName(TAG_ACCOUNT);
EnumerScala.foreach(accountList, (account: Node) => {
if (account.getNodeType() == Node.ELEMENT_NODE) {
val acObj = new Account();
val accountElement = account.asInstanceOf[Element];
acObj.id = accountElement.getAttribute(ATTR_ID);
acObj.password = accountElement.getAttribute(ATTR_PASS);
acObj.group = accountElement.getAttribute(ATTR_GROUP);
acObj.email = extractTextValue(accountElement, TAG_EMAIL);
accountMap.put(acObj.id, acObj);
}
})
return accountMap;
}
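    /* Illustrative example of the XML layout that parse()/addAccount() work with
     * (not part of the original source; the values are hypothetical):
     *
     *   <Accounts>
     *     <Account id="admin" pass="secret" group="admin">
     *       <Email>[email protected]</Email>
     *     </Account>
     *   </Accounts>
     */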
def addAccount(file: File, account: Account) {
val docBuilderFactory = DocumentBuilderFactory.newInstance();
val docBuilder = docBuilderFactory.newDocumentBuilder();
val doc = docBuilder.parse(file);
doc.getDocumentElement().normalize();
val accounts = doc.getElementsByTagName(TAG_ACCOUNTS).item(0);
val accountEle = doc.createElement(TAG_ACCOUNT);
accountEle.setAttribute(ATTR_ID, account.id);
accountEle.setAttribute(ATTR_PASS, account.password);
accountEle.setAttribute(ATTR_GROUP, account.group);
val emailEle = doc.createElement(TAG_EMAIL);
emailEle.setTextContent(account.email);
accountEle.appendChild(emailEle);
accounts.appendChild(accountEle);
XmlUtil.writeXmlFileWithIndent(doc, file, 2);
}
def editAccount(file: File, account: Account) {
val docBuilderFactory = DocumentBuilderFactory.newInstance();
val docBuilder = docBuilderFactory.newDocumentBuilder();
val doc = docBuilder.parse(file);
doc.getDocumentElement().normalize();
val nodeList = doc.getElementsByTagName(TAG_ACCOUNT);
EnumerScala.foreach(nodeList, (node: Node) => {
if (node.getNodeType() == Node.ELEMENT_NODE) {
val element = node.asInstanceOf[Element];
val id = element.getAttribute(ATTR_ID);
if (account.id.equals(id)) {
element.setAttribute(ATTR_PASS, account.password);
element.setAttribute(ATTR_GROUP, account.group);
element.getElementsByTagName(TAG_EMAIL).item(0).setTextContent(account.email);
XmlUtil.writeXmlFileWithIndent(doc, file, 2);
return ;
}
}
})
throw new Exception("Cannot find account id : " + account.id);
}
private def extractTextValue(alertElement: Element, tagName: String): String = {
val nodeList = alertElement.getElementsByTagName(tagName);
if (ArrayUtil.len(nodeList) == 0) {
return null;
}
val objTypeElement = nodeList.item(0).asInstanceOf[Element];
if (objTypeElement == null) null else objTypeElement.getTextContent();
}
} | jw0201/scouter | scouter.server/src/scouter/server/account/AccountFileHandler.scala | Scala | apache-2.0 | 4,646 |
/**
Scalding with Avro (and Json) tutorial part 0.
To run this job:
yarn jar target/scalding-tutorial-0.14.0.jar AvroTutorial0 --local --avro --json
Check the output:
java -jar avro-tools-1.7.6.jar tojson tutorial/data/avrooutput0.avro
**/
import com.twitter.scalding.{Job, Args, JsonLine}
import com.twitter.scalding.avro.UnpackedAvroSource
import org.apache.avro.Schema
class AvroTutorial0(args: Args) extends Job(args) {
val schema = """{
"type": "record", "name": "parseJson", "fields": [
{ "name": "sessionId", "type": "string" },
{ "name": "optionalField", "type": ["string", "null"] }
] }"""
JsonLine("data/session.json", ('sessionId, 'optionalField)).read
.write(UnpackedAvroSource("target/data/avrooutput0.avro", new Schema.Parser().parse(schema)))
}
| Cascading/scalding-tutorial | src/main/scala/tutorial/AvroTutorial0.scala | Scala | apache-2.0 | 780 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import cmwell.domain._
import org.joda.time.DateTime
import org.scalatest.{FlatSpec, Matchers}
import scala.language.postfixOps
/**
* Created with IntelliJ IDEA.
* User: markz
* Date: 1/13/13
* Time: 3:15 PM
* Testing infoton serializer code
*/
class InfotonSerializerSpec extends FlatSpec with Matchers {
def serialize2Anddeserialize2(i: Infoton): Infoton = {
val (uuid,rows) = InfotonSerializer.serialize2(i)
val it = rows.view.flatMap {
case (q,fields) => fields.view.sortBy(_._1).flatMap{
case (fieldName,values) => values.view.sortBy(_._1).map(value => (q,fieldName,value))
}
}.iterator
InfotonSerializer.deserialize2(uuid,it)
}
"very big infoton" should "be successful" in {
val x: Set[FieldValue] = {
val b = Set.newBuilder[FieldValue]
b.sizeHint(100000)
for(i <- 0 until 100000) {
b += FString(s"cmwell://name/$i")
}
b.result()
}
val objInfo = new ObjectInfoton(path = "/command-test/objinfo1", dc = "test", fields = Option(Map[String,Set[FieldValue]]("g" -> Set(FString("h")),"last" -> Set(FString("zitnik")) , "name" -> x )))
InfotonSerializer.deserialize(InfotonSerializer.serialize(objInfo)) shouldEqual objInfo
serialize2Anddeserialize2(objInfo) shouldEqual objInfo
}
"object infoton serializer" should "be successful" in {
val objInfo = ObjectInfoton("/command-test/objinfo1","dc_test", None, Map("name" -> Set[FieldValue](FString("gal"), FString("yoav"))))
val objInfoCmp = InfotonSerializer.deserialize(InfotonSerializer.serialize(objInfo))
// check system
objInfo.path should equal(objInfoCmp.path)
objInfo.uuid should equal(objInfoCmp.uuid)
objInfo.lastModified should equal(objInfoCmp.lastModified)
// check fields
objInfo.fields.get("name").size should equal(objInfoCmp.fields.get("name").size)
}
"empty file infoton serializer" should "be successful" in {
val fc = FileContent("text/plain",0)
val emptyInfo = FileInfoton("/command-test/objinfo1","dc_test", None, Map("name" -> Set[FieldValue](FString("gal"), FString("yoav"))) , fc )
val emptyInfoCmp = InfotonSerializer.deserialize(InfotonSerializer.serialize(emptyInfo))
emptyInfo.path should equal (emptyInfoCmp.path)
emptyInfo.uuid should equal (emptyInfoCmp.uuid)
emptyInfo.lastModified should equal (emptyInfoCmp.lastModified)
}
"file binary infoton serializer" should "be successful" in {
val source = scala.io.Source.fromFile("./src/test/resources/mascot.jpg" ,"iso-8859-1")
val byteArray = source.map(_.toByte).toArray
source.close()
val s = byteArray
val img : FileContent = FileContent(s, "image/jpeg;charset=iso-8859-1")
val imgInfo = FileInfoton("/command-test/objinfo1","dc_test", None, Map("name" -> Set[FieldValue](FString("gal"), FString("yoav"))) , img )
val imgInfoCmp = InfotonSerializer.deserialize(InfotonSerializer.serialize(imgInfo))
val imgInfoCmp2 = serialize2Anddeserialize2(imgInfo)
// check system
imgInfo.path should equal (imgInfoCmp.path)
imgInfo.uuid should equal (imgInfoCmp.uuid)
imgInfo.lastModified should equal (imgInfoCmp.lastModified)
imgInfo.path should equal (imgInfoCmp2.path)
imgInfo.uuid should equal (imgInfoCmp2.uuid)
imgInfo.lastModified should equal (imgInfoCmp2.lastModified)
// check fields
imgInfo.fields.get("name").size should equal (imgInfoCmp.fields.get("name").size)
imgInfo.fields.get("name").size should equal (imgInfoCmp2.fields.get("name").size)
(imgInfoCmp: @unchecked) match {
case FileInfoton(_,_,_,_,fields , content, _ ) =>
content.get match {
case FileContent(data,mimeType,_,_) =>
val d = data.get
d should equal (s)
"image/jpeg;charset=iso-8859-1" should equal (mimeType)
}
}
(imgInfoCmp2: @unchecked) match {
case FileInfoton(_,_,_,_,fields , content, _ ) =>
content.get match {
case FileContent(data,mimeType,_,_) =>
val d = data.get
d should equal (s)
"image/jpeg;charset=iso-8859-1" should equal (mimeType)
}
}
}
"file text infoton serializer" should "be successful" in {
val source = scala.io.Source.fromFile("./src/test/resources/test.txt" ,"UTF-8")
val byteArray = source.map(_.toByte).toArray
source.close()
val s = byteArray
val text : FileContent = FileContent(s, "text/plain;charset=utf-8")
val textInfo = FileInfoton("/command-test/objinfo1","dc_test", None, Map("name" -> Set[FieldValue](FString("gal"), FString("yoav"))) , text )
val textInfoCmp = InfotonSerializer.deserialize(InfotonSerializer.serialize(textInfo))
val textInfoCmp2 = serialize2Anddeserialize2(textInfo)
// check system
textInfo.path should equal (textInfoCmp.path)
textInfo.uuid should equal (textInfoCmp.uuid)
textInfo.lastModified should equal (textInfoCmp.lastModified)
textInfo.path should equal (textInfoCmp2.path)
textInfo.uuid should equal (textInfoCmp2.uuid)
textInfo.lastModified should equal (textInfoCmp2.lastModified)
// check fields
textInfo.fields.get("name").size should equal (textInfoCmp.fields.get("name").size)
textInfo.fields.get("name").size should equal (textInfoCmp2.fields.get("name").size)
(textInfoCmp: @unchecked) match {
case FileInfoton(_,_,_,_,fields , content, _ ) =>
content.get match {
case FileContent(data,mimeType,_,_) =>
val d = data.get
d should equal (s)
"text/plain;charset=utf-8" should equal (mimeType)
}
}
(textInfoCmp2: @unchecked) match {
case FileInfoton(_,_,_,_,fields , content, _ ) =>
content.get match {
case FileContent(data,mimeType,_,_) =>
val d = data.get
d should equal (s)
"text/plain;charset=utf-8" should equal (mimeType)
}
}
}
// TODO: make this configurable
val chunkSize = 65536
"big file infoton with % chunkSize != 0" should "be successful" in {
val bArr = Array.tabulate[Byte](chunkSize + chunkSize + 12345)(_.&(0xff).toByte)
val data : FileContent = FileContent(bArr, "application/octet-stream")
val fInf = FileInfoton("/command-test/fileinfo1","dc_test", None, Map("name" -> Set[FieldValue](FString("gal"), FString("yoav"))), data)
val dataInfoCmp = InfotonSerializer.deserialize(InfotonSerializer.serialize(fInf))
val dataInfoCmp2 = serialize2Anddeserialize2(fInf)
// check system
fInf.path should equal (dataInfoCmp.path)
fInf.uuid should equal (dataInfoCmp.uuid)
fInf.lastModified should equal (dataInfoCmp.lastModified)
fInf.path should equal (dataInfoCmp2.path)
fInf.uuid should equal (dataInfoCmp2.uuid)
fInf.lastModified should equal (dataInfoCmp2.lastModified)
// check fields
fInf.fields.get("name").size should equal (dataInfoCmp.fields.get("name").size)
fInf.fields.get("name").size should equal (dataInfoCmp2.fields.get("name").size)
(dataInfoCmp: @unchecked) match {
case FileInfoton(_,_,_,_,_,content,_) =>
content.get match {
case FileContent(binData,mimeType,_,_) =>
val d = binData.get
d should equal (bArr)
"application/octet-stream" should equal (mimeType)
}
}
(dataInfoCmp2: @unchecked) match {
case FileInfoton(_,_,_,_,_,content,_) =>
content.get match {
case FileContent(binData,mimeType,_,_) =>
val d = binData.get
d should equal (bArr)
"application/octet-stream" should equal (mimeType)
}
}
}
"big file infoton with % chunkSize == 0" should "be successful" in {
val bArr = Array.tabulate[Byte](2*chunkSize)(_.&(0xff).toByte)
val data : FileContent = FileContent(bArr, "application/octet-stream")
val fInf = FileInfoton("/command-test/fileinfo1","dc_test", None, Map("name" -> Set[FieldValue](FString("gal"), FString("yoav"))), data)
val dataInfoCmp = InfotonSerializer.deserialize(InfotonSerializer.serialize(fInf))
val dataInfoCmp2 = serialize2Anddeserialize2(fInf)
// check system
fInf.path should equal (dataInfoCmp.path)
fInf.uuid should equal (dataInfoCmp.uuid)
fInf.lastModified should equal (dataInfoCmp.lastModified)
fInf.path should equal (dataInfoCmp2.path)
fInf.uuid should equal (dataInfoCmp2.uuid)
fInf.lastModified should equal (dataInfoCmp2.lastModified)
// check fields
fInf.fields.get("name").size should equal (dataInfoCmp.fields.get("name").size)
fInf.fields.get("name").size should equal (dataInfoCmp2.fields.get("name").size)
(dataInfoCmp: @unchecked) match {
case FileInfoton(_,_,_,_,_,content,_) =>
content.get match {
case FileContent(binData,mimeType,_,_) =>
val d = binData.get
d should equal (bArr)
"application/octet-stream" should equal (mimeType)
}
}
(dataInfoCmp2: @unchecked) match {
case FileInfoton(_,_,_,_,_,content,_) =>
content.get match {
case FileContent(binData,mimeType,_,_) =>
val d = binData.get
d should equal (bArr)
"application/octet-stream" should equal (mimeType)
}
}
}
"link infoton serializer" should "be successful" in {
val forward = LinkInfoton("/command-test/objinfo1","dc_test" , Map("name" -> Set[FieldValue](FString("gal"), FString("yoav"))) , "/mark" , LinkType.Forward )
val forwardCmp = InfotonSerializer.deserialize(InfotonSerializer.serialize(forward))
// check system
forward.path should equal (forwardCmp.path)
forward.uuid should equal (forwardCmp.uuid)
forward.lastModified should equal (forwardCmp.lastModified)
// check link
(forwardCmp: @unchecked) match {
case LinkInfoton(_,_,_,_,_,t,lt,_) => {
forward.linkTo should equal (t)
forward.linkType should equal (lt)
}
}
// check fields
forward.fields.get("name").size should equal (forwardCmp.fields.get("name").size)
val per = LinkInfoton("/command-test/objinfo1","dc_test", Map("name" -> Set[FieldValue](FString("gal"), FString("yoav"))) , "/mark" , LinkType.Permanent )
val perCmp = InfotonSerializer.deserialize(InfotonSerializer.serialize(per))
// check system
per.path should equal (perCmp.path)
per.uuid should equal (perCmp.uuid)
per.lastModified should equal (perCmp.lastModified)
// check link
(perCmp: @unchecked) match {
case LinkInfoton(_,_,_,_,_,t,lt,_ ) => {
per.linkTo should equal (t)
per.linkType should equal (lt)
}
}
// check fields
per.fields.get("name").size should equal (perCmp.fields.get("name").size)
val temp = LinkInfoton("/command-test/objinfo1","dc_test" , Map("name" -> Set[FieldValue](FString("gal"), FString("yoav"))) , "/mark" , LinkType.Temporary )
val tempCmp = InfotonSerializer.deserialize(InfotonSerializer.serialize(temp))
// check system
temp.path should equal (tempCmp.path)
temp.uuid should equal (tempCmp.uuid)
temp.lastModified should equal (tempCmp.lastModified)
// check link
(tempCmp: @unchecked) match {
case LinkInfoton(_,_,_,_,_,t,lt,_ ) => {
temp.linkTo should equal (t)
temp.linkType should equal (lt)
}
}
// check fields
temp.fields.get("name").size should equal (tempCmp.fields.get("name").size)
}
"deleted infoton serializer" should "be successful" in {
val deleted = DeletedInfoton("/command-test/delete","dc_test")
val deletedCmp = InfotonSerializer.deserialize(InfotonSerializer.serialize(deleted))
deleted.path should equal (deletedCmp.path)
deleted.lastModified should equal (deletedCmp.lastModified)
  }
  "different infotons with same fields" should "return isSameAs==true" in {
val infoton1 = ObjectInfoton("/pathOfInfoton1","dc_test", None, new DateTime("2015-03-04T12:51:39.000Z"), Map("Mark"->Set[FieldValue](FString("King"),FString("Awesome"))))
val infoton2 = ObjectInfoton("/pathOfInfoton2","dc_test", None, new DateTime("2001-02-03T09:34:21.000Z"), Map("Mark"->Set[FieldValue](FString("Awesome"),FString("King"))))
(infoton1 isSameAs infoton2) should equal (true)
}
}
| nruppin/CM-Well | server/cmwell-domain/src/test/scala/InfotonSerializerSpec.scala | Scala | apache-2.0 | 12,941 |
/*
* Copyright © 2017 Full 360 Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package com.full360.prometheus
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{ BeforeAndAfterEach, Matchers, WordSpecLike }
trait BaseSpec extends WordSpecLike with BeforeAndAfterEach with MockitoSugar with Matchers {
def provide: AfterWord = afterWord("provide")
override protected def afterEach(): Unit = Prometheus.clearRegistry()
override protected def beforeEach(): Unit = registryShouldBe("")
def registryShouldBe(registry: String): Unit = Prometheus.getRegistry shouldBe registry
}
| full360/prometheus_client_scala | client-core/src/test/scala/com/full360/prometheus/BaseSpec.scala | Scala | mit | 1,613 |
package scalaz.contrib
package nscala_time
import scalaz._
import scalaz.syntax.enum._
import org.joda.time._
trait Instances {
private def orderFromInt[A](f: (A, A) => Int): Order[A] = new Order[A] {
def order(x: A, y: A) = Ordering.fromInt(f(x, y))
}
implicit val durationInstance = new Monoid[Duration] with Order[Duration] {
override def zero = org.joda.time.Duration.ZERO
override def append(f1: Duration, f2: ⇒ Duration) = f1.withDurationAdded(f2, 1)
override def order(a1: Duration, a2: Duration) = Ordering.fromInt(a1 compareTo a2)
}
implicit val periodInstance = new Monoid[Period] with Equal[Period] {
override val zero = Period.ZERO
override def append(f1: Period, f2: ⇒ Period) = new com.github.nscala_time.time.RichPeriod(f1) + f2
override def equal(a1: Period, a2: Period) = a1 == a2
}
implicit val yearsInstance = new Monoid[Years] with Order[Years] {
def order(x: Years, y: Years) = Ordering.fromInt(x compareTo y)
def append(x: Years, y: => Years) = x plus y
val zero = Years.ZERO
}
implicit val monthsInstance = new Monoid[Months] with Order[Months] {
def order(x: Months, y: Months) = Ordering.fromInt(x compareTo y)
def append(x: Months, y: => Months) = x plus y
val zero = Months.ZERO
}
implicit val weeksInstance = new Monoid[Weeks] with Order[Weeks] {
def order(x: Weeks, y: Weeks) = Ordering.fromInt(x compareTo y)
def append(x: Weeks, y: => Weeks) = x plus y
val zero = Weeks.ZERO
}
implicit val daysInstance = new Monoid[Days] with Order[Days] {
def order(x: Days, y: Days) = Ordering.fromInt(x compareTo y)
def append(x: Days, y: => Days) = x plus y
val zero = Days.ZERO
}
implicit val hoursInstance = new Monoid[Hours] with Order[Hours] {
def order(x: Hours, y: Hours) = Ordering.fromInt(x compareTo y)
def append(x: Hours, y: => Hours) = x plus y
val zero = Hours.ZERO
}
implicit val minutesInstance = new Monoid[Minutes] with Order[Minutes] {
def order(x: Minutes, y: Minutes) = Ordering.fromInt(x compareTo y)
def append(x: Minutes, y: => Minutes) = x plus y
val zero = Minutes.ZERO
}
implicit val secondsInstance = new Monoid[Seconds] with Order[Seconds] {
def order(x: Seconds, y: Seconds) = Ordering.fromInt(x compareTo y)
def append(x: Seconds, y: => Seconds) = x plus y
val zero = Seconds.ZERO
}
implicit val intervalInstance = new Semigroup[Interval] with Equal[Interval] {
def append(x: Interval, y: => Interval) = Option(x overlap y) getOrElse new Interval(0, 0)
def equal(x: Interval, y: Interval) = x == y
}
implicit val yearMonthInstance = orderFromInt[YearMonth](_ compareTo _)
implicit val monthDayInstance = orderFromInt[MonthDay](_ compareTo _)
implicit val instantInstance = orderFromInt[Instant](_ compareTo _)
implicit val localTimeInstance = orderFromInt[LocalTime](_ compareTo _)
implicit val dateTimeInstance = new Enum[DateTime] {
override def order(x: DateTime, y: DateTime): Ordering =
Ordering.fromInt(x compareTo y)
override def pred(a: DateTime): DateTime = a.minusDays(1)
override def succ(a: DateTime): DateTime = a.plusDays(1)
}
implicit val localDateInstance = new Enum[LocalDate] {
override def order(x: LocalDate, y: LocalDate): Ordering =
Ordering.fromInt(x compareTo y)
override def pred(a: LocalDate): LocalDate = a.minusDays(1)
override def succ(a: LocalDate): LocalDate = a.plusDays(1)
}
implicit val localDateTimeInstance = new Enum[LocalDateTime] {
override def order(x: LocalDateTime, y: LocalDateTime): Ordering =
Ordering.fromInt(x compareTo y)
override def pred(a: LocalDateTime): LocalDateTime = a.minusDays(1)
override def succ(a: LocalDateTime): LocalDateTime = a.plusDays(1)
}
}
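// Illustrative sketch (not part of the original source): exercising the
// Duration monoid defined above with scalaz's semigroup syntax.
object InstancesSketch extends Instances {
  import scalaz.syntax.semigroup._
  // 1s |+| 2s == 3s, via withDurationAdded in durationInstance above
  val threeSeconds: Duration =
    Duration.standardSeconds(1) |+| Duration.standardSeconds(2)
}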
// vim: expandtab:ts=2:sw=2
| typelevel/scalaz-contrib | nscala-time/main/scala/instances.scala | Scala | mit | 3,865 |
package com.clarifi.reporting
/** A G-algebra for "running" a value in G
* where G is usually some monad.
*
* @note identity law: run(fa) == run(run(fa).point[G]) | Applicative[G]
* @note distributive law: run(f)(run(fa)) == run(fa <*> f) | Apply[G]
*/
trait Run[G[_]] {
def run[A](a: G[A]): A
}
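/** Illustrative sketch (not part of the original source): the identity
  * G-algebra, for which the identity and distributive laws hold trivially. */
object RunIdentitySketch {
  type Id[A] = A
  val runId: Run[Id] = new Run[Id] {
    def run[A](a: Id[A]): A = a
  }
}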
| ermine-language/ermine-legacy | src/main/scala/com/clarifi/reporting/Run.scala | Scala | bsd-2-clause | 309 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp.lemma
trait Lemmatizer {
def lemmatize(word:String): String
}
object NoopLemmatizer extends Lemmatizer {
def lemmatize(word:String): String = word
}
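/** Illustrative sketch (not part of FACTORIE): a trivial lowercasing
  * Lemmatizer, showing the shape of an implementation of the trait above. */
object LowercaseLemmatizerSketch extends Lemmatizer {
  def lemmatize(word: String): String = word.toLowerCase
}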
| melisabok/factorie | src/main/scala/cc/factorie/app/nlp/lemma/Lemmatizer.scala | Scala | apache-2.0 | 931 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.scalalib
import org.junit.Test
import org.junit.Assert._
import org.junit.Assume._
import scala.collection.immutable.NumericRange
import scala.math.BigDecimal
import org.scalajs.testsuite.utils.Platform._
class RangesTest {
@Test def iterableRangeLinks_Issue650(): Unit = {
Iterable.range(1, 10)
}
@Test def iterableRangeAndSimpleRangeAreEqual(): Unit = {
// Mostly to exercise more methods of ranges for dce warnings
assertEquals((0 until 10).toList, Iterable.range(0, 10).toList)
}
@Test def numericRangeOverflow_Issue2407(): Unit = {
val nr = NumericRange(Int.MinValue, Int.MaxValue, 1 << 23)
assertEquals(Int.MinValue, nr.sum)
}
@Test def rangeForeach_Issue2409(): Unit = {
val r = Int.MinValue to Int.MaxValue by (1 << 23)
var i = 0
r.foreach(_ => i += 1)
assertEquals(512, i)
assertEquals(512, r.length)
assertEquals(Int.MinValue, r.sum)
}
@Test def rangeToString_Issue2412(): Unit = {
if (scalaVersion.startsWith("2.11.")) {
assertEquals("Range(1, 3, 5, 7, 9)", (1 to 10 by 2).toString)
assertEquals("Range()", (1 until 1 by 2).toString)
assertTrue(
(BigDecimal(0.0) to BigDecimal(1.0)).toString.startsWith("scala.collection.immutable.Range$Partial"))
assertEquals("Range(0, 1)", (0 to 1).toString)
} else {
assertEquals("inexact Range 1 to 10 by 2", (1 to 10 by 2).toString)
assertEquals("empty Range 1 until 1 by 2", (1 until 1 by 2).toString)
assertEquals("Range requires step", (BigDecimal(0.0) to BigDecimal(1.0)).toString)
assertEquals("Range 0 to 1", (0 to 1).toString)
}
}
@Test def numericRangeToString_Issue2412(): Unit = {
if (scalaVersion.startsWith("2.11.")) {
assertEquals("NumericRange(0, 2, 4, 6, 8, 10)",
NumericRange.inclusive(0, 10, 2).toString())
assertEquals("NumericRange(0, 2, 4, 6, 8)",
NumericRange(0, 10, 2).toString)
} else {
assertEquals("NumericRange 0 to 10 by 2",
NumericRange.inclusive(0, 10, 2).toString())
assertEquals("NumericRange 0 until 10 by 2",
NumericRange(0, 10, 2).toString)
}
}
@Test def numericRangeWithArbitraryIntegral(): Unit = {
// This is broken in Scala JVM up to (including) 2.11.8, 2.12.1 (SI-10086).
assumeFalse("Assumed not on JVM for 2.12.1",
executingInJVM && scalaVersion == "2.12.1")
// Our custom integral type.
case class A(v: Int)
implicit object aIsIntegral extends scala.math.Integral[A] {
def compare(x: A, y: A): Int = x.v.compare(y.v)
def fromInt(x: Int): A = A(x)
def minus(x: A, y: A): A = A(x.v - y.v)
def negate(x: A): A = A(-x.v)
def plus(x: A, y: A): A = A(x.v + y.v)
def times(x: A, y: A): A = A(x.v * y.v)
def quot(x: A, y: A): A = A(x.v / y.v)
def rem(x: A, y: A): A = A(x.v % y.v)
def toDouble(x: A): Double = x.v.toDouble
def toFloat(x: A): Float = x.v.toFloat
def toInt(x: A): Int = x.v
def toLong(x: A): Long = x.v.toLong
def parseString(str: String): Option[A] = Some(A(str.toInt))
}
val r = NumericRange(A(1), A(10), A(1))
assertEquals(A(1), r.min)
assertEquals(A(9), r.max)
// Also test with custom ordering.
assertEquals(A(9), r.min(aIsIntegral.reverse))
assertEquals(A(1), r.max(aIsIntegral.reverse))
}
}
| scala-js/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/scalalib/RangesTest.scala | Scala | apache-2.0 | 3,668 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.events.Event
import org.scalatest.time.Span
import org.scalatest.time.Millis
import org.scalatest.prop.Tables
import org.scalatest.events.TestSucceeded
import org.scalatest.tools.TestSortingReporter
trait TestTimeoutExpectedResults extends EventHelpers { s: ParallelTestExecution =>
def assertTestTimeoutTest(events: List[Event])
val holdTestSucceededName: String
val holdUntilEventCount: Int
override def sortingTimeout: Span = Span(300, Millis)
var holdingReporter: TestHoldingReporter = null
}
object ParallelTestExecutionTestTimeoutExamples extends Tables {
def testTimeoutSpec = new ExampleParallelTestExecutionTestTimeoutSpec()
def testTimeoutFixtureSpec = new ExampleParallelTestExecutionTestTimeoutFixtureSpec()
def testTimeoutFunSuite = new ExampleParallelTestExecutionTestTimeoutFunSuite()
def testTimeoutFixtureFunSuite = new ExampleParallelTestExecutionTestTimeoutFixtureFunSuite()
def testTimeoutFunSpec = new ExampleParallelTestExecutionTestTimeoutFunSpec()
def testTimeoutFixtureFunSpec = new ExampleParallelTestExecutionTestTimeoutFixtureFunSpec()
def testTimeoutFeatureSpec = new ExampleParallelTestExecutionTestTimeoutFeatureSpec()
def testTimeoutFixtureFeatureSpec = new ExampleParallelTestExecutionTestTimeoutFixtureFeatureSpec()
def testTimeoutFlatSpec = new ExampleParallelTestExecutionTestTimeoutFlatSpec()
def testTimeoutFixtureFlatSpec = new ExampleParallelTestExecutionTestTimeoutFixtureFlatSpec()
def testTimeoutFreeSpec = new ExampleParallelTestExecutionTestTimeoutFreeSpec()
def testTimeoutFixtureFreeSpec = new ExampleParallelTestExecutionTestTimeoutFixtureFreeSpec()
def testTimeoutPropSpec = new ExampleParallelTestExecutionTestTimeoutPropSpec()
def testTimeoutFixturePropSpec = new ExampleParallelTestExecutionTestTimeoutFixturePropSpec()
def testTimeoutWordSpec = new ExampleParallelTestExecutionTestTimeoutWordSpec()
def testTimeoutFixtureWordSpec = new ExampleParallelTestExecutionTestTimeoutFixtureWordSpec()
def testTimeoutExamples =
Table(
"suite1",
testTimeoutSpec,
testTimeoutFixtureSpec,
testTimeoutFunSuite,
testTimeoutFixtureFunSuite,
testTimeoutFunSpec,
testTimeoutFixtureFunSpec,
testTimeoutFeatureSpec,
testTimeoutFixtureFeatureSpec,
testTimeoutFlatSpec,
testTimeoutFixtureFlatSpec,
testTimeoutFreeSpec,
testTimeoutFixtureFreeSpec,
testTimeoutPropSpec,
testTimeoutFixturePropSpec,
testTimeoutWordSpec,
testTimeoutFixtureWordSpec
)
}
class TestHoldingReporter(dispatch: Reporter, holdingTestSucceededName: String) extends CatchReporter {
val out = System.err
private var holdEvent: Option[Event] = None
override protected def doApply(event: Event) {
event match {
case testSucceeded: TestSucceeded if testSucceeded.testName == holdingTestSucceededName =>
holdEvent = Some(testSucceeded)
case _ => dispatch(event)
}
}
protected def doDispose() {}
def fireHoldEvent() {
holdEvent match {
case Some(event) => dispatch(event)
case None =>
}
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutSpec extends Spec with ParallelTestExecution with TestTimeoutExpectedResults {
def `test 1` {}
def `test 2` {}
def `test 3` {}
val holdTestSucceededName = "test 2"
val holdUntilEventCount = 5
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 6)
checkTestStarting(events(0), "test 1")
checkTestSucceeded(events(1), "test 1")
checkTestStarting(events(2), "test 2")
checkTestStarting(events(3), "test 3")
checkTestSucceeded(events(4), "test 3")
// The missing one
checkTestSucceeded(events(5), "test 2")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFixtureSpec extends fixture.Spec with ParallelTestExecution with TestTimeoutExpectedResults with StringFixture {
def `test 1`(fixture: String) {}
def `test 2`(fixture: String) {}
def `test 3`(fixture: String) {}
val holdTestSucceededName = "test 2"
val holdUntilEventCount = 5
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 6)
checkTestStarting(events(0), "test 1")
checkTestSucceeded(events(1), "test 1")
checkTestStarting(events(2), "test 2")
checkTestStarting(events(3), "test 3")
checkTestSucceeded(events(4), "test 3")
// The missing one
checkTestSucceeded(events(5), "test 2")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFunSuite extends FunSuite with ParallelTestExecution with TestTimeoutExpectedResults {
test("Test 1") {}
test("Test 2") {}
test("Test 3") {}
val holdTestSucceededName = "Test 2"
val holdUntilEventCount = 5
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 6)
checkTestStarting(events(0), "Test 1")
checkTestSucceeded(events(1), "Test 1")
checkTestStarting(events(2), "Test 2")
checkTestStarting(events(3), "Test 3")
checkTestSucceeded(events(4), "Test 3")
// The missing one
checkTestSucceeded(events(5), "Test 2")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFixtureFunSuite extends fixture.FunSuite with ParallelTestExecution with TestTimeoutExpectedResults with StringFixture {
test("Test 1") { fixture => }
test("Test 2") { fixture => }
test("Test 3") { fixture => }
val holdTestSucceededName = "Test 2"
val holdUntilEventCount = 5
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 6)
checkTestStarting(events(0), "Test 1")
checkTestSucceeded(events(1), "Test 1")
checkTestStarting(events(2), "Test 2")
checkTestStarting(events(3), "Test 3")
checkTestSucceeded(events(4), "Test 3")
// The missing one
checkTestSucceeded(events(5), "Test 2")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFunSpec extends FunSpec with ParallelTestExecution with TestTimeoutExpectedResults {
describe("Scope 1") {
it("Test 1") {}
it("Test 2") {}
}
describe("Scope 2") {
it("Test 3") {}
it("Test 4") {}
}
val holdTestSucceededName = "Scope 2 Test 3"
val holdUntilEventCount = 11
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 12)
checkScopeOpened(events(0), "Scope 1")
checkTestStarting(events(1), "Scope 1 Test 1")
checkTestSucceeded(events(2), "Scope 1 Test 1")
checkTestStarting(events(3), "Scope 1 Test 2")
checkTestSucceeded(events(4), "Scope 1 Test 2")
checkScopeClosed(events(5), "Scope 1")
checkScopeOpened(events(6), "Scope 2")
checkTestStarting(events(7), "Scope 2 Test 3")
checkTestStarting(events(8), "Scope 2 Test 4")
checkTestSucceeded(events(9), "Scope 2 Test 4")
checkScopeClosed(events(10), "Scope 2")
// The missing one
checkTestSucceeded(events(11), "Scope 2 Test 3")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFixtureFunSpec extends fixture.FunSpec with ParallelTestExecution with TestTimeoutExpectedResults with StringFixture {
describe("Scope 1") {
it("Test 1") { fixture => }
it("Test 2") { fixture =>}
}
describe("Scope 2") {
it("Test 3") { fixture => }
it("Test 4") { fixture => }
}
val holdTestSucceededName = "Scope 2 Test 3"
val holdUntilEventCount = 11
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 12)
checkScopeOpened(events(0), "Scope 1")
checkTestStarting(events(1), "Scope 1 Test 1")
checkTestSucceeded(events(2), "Scope 1 Test 1")
checkTestStarting(events(3), "Scope 1 Test 2")
checkTestSucceeded(events(4), "Scope 1 Test 2")
checkScopeClosed(events(5), "Scope 1")
checkScopeOpened(events(6), "Scope 2")
checkTestStarting(events(7), "Scope 2 Test 3")
checkTestStarting(events(8), "Scope 2 Test 4")
checkTestSucceeded(events(9), "Scope 2 Test 4")
checkScopeClosed(events(10), "Scope 2")
// The missing one
checkTestSucceeded(events(11), "Scope 2 Test 3")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFeatureSpec extends FeatureSpec with ParallelTestExecution with TestTimeoutExpectedResults {
feature("Scope 1") {
scenario("Test 1") {}
scenario("Test 2") {}
}
feature("Scope 2") {
scenario("Test 3") {}
scenario("Test 4") {}
}
val holdTestSucceededName = "Feature: Scope 2 Scenario: Test 3"
val holdUntilEventCount = 11
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 12)
checkScopeOpened(events(0), "Feature: Scope 1")
checkTestStarting(events(1), "Feature: Scope 1 Scenario: Test 1")
checkTestSucceeded(events(2), "Feature: Scope 1 Scenario: Test 1")
checkTestStarting(events(3), "Feature: Scope 1 Scenario: Test 2")
checkTestSucceeded(events(4), "Feature: Scope 1 Scenario: Test 2")
checkScopeClosed(events(5), "Feature: Scope 1")
checkScopeOpened(events(6), "Feature: Scope 2")
checkTestStarting(events(7), "Feature: Scope 2 Scenario: Test 3")
checkTestStarting(events(8), "Feature: Scope 2 Scenario: Test 4")
checkTestSucceeded(events(9), "Feature: Scope 2 Scenario: Test 4")
checkScopeClosed(events(10), "Feature: Scope 2")
// The missing one
checkTestSucceeded(events(11), "Feature: Scope 2 Scenario: Test 3")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFixtureFeatureSpec extends fixture.FeatureSpec with ParallelTestExecution with TestTimeoutExpectedResults with StringFixture {
feature("Scope 1") {
scenario("Test 1") { fixture => }
scenario("Test 2") { fixture =>}
}
feature("Scope 2") {
scenario("Test 3") { fixture => }
scenario("Test 4") { fixture => }
}
val holdTestSucceededName = "Feature: Scope 2 Scenario: Test 3"
val holdUntilEventCount = 11
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 12)
checkScopeOpened(events(0), "Feature: Scope 1")
checkTestStarting(events(1), "Feature: Scope 1 Scenario: Test 1")
checkTestSucceeded(events(2), "Feature: Scope 1 Scenario: Test 1")
checkTestStarting(events(3), "Feature: Scope 1 Scenario: Test 2")
checkTestSucceeded(events(4), "Feature: Scope 1 Scenario: Test 2")
checkScopeClosed(events(5), "Feature: Scope 1")
checkScopeOpened(events(6), "Feature: Scope 2")
checkTestStarting(events(7), "Feature: Scope 2 Scenario: Test 3")
checkTestStarting(events(8), "Feature: Scope 2 Scenario: Test 4")
checkTestSucceeded(events(9), "Feature: Scope 2 Scenario: Test 4")
checkScopeClosed(events(10), "Feature: Scope 2")
// The missing one
checkTestSucceeded(events(11), "Feature: Scope 2 Scenario: Test 3")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFlatSpec extends FlatSpec with ParallelTestExecution with TestTimeoutExpectedResults {
behavior of "Scope 1"
it should "Test 1" in {}
it should "Test 2" in {}
behavior of "Scope 2"
it should "Test 3" in {}
it should "Test 4" in {}
val holdTestSucceededName = "Scope 2 should Test 3"
val holdUntilEventCount = 11
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 12)
checkScopeOpened(events(0), "Scope 1")
checkTestStarting(events(1), "Scope 1 should Test 1")
checkTestSucceeded(events(2), "Scope 1 should Test 1")
checkTestStarting(events(3), "Scope 1 should Test 2")
checkTestSucceeded(events(4), "Scope 1 should Test 2")
checkScopeClosed(events(5), "Scope 1")
checkScopeOpened(events(6), "Scope 2")
checkTestStarting(events(7), "Scope 2 should Test 3")
checkTestStarting(events(8), "Scope 2 should Test 4")
checkTestSucceeded(events(9), "Scope 2 should Test 4")
checkScopeClosed(events(10), "Scope 2")
// The missing one
checkTestSucceeded(events(11), "Scope 2 should Test 3")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFixtureFlatSpec extends fixture.FlatSpec with ParallelTestExecution with TestTimeoutExpectedResults with StringFixture {
behavior of "Scope 1"
it should "Test 1" in { fixture => }
it should "Test 2" in { fixture => }
behavior of "Scope 2"
it should "Test 3" in { fixture => }
it should "Test 4" in { fixture => }
val holdTestSucceededName = "Scope 2 should Test 3"
val holdUntilEventCount = 11
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 12)
checkScopeOpened(events(0), "Scope 1")
checkTestStarting(events(1), "Scope 1 should Test 1")
checkTestSucceeded(events(2), "Scope 1 should Test 1")
checkTestStarting(events(3), "Scope 1 should Test 2")
checkTestSucceeded(events(4), "Scope 1 should Test 2")
checkScopeClosed(events(5), "Scope 1")
checkScopeOpened(events(6), "Scope 2")
checkTestStarting(events(7), "Scope 2 should Test 3")
checkTestStarting(events(8), "Scope 2 should Test 4")
checkTestSucceeded(events(9), "Scope 2 should Test 4")
checkScopeClosed(events(10), "Scope 2")
// The missing one
checkTestSucceeded(events(11), "Scope 2 should Test 3")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFreeSpec extends FreeSpec with ParallelTestExecution with TestTimeoutExpectedResults {
"Scope 1" - {
"Test 1" in {}
"Test 2" in {}
}
"Scope 2" - {
"Test 3" in {}
"Test 4" in {}
}
val holdTestSucceededName = "Scope 2 Test 3"
val holdUntilEventCount = 11
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 12)
checkScopeOpened(events(0), "Scope 1")
checkTestStarting(events(1), "Scope 1 Test 1")
checkTestSucceeded(events(2), "Scope 1 Test 1")
checkTestStarting(events(3), "Scope 1 Test 2")
checkTestSucceeded(events(4), "Scope 1 Test 2")
checkScopeClosed(events(5), "Scope 1")
checkScopeOpened(events(6), "Scope 2")
checkTestStarting(events(7), "Scope 2 Test 3")
checkTestStarting(events(8), "Scope 2 Test 4")
checkTestSucceeded(events(9), "Scope 2 Test 4")
checkScopeClosed(events(10), "Scope 2")
// The missing one
checkTestSucceeded(events(11), "Scope 2 Test 3")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFixtureFreeSpec extends fixture.FreeSpec with ParallelTestExecution with TestTimeoutExpectedResults with StringFixture {
"Scope 1" - {
"Test 1" in { fixture => }
"Test 2" in { fixture => }
}
"Scope 2" - {
"Test 3" in { fixture => }
"Test 4" in { fixture => }
}
val holdTestSucceededName = "Scope 2 Test 3"
val holdUntilEventCount = 11
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 12)
checkScopeOpened(events(0), "Scope 1")
checkTestStarting(events(1), "Scope 1 Test 1")
checkTestSucceeded(events(2), "Scope 1 Test 1")
checkTestStarting(events(3), "Scope 1 Test 2")
checkTestSucceeded(events(4), "Scope 1 Test 2")
checkScopeClosed(events(5), "Scope 1")
checkScopeOpened(events(6), "Scope 2")
checkTestStarting(events(7), "Scope 2 Test 3")
checkTestStarting(events(8), "Scope 2 Test 4")
checkTestSucceeded(events(9), "Scope 2 Test 4")
checkScopeClosed(events(10), "Scope 2")
// The missing one
checkTestSucceeded(events(11), "Scope 2 Test 3")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutPropSpec extends PropSpec with ParallelTestExecution with TestTimeoutExpectedResults {
property("Test 1") {}
property("Test 2") {}
property("Test 3") {}
val holdTestSucceededName = "Test 2"
val holdUntilEventCount = 5
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 6)
checkTestStarting(events(0), "Test 1")
checkTestSucceeded(events(1), "Test 1")
checkTestStarting(events(2), "Test 2")
checkTestStarting(events(3), "Test 3")
checkTestSucceeded(events(4), "Test 3")
// The missing one
checkTestSucceeded(events(5), "Test 2")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFixturePropSpec extends fixture.PropSpec with ParallelTestExecution with TestTimeoutExpectedResults with StringFixture {
property("Test 1") { fixture => }
property("Test 2") { fixture => }
property("Test 3") { fixture => }
val holdTestSucceededName = "Test 2"
val holdUntilEventCount = 5
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 6)
checkTestStarting(events(0), "Test 1")
checkTestSucceeded(events(1), "Test 1")
checkTestStarting(events(2), "Test 2")
checkTestStarting(events(3), "Test 3")
checkTestSucceeded(events(4), "Test 3")
// The missing one
checkTestSucceeded(events(5), "Test 2")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutWordSpec extends WordSpec with ParallelTestExecution with TestTimeoutExpectedResults {
"Scope 1" should {
"Test 1" in {}
"Test 2" in {}
}
"Scope 2" should {
"Test 3" in {}
"Test 4" in {}
}
val holdTestSucceededName = "Scope 2 should Test 3"
val holdUntilEventCount = 11
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 12)
checkScopeOpened(events(0), "Scope 1")
checkTestStarting(events(1), "Scope 1 should Test 1")
checkTestSucceeded(events(2), "Scope 1 should Test 1")
checkTestStarting(events(3), "Scope 1 should Test 2")
checkTestSucceeded(events(4), "Scope 1 should Test 2")
checkScopeClosed(events(5), "Scope 1")
checkScopeOpened(events(6), "Scope 2")
checkTestStarting(events(7), "Scope 2 should Test 3")
checkTestStarting(events(8), "Scope 2 should Test 4")
checkTestSucceeded(events(9), "Scope 2 should Test 4")
checkScopeClosed(events(10), "Scope 2")
// The missing one
checkTestSucceeded(events(11), "Scope 2 should Test 3")
}
}
@DoNotDiscover
class ExampleParallelTestExecutionTestTimeoutFixtureWordSpec extends fixture.WordSpec with ParallelTestExecution with TestTimeoutExpectedResults with StringFixture {
"Scope 1" should {
"Test 1" in { fixture => }
"Test 2" in { fixture => }
}
"Scope 2" should {
"Test 3" in { fixture => }
"Test 4" in { fixture => }
}
val holdTestSucceededName = "Scope 2 should Test 3"
val holdUntilEventCount = 11
override protected[scalatest] def createTestSpecificReporter(testSorter: DistributedTestSorter, testName: String): Reporter = {
holdingReporter = new TestHoldingReporter(super.createTestSpecificReporter(testSorter, testName), holdTestSucceededName)
holdingReporter
}
def assertTestTimeoutTest(events: List[Event]) {
assert(events.size === 12)
checkScopeOpened(events(0), "Scope 1")
checkTestStarting(events(1), "Scope 1 should Test 1")
checkTestSucceeded(events(2), "Scope 1 should Test 1")
checkTestStarting(events(3), "Scope 1 should Test 2")
checkTestSucceeded(events(4), "Scope 1 should Test 2")
checkScopeClosed(events(5), "Scope 1")
checkScopeOpened(events(6), "Scope 2")
checkTestStarting(events(7), "Scope 2 should Test 3")
checkTestStarting(events(8), "Scope 2 should Test 4")
checkTestSucceeded(events(9), "Scope 2 should Test 4")
checkScopeClosed(events(10), "Scope 2")
// The missing one
checkTestSucceeded(events(11), "Scope 2 should Test 3")
}
}
| SRGOM/scalatest | scalatest-test/src/test/scala/org/scalatest/ParallelTestExecutionTestTimeoutExamples.scala | Scala | apache-2.0 | 24,200 |
package scaldi
import org.scalatest.{Matchers, WordSpec}
import java.text.{DateFormat, SimpleDateFormat}
class IdentifierSpec extends WordSpec with Matchers {
"Identifier" when {
"defaults are used" should {
"be converted from string" in {
getId("str") should equal (StringIdentifier("str"))
}
"be converted from symbol" in {
getId('sym) should equal (StringIdentifier("sym"))
}
"be converted from class" in {
getId(classOf[String]) should equal (TypeTagIdentifier.typeId[String])
}
}
"compared" should {
"should not match for different identifier types" in {
getId(classOf[DateFormat]) sameAs getId("java.text.DateFormat") should be (false)
}
"should match for the same identifier types" in {
getId('publisher) sameAs getId("publisher") should be (true)
getId('user) sameAs getId("publisher") should be (false)
getId(classOf[String]) sameAs getId(classOf[String]) should be (true)
getId(classOf[DateFormat]) sameAs getId(classOf[String]) should be (false)
}
"use polymothism to compare classes" in {
getId(classOf[SimpleDateFormat]) sameAs getId(classOf[DateFormat]) should be (true)
getId(classOf[DateFormat]) sameAs getId(classOf[SimpleDateFormat]) should be (false)
}
}
}
def getId[T: CanBeIdentifier](target: T) = implicitly[CanBeIdentifier[T]].toIdentifier(target)
} | Mironor/scaldi | src/test/scala/scaldi/IdentifierSpec.scala | Scala | apache-2.0 | 1,497 |
package unfiltered.netty.async
import unfiltered.netty.{ async, ReceivedMessage, RequestBinding, ServerErrorResponse }
import unfiltered.netty.request.{ AbstractMultiPartDecoder, Decode, Helpers, MultiPartBinding, MultiPartCallback, MultiPartPass, TidyExceptionHandler }
import unfiltered.request.HttpRequest
import unfiltered.response.{ Pass => UPass, ResponseFunction }
import unfiltered.Async
import io.netty.channel.ChannelHandlerContext
import io.netty.channel.ChannelHandler.Sharable
/** Provides useful defaults for Passing */
object MultipartPlan {
type Intent = PartialFunction[HttpRequest[ReceivedMessage], MultiPartIntent]
type MultiPartIntent = PartialFunction[MultiPartCallback, Unit]
val Pass: MultiPartIntent = { case _ => UPass }
val PassAlong: Intent = { case _ => Pass }
}
/** Enriches an async netty plan with multipart decoding capabilities. */
@Sharable
trait MultiPartDecoder
extends async.Plan
with AbstractMultiPartDecoder
with TidyExceptionHandler {
def intent: MultipartPlan.Intent
/** Decide if the intent could handle the request */
protected def handleOrPass(
ctx: ChannelHandlerContext, e: java.lang.Object, binding: RequestBinding)(thunk: => Unit) = {
intent.orElse(MultipartPlan.PassAlong)(binding) match {
case MultipartPlan.Pass => pass(ctx, e)
case _ => thunk
}
}
/** Called when the chunked request has been fully received. Executes the intent */
protected def complete(ctx: ChannelHandlerContext, nmsg: java.lang.Object)(cleanUp: => Unit) = {
val channelState = Helpers.channelStateOrCreate(ctx)
val res = channelState.originalReq match {
case Some(req) =>
val msg = ReceivedMessage(req, ctx, nmsg)
val multiBinding = new MultiPartBinding(channelState.decoder, msg)
val binding = new RequestBinding(msg)
intent.orElse(MultipartPlan.PassAlong)(binding) match {
case MultipartPlan.Pass =>
// fixme(doug): this isn't really responding here?
MultipartPlan.Pass
case multipartIntent =>
multipartIntent(Decode(multiBinding))
}
cleanUp
case _ =>
sys.error("Original request missing from channel state %s".format(ctx))
}
cleanUp
res
}
final override def channelRead(ctx: ChannelHandlerContext, obj: java.lang.Object) =
upgrade(ctx, obj)
final override def channelInactive(ctx: ChannelHandlerContext) {
cleanFiles(ctx)
ctx.fireChannelInactive()
}
}
@Sharable
class MultiPartPlanifier(
val intent: MultipartPlan.Intent,
val pass: MultiPartPass.PassHandler)
extends MultiPartDecoder
with ServerErrorResponse
/** Provides a MultiPart decoding plan that may buffer to disk while parsing the request */
object MultiPartDecoder {
def apply(intent: MultipartPlan.Intent): async.Plan =
MultiPartDecoder(intent, MultiPartPass.DefaultPassHandler)
def apply(intent: MultipartPlan.Intent, pass: MultiPartPass.PassHandler) =
new MultiPartPlanifier(intent, pass)
}
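// Illustrative usage sketch (commented out; not part of the original source).
// It assumes the standard `unfiltered.request.{POST, Path}` extractors are in
// the caller's scope; how the intent ultimately responds is application
// specific and omitted here.
//
// val uploads = MultiPartDecoder({
//   case POST(Path("/upload")) => {
//     case Decode(binding) =>
//       // inspect the decoded multipart parts held by `binding` and respond
//   }
// })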
/** Provides a MultiPart decoding plan that won't buffer to disk while parsing the request */
object MemoryMultiPartDecoder {
def apply(intent: MultipartPlan.Intent): async.Plan =
MemoryMultiPartDecoder(intent, MultiPartPass.DefaultPassHandler)
def apply(intent: MultipartPlan.Intent, pass: MultiPartPass.PassHandler) =
new MultiPartPlanifier(intent, pass) {
override protected val useDisk = false
}
}
| beni55/unfiltered | netty-uploads/src/main/scala/request/async/plans.scala | Scala | mit | 3,464 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.template.similarproduct
import org.apache.predictionio.controller.PPreparator
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
class Preparator
extends PPreparator[TrainingData, PreparedData] {
def prepare(sc: SparkContext, trainingData: TrainingData): PreparedData = {
new PreparedData(
users = trainingData.users,
items = trainingData.items,
viewEvents = trainingData.viewEvents)
}
}
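// Illustrative note (not part of the original source): PredictionIO calls
// `prepare` with the TrainingData produced by the DataSource, roughly
//   val prepared = new Preparator().prepare(sc, trainingData)
// after which `prepared.users`, `prepared.items` and `prepared.viewEvents`
// are passed on to the algorithm's `train` method.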
class PreparedData(
val users: RDD[(String, User)],
val items: RDD[(String, Item)],
val viewEvents: RDD[ViewEvent]
) extends Serializable
| pferrel/PredictionIO | examples/experimental/scala-parallel-similarproduct-localmodel/src/main/scala/Preparator.scala | Scala | apache-2.0 | 1,436 |
// code-examples/AppDesign/enumerations/http-case-script.scala
sealed abstract class HttpMethod(val id: Int) {
def name = getClass getSimpleName
override def toString = name
}
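// Note (added; not in the original script): for case objects such as Connect,
// `getClass.getSimpleName` typically returns the module class name with a
// trailing "$" (e.g. "Connect$"), so the names printed below may carry that
// suffix.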
case object Connect extends HttpMethod(0)
case object Delete extends HttpMethod(1)
case object Get extends HttpMethod(2)
case object Head extends HttpMethod(3)
case object Options extends HttpMethod(4)
case object Post extends HttpMethod(5)
case object Put extends HttpMethod(6)
case object Trace extends HttpMethod(7)
def handle (method: HttpMethod) = method match {
case Connect => println(method + ": " + method.id)
case Delete => println(method + ": " + method.id)
case Get => println(method + ": " + method.id)
case Head => println(method + ": " + method.id)
case Options => println(method + ": " + method.id)
case Post => println(method + ": " + method.id)
case Put => println(method + ": " + method.id)
case Trace => println(method + ": " + method.id)
}
List(Connect, Delete, Get, Head, Options, Post, Put, Trace) foreach {
method => handle(method)
}
| XClouded/t4f-core | scala/src/tmp/AppDesign/enumerations/http-case-script.scala | Scala | apache-2.0 | 1,096 |
import org.apache.jena.rdf.model.ResourceFactory
/**
* @author jiemakel
*
*/
object CIDOC {
val ns = "http://www.cidoc-crm.org/cidoc-crm/"
def R(s: String) = ResourceFactory.createResource(ns+s)
def P(s: String) = ResourceFactory.createProperty(ns+s)
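  // Illustrative note (added): the helpers above expand a CIDOC-CRM local name
  // into a full Jena resource or property, e.g.
  //   R("E21_Person") -> <http://www.cidoc-crm.org/cidoc-crm/E21_Person>
  //   P("P1_is_identified_by") -> <http://www.cidoc-crm.org/cidoc-crm/P1_is_identified_by>
  // so the vals below can be used directly with Jena's Model API, for example
  //   model.add(subject, CIDOC.is_identified_by, appellation)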
val CRM_Entity = R("E1_CRM_Entity")
val Temporal_Entity = R("E2_Temporal_Entity")
val Condition_State = R("E3_Condition_State")
val Period = R("E4_Period")
val Event = R("E5_Event")
val Destruction = R("E6_Destruction")
val Activity = R("E7_Activity")
val Acquisition = R("E8_Acquisition")
val Move = R("E9_Move")
val Transfer_of_Custody = R("E10_Transfer_of_Custody")
val Modification = R("E11_Modification")
val Production = R("E12_Production")
val Attribute_Assignment = R("E13_Attribute_Assignment")
val Condition_Assessment = R("E14_Condition_Assessment")
val Identifier_Assignment = R("E15_Identifier_Assignment")
val Measurement = R("E16_Measurement")
val Type_Assignment = R("E17_Type_Assignment")
val Physical_Thing = R("E18_Physical_Thing")
val Physical_Object = R("E19_Physical_Object")
val Biological_Object = R("E20_Biological_Object")
val Person = R("E21_Person")
val ManMade_Object = R("E22_Man-Made_Object")
val Physical_ManMade_Thing = R("E24_Physical_Man-Made_Thing")
val ManMade_Feature = R("E25_Man-Made_Feature")
val Physical_Feature = R("E26_Physical_Feature")
val Site = R("E27_Site")
val Conceptual_Object = R("E28_Conceptual_Object")
val Design_or_Procedure = R("E29_Design_or_Procedure")
val Right = R("E30_Right")
val Document = R("E31_Document")
val Authority_Document = R("E32_Authority_Document")
val Linguistic_Object = R("E33_Linguistic_Object")
val Inscription = R("E34_Inscription")
val Title = R("E35_Title")
val Visual_Item = R("E36_Visual_Item")
val Mark = R("E37_Mark")
val Image = R("E38_Image")
val Actor = R("E39_Actor")
val Legal_Body = R("E40_Legal_Body")
val Appellation = R("E41_Appellation")
val Identifier = R("E42_Identifier")
val Place_Appellation = R("E44_Place_Appellation")
val Address = R("E45_Address")
val Section_Definition = R("E46_Section_Definition")
val Spatial_Coordinates = R("E47_Spatial_Coordinates")
val Place_Name = R("E48_Place_Name")
val Time_Appellation = R("E49_Time_Appellation")
val Date = R("E50_Date")
val Contact_Point = R("E51_Contact_Point")
val TimeSpan = R("E52_Time-Span")
val Place = R("E53_Place")
val Dimension = R("E54_Dimension")
val Type = R("E55_Type")
val Language = R("E56_Language")
val Material = R("E57_Material")
val Measurement_Unit = R("E58_Measurement_Unit")
val Beginning_of_Existence = R("E63_Beginning_of_Existence")
val End_of_Existence = R("E64_End_of_Existence")
val Creation = R("E65_Creation")
val Formation = R("E66_Formation")
val Birth = R("E67_Birth")
val Dissolution = R("E68_Dissolution")
val Death = R("E69_Death")
val Thing = R("E70_Thing")
val ManMade_Thing = R("E71_Man-Made_Thing")
val Legal_Object = R("E72_Legal_Object")
val Information_Object = R("E73_Information_Object")
val Group = R("E74_Group")
val Conceptual_Object_Appellation = R("E75_Conceptual_Object_Appellation")
val Persistent_Item = R("E77_Persistent_Item")
val Collection = R("E78_Collection")
val Part_Addition = R("E79_Part_Addition")
val Part_Removal = R("E80_Part_Removal")
val Transformation = R("E81_Transformation")
val Actor_Appellation = R("E82_Actor_Appellation")
val Type_Creation = R("E83_Type_Creation")
val Information_Carrier = R("E84_Information_Carrier")
val Joining = R("E85_Joining")
val Leaving = R("E86_Leaving")
val Curation_Activity = R("E87_Curation_Activity")
val Propositional_Object = R("E89_Propositional_Object")
val Symbolic_Object = R("E90_Symbolic_Object")
val parts_description = P("P3_parts_description")
val is_identified_by = P("P1_is_identified_by")
val identifies = P("P1i_identifies")
val has_type = P("P2_has_type")
val is_type_of = P("P2i_is_type_of")
val has_note = P("P3_has_note")
val has_timeSpan = P("P4_has_time-span")
val is_timeSpan_of = P("P4i_is_time-span_of")
val state_consists_of = P("P5_consists_of")
val state_forms_part_of = P("P5i_forms_part_of")
val took_place_at = P("P7_took_place_at")
val place_witnessed = P("P7i_witnessed")
val took_place_on_or_within = P("P8_took_place_on_or_within")
val object_witnessed = P("P8i_witnessed")
val period_consists_of = P("P9_consists_of")
val period_forms_part_of = P("P9i_forms_part_of")
val period_falls_within = P("P10_falls_within")
val contains_period = P("P10i_contains")
val had_participant = P("P11_had_participant")
val participated_in = P("P11i_participated_in")
val occurred_in_the_presence_of = P("P12_occurred_in_the_presence_of")
val was_present_at = P("P12i_was_present_at")
val destroyed = P("P13_destroyed")
val was_destroyed_by = P("P13i_was_destroyed_by")
val carried_out_by = P("P14_carried_out_by")
val performed = P("P14i_performed")
val was_influenced_by = P("P15_was_influenced_by")
val influenced = P("P15i_influenced")
val used_specific_object = P("P16_used_specific_object")
val was_used_for = P("P16i_was_used_for")
val was_motivated_by = P("P17_was_motivated_by")
val motivated = P("P17i_motivated")
val was_intended_use_of = P("P19_was_intended_use_of")
val was_made_for = P("P19i_was_made_for")
val had_specific_purpose = P("P20_had_specific_purpose")
val was_specific_purpose_of = P("P20i_was_purpose_of")
val had_general_purpose = P("P21_had_general_purpose")
val was_general_purpose_of = P("P21i_was_purpose_of")
val transferred_title_to = P("P22_transferred_title_to")
val acquired_title_through = P("P22i_acquired_title_through")
val transferred_title_from = P("P23_transferred_title_from")
val surrendered_title_through = P("P23i_surrendered_title_through")
val transferred_title_of = P("P24_transferred_title_of")
val changed_ownership_through = P("P24i_changed_ownership_through")
val moved = P("P25_moved")
val moved_by = P("P25i_moved_by")
val moved_to = P("P26_moved_to")
val was_destination_of = P("P26i_was_destination_of")
val moved_from = P("P27_moved_from")
val was_origin_of = P("P27i_was_origin_of")
val custody_surrendered_by = P("P28_custody_surrendered_by")
val surrendered_custody_through = P("P28i_surrendered_custody_through")
val custody_received_by = P("P29_custody_received_by")
val received_custody_through = P("P29i_received_custody_through")
val transferred_custody_of = P("P30_transferred_custody_of")
val custody_transferred_through = P("P30i_custody_transferred_through")
val has_modified = P("P31_has_modified")
val was_modified_by = P("P31i_was_modified_by")
val used_general_technique = P("P32_used_general_technique")
val was_technique_of = P("P32i_was_technique_of")
val used_specific_technique = P("P33_used_specific_technique")
val was_used_by = P("P33i_was_used_by")
val concerned = P("P34_concerned")
val was_assessed_by = P("P34i_was_assessed_by")
val has_identified = P("P35_has_identified")
val was_identified_by = P("P35i_was_identified_by")
val assigned_identifier = P("P37_assigned")
val identifier_was_assigned_by = P("P37i_was_assigned_by")
val deassigned = P("P38_deassigned")
val was_deassigned_by = P("P38i_was_deassigned_by")
val measured = P("P39_measured")
val was_measured_by = P("P39i_was_measured_by")
val observed_dimension = P("P40_observed_dimension")
val was_observed_in = P("P40i_was_observed_in")
val classified = P("P41_classified")
val was_classified_by = P("P41i_was_classified_by")
val assigned_type = P("P42_assigned")
val type_was_assigned_by = P("P42i_was_assigned_by")
val has_dimension = P("P43_has_dimension")
val is_dimension_of = P("P43i_is_dimension_of")
val has_condition = P("P44_has_condition")
val is_condition_of = P("P44i_is_condition_of")
val physicalThing_consists_of = P("P45_consists_of")
val is_incorporated_in = P("P45i_is_incorporated_in")
val physicalThing_is_composed_of = P("P46_is_composed_of")
val physicalThing_forms_part_of = P("P46i_forms_part_of")
val has_preferred_identifier = P("P48_has_preferred_identifier")
val is_preferred_identifier_of = P("P48i_is_preferred_identifier_of")
val has_former_or_current_keeper = P("P49_has_former_or_current_keeper")
val is_former_or_current_keeper_of = P("P49i_is_former_or_current_keeper_of")
val has_current_keeper = P("P50_has_current_keeper")
val is_current_keeper_of = P("P50i_is_current_keeper_of")
val has_former_or_current_owner = P("P51_has_former_or_current_owner")
val is_former_or_current_owner_of = P("P51i_is_former_or_current_owner_of")
val has_current_owner = P("P52_has_current_owner")
val is_current_owner_of = P("P52i_is_current_owner_of")
val has_former_or_current_location = P("P53_has_former_or_current_location")
val is_former_or_current_location_of = P("P53i_is_former_or_current_location_of")
val has_current_permanent_location = P("P54_has_current_permanent_location")
val is_current_permanent_location_of = P("P54i_is_current_permanent_location_of")
val has_current_location = P("P55_has_current_location")
val currently_holds = P("P55i_currently_holds")
val bears_feature = P("P56_bears_feature")
val is_found_on = P("P56i_is_found_on")
val has_number_of_parts = P("P57_has_number_of_parts")
val has_section_definition = P("P58_has_section_definition")
val defines_section = P("P58i_defines_section")
val has_section = P("P59_has_section")
val is_located_on_or_within = P("P59i_is_located_on_or_within")
val depicts = P("P62_depicts")
val is_depicted_by = P("P62i_is_depicted_by")
val shows_visual_item = P("P65_shows_visual_item")
val is_shown_by = P("P65i_is_shown_by")
val refers_to = P("P67_refers_to")
val is_referred_to_by = P("P67i_is_referred_to_by")
val foresees_use_of = P("P68_foresees_use_of")
val use_foreseen_by = P("P68i_use_foreseen_by")
val is_associated_with = P("P69_is_associated_with")
val documents = P("P70_documents")
val is_documented_in = P("P70i_is_documented_in")
val lists = P("P71_lists")
val is_listed_in = P("P71i_is_listed_in")
val has_language = P("P72_has_language")
val is_language_of = P("P72i_is_language_of")
val has_translation = P("P73_has_translation")
val is_translation_of = P("P73i_is_translation_of")
val has_current_or_former_residence = P("P74_has_current_or_former_residence")
val is_current_or_former_residence_of = P("P74i_is_current_or_former_residence_of")
val possesses = P("P75_possesses")
val is_possessed_by = P("P75i_is_possessed_by")
val has_contact_point = P("P76_has_contact_point")
val provides_access_to = P("P76i_provides_access_to")
val timeSpan_is_identified_by = P("P78_is_identified_by")
val identifies_timeSpan = P("P78i_identifies")
val beginning_is_qualified_by = P("P79_beginning_is_qualified_by")
val end_is_qualified_by = P("P80_end_is_qualified_by")
val ongoing_throughout = P("P81_ongoing_throughout")
val at_some_time_within = P("P82_at_some_time_within")
val begin_of_the_begin = P("P82a_begin_of_the_begin")
val end_of_the_begin = P("P81a_end_of_the_begin")
val begin_of_the_end = P("P81b_begin_of_the_end")
val end_of_the_end = P("P82b_end_of_the_end")
val had_at_least_duration = P("P83_had_at_least_duration")
val was_minimum_duration_of = P("P83i_was_minimum_duration_of")
val had_at_most_duration = P("P84_had_at_most_duration")
val was_maximum_duration_of = P("P84i_was_maximum_duration_of")
val timeSpan_falls_within = P("P86_falls_within")
val timeSpan_contains = P("P86i_contains")
val place_is_identified_by = P("P87_is_identified_by")
val identifies_place = P("P87i_identifies")
val place_consists_of = P("P88_consists_of")
val place_forms_part_of = P("P88i_forms_part_of")
val place_falls_within = P("P89_falls_within")
val place_contains = P("P89i_contains")
val has_value = P("P90_has_value")
val has_unit = P("P91_has_unit")
val is_unit_of = P("P91i_is_unit_of")
val brought_into_existence = P("P92_brought_into_existence")
val was_brought_into_existence_by = P("P92i_was_brought_into_existence_by")
val took_out_of_existence = P("P93_took_out_of_existence")
val was_taken_out_of_existence_by = P("P93i_was_taken_out_of_existence_by")
val has_created = P("P94_has_created")
val was_created_by = P("P94i_was_created_by")
val has_formed = P("P95_has_formed")
val was_formed_by = P("P95i_was_formed_by")
val by_mother = P("P96_by_mother")
val gave_birth = P("P96i_gave_birth")
val from_father = P("P97_from_father")
val was_father_for = P("P97i_was_father_for")
val brought_into_life = P("P98_brought_into_life")
val was_born = P("P98i_was_born")
val dissolved = P("P99_dissolved")
val was_dissolved_by = P("P99i_was_dissolved_by")
val was_death_of = P("P100_was_death_of")
val died_in = P("P100i_died_in")
val had_as_general_use = P("P101_had_as_general_use")
val was_use_of = P("P101i_was_use_of")
val has_title = P("P102_has_title")
val is_title_of = P("P102i_is_title_of")
val was_intended_for = P("P103_was_intended_for")
val was_intention_of = P("P103i_was_intention_of")
val is_subject_to = P("P104_is_subject_to")
val applies_to = P("P104i_applies_to")
val right_held_by = P("P105_right_held_by")
val has_right_on = P("P105i_has_right_on")
val symbolicObject_is_composed_of = P("P106_is_composed_of")
val symbolicObject_forms_part_of = P("P106i_forms_part_of")
val has_current_or_former_member = P("P107_has_current_or_former_member")
val is_current_or_former_member_of = P("P107i_is_current_or_former_member_of")
val has_produced = P("P108_has_produced")
val was_produced_by = P("P108i_was_produced_by")
val has_current_or_former_curator = P("P109_has_current_or_former_curator")
val is_current_or_former_curator_of = P("P109i_is_current_or_former_curator_of")
val augmented = P("P110_augmented")
val was_augmented_by = P("P110i_was_augmented_by")
val added = P("P111_added")
val was_added_by = P("P111i_was_added_by")
val diminished = P("P112_diminished")
val was_diminished_by = P("P112i_was_diminished_by")
val removed = P("P113_removed")
val was_removed_by = P("P113i_was_removed_by")
val is_equal_in_time_to = P("P114_is_equal_in_time_to")
val finishes = P("P115_finishes")
val is_finished_by = P("P115i_is_finished_by")
val starts = P("P116_starts")
val is_started_by = P("P116i_is_started_by")
val occurs_during = P("P117_occurs_during")
val includes = P("P117i_includes")
val overlaps_in_time_with = P("P118_overlaps_in_time_with")
val is_overlapped_in_time_by = P("P118i_is_overlapped_in_time_by")
val meets_in_time_with = P("P119_meets_in_time_with")
val is_met_in_time_by = P("P119i_is_met_in_time_by")
val occurs_before = P("P120_occurs_before")
val occurs_after = P("P120i_occurs_after")
val place_overlaps_with = P("P121_overlaps_with")
val borders_with = P("P122_borders_with")
val resulted_in = P("P123_resulted_in")
val resulted_from = P("P123i_resulted_from")
val transformed = P("P124_transformed")
val was_transformed_by = P("P124i_was_transformed_by")
val used_object_of_type = P("P125_used_object_of_type")
val was_type_of_object_used_in = P("P125i_was_type_of_object_used_in")
val employed = P("P126_employed")
val was_employed_in = P("P126i_was_employed_in")
val has_broader_term = P("P127_has_broader_term")
val has_narrower_term = P("P127i_has_narrower_term")
val carries = P("P128_carries")
val is_carried_by = P("P128i_is_carried_by")
val is_about = P("P129_is_about")
val is_subject_of = P("P129i_is_subject_of")
val shows_features_of = P("P130_shows_features_of")
val features_are_also_found_on = P("P130i_features_are_also_found_on")
val actor_is_identified_by = P("P131_is_identified_by")
val identifies_actor = P("P131i_identifies")
val period_overlaps_with = P("P132_overlaps_with")
val is_separated_from = P("P133_is_separated_from")
val continued = P("P134_continued")
val was_continued_by = P("P134i_was_continued_by")
val created_type = P("P135_created_type")
val type_was_created_by = P("P135i_was_created_by")
val was_based_on = P("P136_was_based_on")
val supported_type_creation = P("P136i_supported_type_creation")
val exemplifies = P("P137_exemplifies")
val is_exemplified_by = P("P137i_is_exemplified_by")
val represents = P("P138_represents")
val has_representation = P("P138i_has_representation")
val has_alternative_form = P("P139_has_alternative_form")
val assigned_attribute_to = P("P140_assigned_attribute_to")
val was_attributed_by = P("P140i_was_attributed_by")
val assigned_attribute = P("P141_assigned")
val attribute_was_assigned_by = P("P141i_was_assigned_by")
val used_constituent = P("P142_used_constituent")
val was_used_in = P("P142i_was_used_in")
val joined = P("P143_joined")
val was_joined_by = P("P143i_was_joined_by")
val joined_with = P("P144_joined_with")
val gained_member_by = P("P144i_gained_member_by")
val separated = P("P145_separated")
val left_by = P("P145i_left_by")
val separated_from = P("P146_separated_from")
val lost_member_by = P("P146i_lost_member_by")
val curated = P("P147_curated")
val was_curated_by = P("P147i_was_curated_by")
val has_component = P("P148_has_component")
val is_component_of = P("P148i_is_component_of")
val conceptualObject_is_identified_by = P("P149_is_identified_by")
val identifies_conceptualObject = P("P149i_identifies")
}
| jiemakel/anything2rdf | src/main/scala/CIDOC.scala | Scala | mit | 16,883 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.client
import java.util.Collections
import org.apache.hadoop.hive.metastore.api.FieldSchema
import org.apache.hadoop.hive.serde.serdeConstants
import org.apache.spark.SparkFunSuite
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
* A set of tests for the filter conversion logic used when pushing partition pruning into the
* metastore
*/
class FiltersSuite extends SparkFunSuite with Logging with PlanTest {
private val shim = new Shim_v0_13
private val testTable = new org.apache.hadoop.hive.ql.metadata.Table("default", "test")
private val varCharCol = new FieldSchema()
varCharCol.setName("varchar")
varCharCol.setType(serdeConstants.VARCHAR_TYPE_NAME)
testTable.setPartCols(Collections.singletonList(varCharCol))
filterTest("string filter",
(a("stringcol", StringType) > Literal("test")) :: Nil,
"stringcol > \\"test\\"")
filterTest("string filter backwards",
(Literal("test") > a("stringcol", StringType)) :: Nil,
"\\"test\\" > stringcol")
filterTest("int filter",
(a("intcol", IntegerType) === Literal(1)) :: Nil,
"intcol = 1")
filterTest("int filter backwards",
(Literal(1) === a("intcol", IntegerType)) :: Nil,
"1 = intcol")
filterTest("int and string filter",
(Literal(1) === a("intcol", IntegerType)) :: (Literal("a") === a("strcol", IntegerType)) :: Nil,
"1 = intcol and \\"a\\" = strcol")
filterTest("skip varchar",
(Literal("") === a("varchar", StringType)) :: Nil,
"")
filterTest("SPARK-19912 String literals should be escaped for Hive metastore partition pruning",
(a("stringcol", StringType) === Literal("p1\\" and q=\\"q1")) ::
(Literal("p2\\" and q=\\"q2") === a("stringcol", StringType)) :: Nil,
"""stringcol = 'p1" and q="q1' and 'p2" and q="q2' = stringcol""")
private def filterTest(name: String, filters: Seq[Expression], result: String) = {
test(name) {
withSQLConf(SQLConf.ADVANCED_PARTITION_PREDICATE_PUSHDOWN.key -> "true") {
val converted = shim.convertFilters(testTable, filters)
if (converted != result) {
fail(s"Expected ${filters.mkString(",")} to convert to '$result' but got '$converted'")
}
}
}
}
test("turn on/off ADVANCED_PARTITION_PREDICATE_PUSHDOWN") {
import org.apache.spark.sql.catalyst.dsl.expressions._
Seq(true, false).foreach { enabled =>
withSQLConf(SQLConf.ADVANCED_PARTITION_PREDICATE_PUSHDOWN.key -> enabled.toString) {
val filters =
(Literal(1) === a("intcol", IntegerType) ||
Literal(2) === a("intcol", IntegerType)) :: Nil
val converted = shim.convertFilters(testTable, filters)
if (enabled) {
assert(converted == "(1 = intcol or 2 = intcol)")
} else {
assert(converted.isEmpty)
}
}
}
}
private def a(name: String, dataType: DataType) = AttributeReference(name, dataType)()
}
| bravo-zhang/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala | Scala | apache-2.0 | 3,967 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.integrationtest.backend
import io.fabric8.kubernetes.client.DefaultKubernetesClient
import org.apache.spark.deploy.k8s.integrationtest.backend.GCE.GCETestBackend
import org.apache.spark.deploy.k8s.integrationtest.backend.minikube.{Minikube, MinikubeTestBackend}
import org.apache.spark.deploy.k8s.integrationtest.docker.SparkDockerImageBuilder
private[spark] trait IntegrationTestBackend {
def name(): String
def initialize(): Unit
def getKubernetesClient(): DefaultKubernetesClient
def cleanUp(): Unit = {}
}
private[spark] object IntegrationTestBackendFactory {
def getTestBackend(): IntegrationTestBackend = {
Option(System.getProperty("spark.kubernetes.test.master"))
.map(new GCETestBackend(_))
.getOrElse(new MinikubeTestBackend())
}
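  // Illustrative note (not in the original source): running the tests with
  //   -Dspark.kubernetes.test.master=<master-url>
  // selects the GCE-backed cluster backend, while leaving the property unset
  // falls back to a local Minikube backend.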
}
| publicRoman/spark | resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/backend/IntegrationTestBackend.scala | Scala | apache-2.0 | 1,612 |
package com.twitter.util.security
import java.security.Principal
import java.security.cert.Certificate
import javax.net.ssl.{SSLSession, SSLSessionContext}
import javax.security.cert.X509Certificate
/**
* Represents a non-existent secure relationship between two entities, i.e.
* a non-existent `SSLSession`.
*/
object NullSslSession extends SSLSession {
def getApplicationBufferSize: Int = 0
def getCipherSuite: String = ""
def getCreationTime: Long = 0
def getId: Array[Byte] = Array.empty
def getLastAccessedTime: Long = 0
def getLocalCertificates: Array[Certificate] = Array.empty
def getLocalPrincipal: Principal = NullPrincipal
def getPacketBufferSize: Int = 0
def getPeerCertificateChain: Array[X509Certificate] = Array.empty
def getPeerCertificates: Array[Certificate] = Array.empty
def getPeerHost: String = ""
def getPeerPort: Int = 0
def getPeerPrincipal: Principal = NullPrincipal
def getProtocol: String = ""
def getSessionContext: SSLSessionContext = NullSslSessionContext
def getValue(name: String): Object = ""
def getValueNames: Array[String] = Array.empty
def invalidate: Unit = {}
def isValid: Boolean = false
def putValue(name: String, value: Object): Unit = {}
def removeValue(name: String): Unit = {}
}
| twitter/util | util-security/src/main/scala/com/twitter/util/security/NullSslSession.scala | Scala | apache-2.0 | 1,272 |