code (stringlengths, 5-1M) | repo_name (stringlengths, 5-109) | path (stringlengths, 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M) |
---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.metric.LongSQLMetric
trait HashSemiJoin {
self: SparkPlan =>
val leftKeys: Seq[Expression]
val rightKeys: Seq[Expression]
val left: SparkPlan
val right: SparkPlan
val condition: Option[Expression]
override def output: Seq[Attribute] = left.output
protected[this] def supportUnsafe: Boolean = {
(self.codegenEnabled && self.unsafeEnabled
&& UnsafeProjection.canSupport(leftKeys)
&& UnsafeProjection.canSupport(rightKeys)
&& UnsafeProjection.canSupport(left.schema)
&& UnsafeProjection.canSupport(right.schema))
}
override def outputsUnsafeRows: Boolean = supportUnsafe
override def canProcessUnsafeRows: Boolean = supportUnsafe
override def canProcessSafeRows: Boolean = !supportUnsafe
protected def leftKeyGenerator: Projection =
if (supportUnsafe) {
UnsafeProjection.create(leftKeys, left.output)
} else {
newMutableProjection(leftKeys, left.output)()
}
protected def rightKeyGenerator: Projection =
if (supportUnsafe) {
UnsafeProjection.create(rightKeys, right.output)
} else {
newMutableProjection(rightKeys, right.output)()
}
@transient private lazy val boundCondition =
newPredicate(condition.getOrElse(Literal(true)), left.output ++ right.output)
protected def buildKeyHashSet(
buildIter: Iterator[InternalRow], numBuildRows: LongSQLMetric): java.util.Set[InternalRow] = {
val hashSet = new java.util.HashSet[InternalRow]()
// Build a hash set of the build-side (right) join keys
val rightKey = rightKeyGenerator
while (buildIter.hasNext) {
val currentRow = buildIter.next()
numBuildRows += 1
val rowKey = rightKey(currentRow)
if (!rowKey.anyNull) {
val keyExists = hashSet.contains(rowKey)
if (!keyExists) {
hashSet.add(rowKey.copy())
}
}
}
hashSet
}
protected def hashSemiJoin(
streamIter: Iterator[InternalRow],
numStreamRows: LongSQLMetric,
hashSet: java.util.Set[InternalRow],
numOutputRows: LongSQLMetric): Iterator[InternalRow] = {
val joinKeys = leftKeyGenerator
streamIter.filter(current => {
numStreamRows += 1
val key = joinKeys(current)
val r = !key.anyNull && hashSet.contains(key)
if (r) numOutputRows += 1
r
})
}
protected def hashSemiJoin(
streamIter: Iterator[InternalRow],
numStreamRows: LongSQLMetric,
hashedRelation: HashedRelation,
numOutputRows: LongSQLMetric): Iterator[InternalRow] = {
val joinKeys = leftKeyGenerator
val joinedRow = new JoinedRow
streamIter.filter { current =>
numStreamRows += 1
val key = joinKeys(current)
lazy val rowBuffer = hashedRelation.get(key)
val r = !key.anyNull && rowBuffer != null && rowBuffer.exists {
(row: InternalRow) => boundCondition(joinedRow(current, row))
}
if (r) numOutputRows += 1
r
}
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashSemiJoin.scala | Scala | apache-2.0 | 3,952 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import joptsimple._
import kafka.utils.Utils
import java.util.concurrent.CountDownLatch
import org.apache.log4j.Logger
import kafka.consumer._
/**
* Program to read using the rich consumer and dump the results to standard out
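*
* Expected command-line arguments (the topic name and properties file below are illustrative only):
* {{{
*   --topic test --props consumer.properties --partitions 2
* }}}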
*/
object ConsumerShell {
val logger = Logger.getLogger(getClass)
def main(args: Array[String]): Unit = {
val parser = new OptionParser
val topicOpt = parser.accepts("topic", "REQUIRED: The topic to consume from.")
.withRequiredArg
.describedAs("topic")
.ofType(classOf[String])
val consumerPropsOpt = parser.accepts("props", "REQUIRED: Properties file with the consumer properties.")
.withRequiredArg
.describedAs("properties")
.ofType(classOf[String])
val partitionsOpt = parser.accepts("partitions", "Number of partitions to consume from.")
.withRequiredArg
.describedAs("count")
.ofType(classOf[java.lang.Integer])
.defaultsTo(1)
val options = parser.parse(args : _*)
for(arg <- List(topicOpt, consumerPropsOpt)) {
if(!options.has(arg)) {
System.err.println("Missing required argument \"" + arg + "\"")
parser.printHelpOn(System.err)
System.exit(1)
}
}
val partitions = options.valueOf(partitionsOpt).intValue
val propsFile = options.valueOf(consumerPropsOpt)
val topic = options.valueOf(topicOpt)
println("Starting consumer...")
val consumerConfig = new ConsumerConfig(Utils.loadProps(propsFile))
val consumerConnector: ConsumerConnector = Consumer.create(consumerConfig)
val topicMessageStreams = consumerConnector.createMessageStreams(Predef.Map(topic -> partitions))
var threadList = List[ZKConsumerThread]()
for ((topic, streamList) <- topicMessageStreams)
for (stream <- streamList)
threadList ::= new ZKConsumerThread(stream)
for (thread <- threadList)
thread.start
// attach shutdown handler to catch control-c
Runtime.getRuntime().addShutdownHook(new Thread() {
override def run() = {
consumerConnector.shutdown
threadList.foreach(_.shutdown)
println("consumer threads shut down")
}
})
}
}
class ZKConsumerThread(stream: KafkaMessageStream) extends Thread {
val shutdownLatch = new CountDownLatch(1)
val logger = Logger.getLogger(getClass)
override def run() {
println("Starting consumer thread..")
var count: Int = 0
try {
for (message <- stream) {
logger.debug("consumed: " + Utils.toString(message.payload, "UTF-8"))
count += 1
}
} catch {
case e:ConsumerTimeoutException => // this is ok
case oe: Exception => logger.error(oe)
}
shutdownLatch.countDown
println("Received " + count + " messages")
println("thread shut down!")
}
def shutdown() {
shutdownLatch.await
}
}
| tcrayford/hafka | kafka/core/src/main/scala/kafka/tools/ConsumerShell.scala | Scala | bsd-3-clause | 3,718 |
package lila.oauth
import com.softwaremill.macwire._
import lila.common.config.CollName
@Module
final class Env(
cacheApi: lila.memo.CacheApi,
userRepo: lila.user.UserRepo,
db: lila.db.Db
)(implicit ec: scala.concurrent.ExecutionContext) {
lazy val legacyClientApi = new LegacyClientApi(db(CollName("oauth2_legacy_client")))
lazy val authorizationApi = new AuthorizationApi(db(CollName("oauth2_authorization")))
lazy val tokenApi = new AccessTokenApi(db(CollName("oauth2_access_token")), cacheApi, userRepo)
lazy val server = wire[OAuthServer]
}
| luanlv/lila | modules/oauth/src/main/Env.scala | Scala | mit | 588 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.File
import java.util.{Locale, TimeZone}
import scala.util.control.NonFatal
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.RuleExecutor
import org.apache.spark.sql.catalyst.util.{fileToString, stringToFile}
import org.apache.spark.sql.execution.HiveResult.hiveResultString
import org.apache.spark.sql.execution.SQLExecution
import org.apache.spark.sql.execution.command.{DescribeColumnCommand, DescribeCommandBase}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.StructType
import org.apache.spark.tags.ExtendedSQLTest
/**
* End-to-end test cases for SQL queries.
*
* Each case is loaded from a file in "spark/sql/core/src/test/resources/sql-tests/inputs".
* Each case has a golden result file in "spark/sql/core/src/test/resources/sql-tests/results".
*
* To run the entire test suite:
* {{{
* build/sbt "sql/test-only *SQLQueryTestSuite"
* }}}
*
* To run a single test file upon change:
* {{{
* build/sbt "~sql/test-only *SQLQueryTestSuite -- -z inline-table.sql"
* }}}
*
* To re-generate golden files for entire suite, run:
* {{{
* SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/test-only *SQLQueryTestSuite"
* }}}
*
* To re-generate golden file for a single test, run:
* {{{
* SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/test-only *SQLQueryTestSuite -- -z describe.sql"
* }}}
*
* The format for input files is simple:
* 1. A list of SQL queries separated by semicolon.
* 2. Lines starting with -- are treated as comments and ignored.
* 3. Lines starting with --SET are used to specify the configs when running this test file. You
* can set multiple configs in one --SET, using commas to separate them, or you can use multiple
* --SET statements.
* 4. Lines starting with --IMPORT are used to load queries from another test file.
* 5. Lines starting with --CONFIG_DIM are used to specify config dimensions of this test file.
* The dimension name is decided by the string after --CONFIG_DIM. For example, --CONFIG_DIM1
* belongs to dimension 1. One dimension can have multiple lines, each line representing one
* config set (one or more configs, separated by commas). Spark will run this test file many
* times, each time picking one config set from each dimension, until all the combinations have
* been tried. For example, if dimension 1 has 2 lines and dimension 2 has 3 lines, this test
* file will be run 6 times (Cartesian product). A combined example of these directives is
* shown after the query example below.
*
* For example:
* {{{
* -- this is a comment
* select 1, -1;
* select current_date;
* }}}
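*
* A hypothetical input file combining the directives above (the config values and the imported
* file name are purely illustrative, not taken from the actual test inputs) might look like:
* {{{
* --SET spark.sql.shuffle.partitions=2
* --IMPORT base-queries.sql
* --CONFIG_DIM1 spark.sql.codegen.wholeStage=true
* --CONFIG_DIM1 spark.sql.codegen.wholeStage=false
* select 1, -1;
* }}}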
*
* The format for golden result files looks roughly like:
* {{{
* -- some header information
*
* -- !query 0
* select 1, -1
* -- !query 0 schema
* struct<...schema...>
* -- !query 0 output
* ... data row 1 ...
* ... data row 2 ...
* ...
*
* -- !query 1
* ...
* }}}
*
* Note that UDF tests work differently. After the test files under the 'inputs/udf' directory are
* detected, the suite creates three test cases for each:
*
* - Scala UDF test case with a Scalar UDF registered as the name 'udf'.
*
* - Python UDF test case with a Python UDF registered as the name 'udf'
* iff Python executable and pyspark are available.
*
* - Scalar Pandas UDF test case with a Scalar Pandas UDF registered as the name 'udf'
* iff Python executable, pyspark, pandas and pyarrow are available.
*
* Therefore, UDF test cases should have single input and output files but are executed by three
* different types of UDFs. See 'udf/udf-inner-join.sql' as an example.
*/
@ExtendedSQLTest
class SQLQueryTestSuite extends QueryTest with SharedSparkSession {
import IntegratedUDFTestUtils._
private val regenerateGoldenFiles: Boolean = System.getenv("SPARK_GENERATE_GOLDEN_FILES") == "1"
protected val baseResourcePath = {
// We use a path based on Spark home for 2 reasons:
// 1. Maven can't get the correct resource directory when resources are in other jars.
// 2. We test subclasses in the hive-thriftserver module.
val sparkHome = {
assert(sys.props.contains("spark.test.home") ||
sys.env.contains("SPARK_HOME"), "spark.test.home or SPARK_HOME is not set.")
sys.props.getOrElse("spark.test.home", sys.env("SPARK_HOME"))
}
java.nio.file.Paths.get(sparkHome,
"sql", "core", "src", "test", "resources", "sql-tests").toFile
}
protected val inputFilePath = new File(baseResourcePath, "inputs").getAbsolutePath
protected val goldenFilePath = new File(baseResourcePath, "results").getAbsolutePath
protected val validFileExtensions = ".sql"
private val notIncludedMsg = "[not included in comparison]"
private val clsName = this.getClass.getCanonicalName
protected val emptySchema = StructType(Seq.empty).catalogString
protected override def sparkConf: SparkConf = super.sparkConf
// Fewer shuffle partitions to speed up testing.
.set(SQLConf.SHUFFLE_PARTITIONS, 4)
/** List of test cases to ignore, in lower cases. */
protected def blackList: Set[String] = Set(
"blacklist.sql" // Do NOT remove this one. It is here to test the blacklist functionality.
)
// Create all the test cases.
listTestCases.foreach(createScalaTestCase)
/** A single SQL query's output. */
protected case class QueryOutput(sql: String, schema: String, output: String) {
def toString(queryIndex: Int): String = {
// We are explicitly not using multi-line string due to stripMargin removing "|" in output.
s"-- !query $queryIndex\n" +
sql + "\n" +
s"-- !query $queryIndex schema\n" +
schema + "\n" +
s"-- !query $queryIndex output\n" +
output
}
}
/** A test case. */
protected trait TestCase {
val name: String
val inputFile: String
val resultFile: String
}
/**
* Traits that indicate UDF or PgSQL tests, used to trigger the code path specific to each. For
* instance, PgSQL tests require registering some UDF functions.
*/
protected trait PgSQLTest
/**
* A trait that indicates ANSI-related tests, which run with ANSI mode enabled.
*/
protected trait AnsiTest
protected trait UDFTest {
val udf: TestUDF
}
/** A regular test case. */
protected case class RegularTestCase(
name: String, inputFile: String, resultFile: String) extends TestCase
/** A PostgreSQL test case. */
protected case class PgSQLTestCase(
name: String, inputFile: String, resultFile: String) extends TestCase with PgSQLTest
/** A UDF test case. */
protected case class UDFTestCase(
name: String,
inputFile: String,
resultFile: String,
udf: TestUDF) extends TestCase with UDFTest
/** A UDF PostgreSQL test case. */
protected case class UDFPgSQLTestCase(
name: String,
inputFile: String,
resultFile: String,
udf: TestUDF) extends TestCase with UDFTest with PgSQLTest
/** An ANSI-related test case. */
protected case class AnsiTestCase(
name: String, inputFile: String, resultFile: String) extends TestCase with AnsiTest
protected def createScalaTestCase(testCase: TestCase): Unit = {
if (blackList.exists(t =>
testCase.name.toLowerCase(Locale.ROOT).contains(t.toLowerCase(Locale.ROOT)))) {
// Create a test case to ignore this case.
ignore(testCase.name) { /* Do nothing */ }
} else testCase match {
case udfTestCase: UDFTest
if udfTestCase.udf.isInstanceOf[TestPythonUDF] && !shouldTestPythonUDFs =>
ignore(s"${testCase.name} is skipped because " +
s"[$pythonExec] and/or pyspark were not available.") {
/* Do nothing */
}
case udfTestCase: UDFTest
if udfTestCase.udf.isInstanceOf[TestScalarPandasUDF] && !shouldTestScalarPandasUDFs =>
ignore(s"${testCase.name} is skipped because pyspark," +
s"pandas and/or pyarrow were not available in [$pythonExec].") {
/* Do nothing */
}
case _ =>
// Create a test case to run this case.
test(testCase.name) {
runTest(testCase)
}
}
}
/** Run a test case. */
protected def runTest(testCase: TestCase): Unit = {
val input = fileToString(new File(testCase.inputFile))
val (comments, code) = input.split("\n").partition(_.trim.startsWith("--"))
// If `--IMPORT` is found, load the queries from another test case file, then insert them
// at the head of this test.
val importedTestCaseName = comments.filter(_.startsWith("--IMPORT ")).map(_.substring(9))
val importedCode = importedTestCaseName.flatMap { testCaseName =>
listTestCases.find(_.name == testCaseName).map { testCase =>
val input = fileToString(new File(testCase.inputFile))
val (_, code) = input.split("\n").partition(_.trim.startsWith("--"))
code
}
}.flatten
// List of SQL queries to run
// note: this is not a robust way to split queries using semicolon, but works for now.
val queries = (importedCode ++ code).mkString("\n").split("(?<=[^\\\\]);")
.map(_.trim).filter(_ != "").toSeq
// Fix misplacement when comment is at the end of the query.
.map(_.split("\n").filterNot(_.startsWith("--")).mkString("\n")).map(_.trim).filter(_ != "")
val settingLines = comments.filter(_.startsWith("--SET ")).map(_.substring(6))
val settings = settingLines.flatMap(_.split(",").map { kv =>
val (conf, value) = kv.span(_ != '=')
conf.trim -> value.substring(1).trim
})
if (regenerateGoldenFiles) {
runQueries(queries, testCase, settings)
} else {
// A config dimension has multiple config sets, and a config set has multiple configs.
// - config dim: Seq[Seq[(String, String)]]
// - config set: Seq[(String, String)]
// - config: (String, String))
// We need to do cartesian product for all the config dimensions, to get a list of
// config sets, and run the query once for each config set.
val configDimLines = comments.filter(_.startsWith("--CONFIG_DIM")).map(_.substring(12))
val configDims = configDimLines.groupBy(_.takeWhile(_ != ' ')).mapValues { lines =>
lines.map(_.dropWhile(_ != ' ').substring(1)).map(_.split(",").map { kv =>
val (conf, value) = kv.span(_ != '=')
conf.trim -> value.substring(1).trim
}.toSeq).toSeq
}
val configSets = configDims.values.foldLeft(Seq(Seq[(String, String)]())) { (res, dim) =>
dim.flatMap { configSet => res.map(_ ++ configSet) }
}
configSets.foreach { configSet =>
try {
runQueries(queries, testCase, settings ++ configSet)
} catch {
case e: Throwable =>
val configs = configSet.map {
case (k, v) => s"$k=$v"
}
logError(s"Error using configs: ${configs.mkString(",")}")
throw e
}
}
}
}
protected def runQueries(
queries: Seq[String],
testCase: TestCase,
configSet: Seq[(String, String)]): Unit = {
// Create a local SparkSession to have stronger isolation between different test cases.
// This does not isolate catalog changes.
val localSparkSession = spark.newSession()
loadTestData(localSparkSession)
testCase match {
case udfTestCase: UDFTest =>
registerTestUDF(udfTestCase.udf, localSparkSession)
case _ =>
}
testCase match {
case _: PgSQLTest =>
// booleq/boolne used by boolean.sql
localSparkSession.udf.register("booleq", (b1: Boolean, b2: Boolean) => b1 == b2)
localSparkSession.udf.register("boolne", (b1: Boolean, b2: Boolean) => b1 != b2)
// vol used by boolean.sql and case.sql.
localSparkSession.udf.register("vol", (s: String) => s)
localSparkSession.conf.set(SQLConf.ANSI_ENABLED.key, true)
case _: AnsiTest =>
localSparkSession.conf.set(SQLConf.ANSI_ENABLED.key, true)
case _ =>
}
if (configSet.nonEmpty) {
// Execute the list of set operations in order to add the desired configs
val setOperations = configSet.map { case (key, value) => s"set $key=$value" }
logInfo(s"Setting configs: ${setOperations.mkString(", ")}")
setOperations.foreach(localSparkSession.sql)
}
// Run the SQL queries preparing them for comparison.
val outputs: Seq[QueryOutput] = queries.map { sql =>
val (schema, output) = handleExceptions(getNormalizedResult(localSparkSession, sql))
// We might need to do some query canonicalization in the future.
QueryOutput(
sql = sql,
schema = schema,
output = output.mkString("\n").replaceAll("\\s+$", ""))
}
if (regenerateGoldenFiles) {
// Again, we are explicitly not using multi-line string due to stripMargin removing "|".
val goldenOutput = {
s"-- Automatically generated by ${getClass.getSimpleName}\n" +
s"-- Number of queries: ${outputs.size}\n\n\n" +
outputs.zipWithIndex.map{case (qr, i) => qr.toString(i)}.mkString("\n\n\n") + "\n"
}
val resultFile = new File(testCase.resultFile)
val parent = resultFile.getParentFile
if (!parent.exists()) {
assert(parent.mkdirs(), "Could not create directory: " + parent)
}
stringToFile(resultFile, goldenOutput)
}
// This is a temporary workaround for SPARK-28894. The test names are truncated after
// the last dot due to a bug in SBT. This makes it easier to debug via the Jenkins test result
// report. See SPARK-28894.
// See also SPARK-29127. It is difficult to see the version information in the failed test
// cases so the version information related to Python was also added.
val clue = testCase match {
case udfTestCase: UDFTest
if udfTestCase.udf.isInstanceOf[TestPythonUDF] && shouldTestPythonUDFs =>
s"${testCase.name}${System.lineSeparator()}Python: $pythonVer${System.lineSeparator()}"
case udfTestCase: UDFTest
if udfTestCase.udf.isInstanceOf[TestScalarPandasUDF] && shouldTestScalarPandasUDFs =>
s"${testCase.name}${System.lineSeparator()}" +
s"Python: $pythonVer Pandas: $pandasVer PyArrow: $pyarrowVer${System.lineSeparator()}"
case _ =>
s"${testCase.name}${System.lineSeparator()}"
}
withClue(clue) {
// Read back the golden file.
val expectedOutputs: Seq[QueryOutput] = {
val goldenOutput = fileToString(new File(testCase.resultFile))
val segments = goldenOutput.split("-- !query.+\n")
// each query has 3 segments, plus the header
assert(segments.size == outputs.size * 3 + 1,
s"Expected ${outputs.size * 3 + 1} blocks in result file but got ${segments.size}. " +
s"Try regenerating the result files.")
Seq.tabulate(outputs.size) { i =>
QueryOutput(
sql = segments(i * 3 + 1).trim,
schema = segments(i * 3 + 2).trim,
output = segments(i * 3 + 3).replaceAll("\\s+$", "")
)
}
}
// Compare results.
assertResult(expectedOutputs.size, s"Number of queries should be ${expectedOutputs.size}") {
outputs.size
}
outputs.zip(expectedOutputs).zipWithIndex.foreach { case ((output, expected), i) =>
assertResult(expected.sql, s"SQL query did not match for query #$i\n${expected.sql}") {
output.sql
}
assertResult(expected.schema,
s"Schema did not match for query #$i\n${expected.sql}: $output") {
output.schema
}
assertResult(expected.output.sorted, s"Result did not match" +
s" for query #$i\n${expected.sql}") { output.output.sorted }
}
}
}
/**
* This method handles exceptions that occur during query execution, as they may need special care
* to become comparable to the expected output.
*
* @param result a function that returns a pair of schema and output
*/
protected def handleExceptions(result: => (String, Seq[String])): (String, Seq[String]) = {
try {
result
} catch {
case a: AnalysisException =>
// Do not output the logical plan tree which contains expression IDs.
// Also implement a crude way of masking expression IDs in the error message
// with the generic pattern "#x".
val msg = if (a.plan.nonEmpty) a.getSimpleMessage else a.getMessage
(emptySchema, Seq(a.getClass.getName, msg.replaceAll("#\\d+", "#x")))
case s: SparkException if s.getCause != null =>
// For a runtime exception, it is hard to match because its message contains
// information of stage, task ID, etc.
// To make result matching simpler, here we match the cause of the exception if it exists.
val cause = s.getCause
(emptySchema, Seq(cause.getClass.getName, cause.getMessage))
case NonFatal(e) =>
// If there is an exception, put the exception class followed by the message.
(emptySchema, Seq(e.getClass.getName, e.getMessage))
}
}
/** Executes a query and returns the result as (schema of the output, normalized output). */
private def getNormalizedResult(session: SparkSession, sql: String): (String, Seq[String]) = {
// Returns true if the plan is supposed to be sorted.
def isSorted(plan: LogicalPlan): Boolean = plan match {
case _: Join | _: Aggregate | _: Generate | _: Sample | _: Distinct => false
case _: DescribeCommandBase
| _: DescribeColumnCommand
| _: DescribeRelation
| _: DescribeColumnStatement => true
case PhysicalOperation(_, _, Sort(_, true, _)) => true
case _ => plan.children.iterator.exists(isSorted)
}
val df = session.sql(sql)
val schema = df.schema.catalogString
// Get answer, but also get rid of the #1234 expression ids that show up in explain plans
val answer = SQLExecution.withNewExecutionId(session, df.queryExecution, Some(sql)) {
hiveResultString(df.queryExecution.executedPlan).map(replaceNotIncludedMsg)
}
// If the output is not pre-sorted, sort it.
if (isSorted(df.queryExecution.analyzed)) (schema, answer) else (schema, answer.sorted)
}
protected def replaceNotIncludedMsg(line: String): String = {
line.replaceAll("#\\d+", "#x")
.replaceAll(
s"Location.*$clsName/",
s"Location $notIncludedMsg/{warehouse_dir}/")
.replaceAll("Created By.*", s"Created By $notIncludedMsg")
.replaceAll("Created Time.*", s"Created Time $notIncludedMsg")
.replaceAll("Last Access.*", s"Last Access $notIncludedMsg")
.replaceAll("Partition Statistics\t\\d+", s"Partition Statistics\t$notIncludedMsg")
.replaceAll("\\*\\(\\d+\\) ", "*") // remove the WholeStageCodegen codegenStageIds
}
protected lazy val listTestCases: Seq[TestCase] = {
listFilesRecursively(new File(inputFilePath)).flatMap { file =>
val resultFile = file.getAbsolutePath.replace(inputFilePath, goldenFilePath) + ".out"
val absPath = file.getAbsolutePath
val testCaseName = absPath.stripPrefix(inputFilePath).stripPrefix(File.separator)
if (file.getAbsolutePath.startsWith(
s"$inputFilePath${File.separator}udf${File.separator}postgreSQL")) {
Seq(TestScalaUDF("udf"), TestPythonUDF("udf"), TestScalarPandasUDF("udf")).map { udf =>
UDFPgSQLTestCase(
s"$testCaseName - ${udf.prettyName}", absPath, resultFile, udf)
}
} else if (file.getAbsolutePath.startsWith(s"$inputFilePath${File.separator}udf")) {
Seq(TestScalaUDF("udf"), TestPythonUDF("udf"), TestScalarPandasUDF("udf")).map { udf =>
UDFTestCase(
s"$testCaseName - ${udf.prettyName}", absPath, resultFile, udf)
}
} else if (file.getAbsolutePath.startsWith(s"$inputFilePath${File.separator}postgreSQL")) {
PgSQLTestCase(testCaseName, absPath, resultFile) :: Nil
} else if (file.getAbsolutePath.startsWith(s"$inputFilePath${File.separator}ansi")) {
AnsiTestCase(testCaseName, absPath, resultFile) :: Nil
} else {
RegularTestCase(testCaseName, absPath, resultFile) :: Nil
}
}
}
/** Returns all the files (not directories) in a directory, recursively. */
protected def listFilesRecursively(path: File): Seq[File] = {
val (dirs, files) = path.listFiles().partition(_.isDirectory)
// Filter out test files with invalid extensions such as temp files created
// by vi (.swp), Mac (.DS_Store) etc.
val filteredFiles = files.filter(_.getName.endsWith(validFileExtensions))
filteredFiles ++ dirs.flatMap(listFilesRecursively)
}
/** Load built-in test tables into the SparkSession. */
private def loadTestData(session: SparkSession): Unit = {
import session.implicits._
(1 to 100).map(i => (i, i.toString)).toDF("key", "value").createOrReplaceTempView("testdata")
((Seq(1, 2, 3), Seq(Seq(1, 2, 3))) :: (Seq(2, 3, 4), Seq(Seq(2, 3, 4))) :: Nil)
.toDF("arraycol", "nestedarraycol")
.createOrReplaceTempView("arraydata")
(Tuple1(Map(1 -> "a1", 2 -> "b1", 3 -> "c1", 4 -> "d1", 5 -> "e1")) ::
Tuple1(Map(1 -> "a2", 2 -> "b2", 3 -> "c2", 4 -> "d2")) ::
Tuple1(Map(1 -> "a3", 2 -> "b3", 3 -> "c3")) ::
Tuple1(Map(1 -> "a4", 2 -> "b4")) ::
Tuple1(Map(1 -> "a5")) :: Nil)
.toDF("mapcol")
.createOrReplaceTempView("mapdata")
session
.read
.format("csv")
.options(Map("delimiter" -> "\t", "header" -> "false"))
.schema("a int, b float")
.load(testFile("test-data/postgresql/agg.data"))
.createOrReplaceTempView("aggtest")
session
.read
.format("csv")
.options(Map("delimiter" -> "\t", "header" -> "false"))
.schema(
"""
|unique1 int,
|unique2 int,
|two int,
|four int,
|ten int,
|twenty int,
|hundred int,
|thousand int,
|twothousand int,
|fivethous int,
|tenthous int,
|odd int,
|even int,
|stringu1 string,
|stringu2 string,
|string4 string
""".stripMargin)
.load(testFile("test-data/postgresql/onek.data"))
.createOrReplaceTempView("onek")
session
.read
.format("csv")
.options(Map("delimiter" -> "\t", "header" -> "false"))
.schema(
"""
|unique1 int,
|unique2 int,
|two int,
|four int,
|ten int,
|twenty int,
|hundred int,
|thousand int,
|twothousand int,
|fivethous int,
|tenthous int,
|odd int,
|even int,
|stringu1 string,
|stringu2 string,
|string4 string
""".stripMargin)
.load(testFile("test-data/postgresql/tenk.data"))
.createOrReplaceTempView("tenk1")
}
private val originalTimeZone = TimeZone.getDefault
private val originalLocale = Locale.getDefault
override def beforeAll(): Unit = {
super.beforeAll()
// Timezone is fixed to America/Los_Angeles for those timezone sensitive tests (timestamp_*)
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"))
// Add Locale setting
Locale.setDefault(Locale.US)
RuleExecutor.resetMetrics()
}
override def afterAll(): Unit = {
try {
TimeZone.setDefault(originalTimeZone)
Locale.setDefault(originalLocale)
// For debugging dump some statistics about how much time was spent in various optimizer rules
logWarning(RuleExecutor.dumpTimeSpent())
} finally {
super.afterAll()
}
}
}
| ptkool/spark | sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala | Scala | apache-2.0 | 24,755 |
/*
* MIT License
*
* Copyright (c) 2016 Gonçalo Marques
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.byteslounge.slickrepo.test.oracle
import com.byteslounge.slickrepo.test.{LongVersionedRepositoryTest, OracleConfig}
class OracleLongVersionedRepositoryTest extends LongVersionedRepositoryTest(OracleConfig.config)
| gonmarques/slick-repo | src/test/scala/com/byteslounge/slickrepo/test/oracle/OracleLongVersionedRepositoryTest.scala | Scala | mit | 1,367 |
package im.mange.acceptance.driveby.scalatest.browser
import im.mange.acceptance.driveby.scalatest.WebSpecification
import im.mange.common.ConditionNotMetException
import im.mange.driveby.Id
import im.mange.driveby.conditions._
import org.scalatest.Matchers
class SelectSpec extends WebSpecification with Matchers {
def `select must select an option` {
val id = Id("select")
given.page(<body><form><select id={id.id}><option>1</option><option>2</option></select></form></body>)
.select(id, "2")
.assert(ValueEquals(id, "2"))
}
def `only select if element can be uniquely identified` {
val id = Id("select")
val b = given.page(<body><form><select id={id.id}/><br/><select id={id.id}/></form></body>)
val thrown = the [ConditionNotMetException] thrownBy { b.select(id, "blah") }
thrown.getMessage should equal("""> FAILED: select 'Id(select)' option 'blah' (not met within 2000 millis)""")
}
def `only select if element is interactable` {
val id = Id("select")
val b = given.page(<body><form><select id={id.id} style="display: none"/></form></body>)
val thrown = the [ConditionNotMetException] thrownBy { b.select(id, "blah") }
thrown.getMessage should equal("""> FAILED: select 'Id(select)' option 'blah' (not met within 2000 millis)""")
}
def `only select if option is present` {
val id = Id("select")
val b = given.page(<body><form><select id={id.id}><option>1</option></select></form></body>)
val thrown = the [ConditionNotMetException] thrownBy { b.select(id, "2") }
thrown.getMessage should equal("""> FAILED: select 'Id(select)' option '2' (not met within 2000 millis)""")
}
} | alltonp/driveby | src/test/scala/im/mange/acceptance/driveby/scalatest/browser/SelectSpec.scala | Scala | apache-2.0 | 1,676 |
package org.jetbrains.plugins.scala.codeInsight.intention.types
import com.intellij.codeInsight.intention.PsiElementBaseIntentionAction
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.ScalaBundle
import org.jetbrains.plugins.scala.codeInsight.intention.IntentionUtil
import org.jetbrains.plugins.scala.lang.psi.TypeAdjuster
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScBindingPattern, ScTypedPattern, ScWildcardPattern}
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScFunctionDefinition, ScPatternDefinition, ScVariableDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.types.api.{ScTypeText, TypeSystem}
import org.jetbrains.plugins.scala.lang.psi.types.{BaseTypes, ScType, ScTypeExt}
import org.jetbrains.plugins.scala.project.ProjectExt
import org.jetbrains.plugins.scala.util.IntentionAvailabilityChecker
/**
* Author: Svyatoslav Ilinskiy
* Date: 22.12.15.
*/
class MakeTypeMoreSpecificIntention extends PsiElementBaseIntentionAction {
override def invoke(project: Project, editor: Editor, element: PsiElement): Unit = {
implicit val typeSystem = project.typeSystem
ToggleTypeAnnotation.complete(new MakeTypeMoreSpecificStrategy(Option(editor)), element)
}
override def isAvailable(project: Project, editor: Editor, element: PsiElement): Boolean = {
if (element == null || !IntentionAvailabilityChecker.checkIntention(this, element)) false
else {
var isAvailable = false
def text(s: String): Unit = {
setText(s)
isAvailable = true
}
implicit val typeSystem = project.typeSystem
val desc = new StrategyAdapter {
override def removeFromVariable(variable: ScVariableDefinition): Unit = {
for {
declared <- variable.declaredType
expr <- variable.expr
tp <- expr.getType()
if MakeTypeMoreSpecificStrategy.computeBaseTypes(declared, tp).nonEmpty
} text(ScalaBundle.message("make.type.more.specific"))
}
override def removeFromValue(value: ScPatternDefinition): Unit = {
for {
declared <- value.declaredType
expr <- value.expr
tp <- expr.getType()
if MakeTypeMoreSpecificStrategy.computeBaseTypes(declared, tp).nonEmpty
} text(ScalaBundle.message("make.type.more.specific"))
}
override def removeFromFunction(function: ScFunctionDefinition): Unit = {
for {
declared <- function.returnType
expr <- function.body
tp <- expr.getType()
if MakeTypeMoreSpecificStrategy.computeBaseTypes(declared, tp).nonEmpty
} text(ScalaBundle.message("make.type.more.specific.fun"))
}
}
ToggleTypeAnnotation.complete(desc, element)
isAvailable
}
}
override def getFamilyName: String = ScalaBundle.message("make.type.more.specific")
}
class MakeTypeMoreSpecificStrategy(editor: Option[Editor])
(implicit typeSystem: TypeSystem) extends Strategy {
import MakeTypeMoreSpecificStrategy._
def doTemplate(te: ScTypeElement, declaredType: ScType, dynamicType: ScType, context: PsiElement, editor: Editor): Unit = {
val types = computeBaseTypes(declaredType, dynamicType).sortWith((t1, t2) => t1.conforms(t2))
if (types.size == 1) {
val replaced = te.replace(ScalaPsiElementFactory.createTypeElementFromText(types.head.canonicalText, te.getContext, te))
TypeAdjuster.markToAdjust(replaced)
} else {
val texts = types.map(ScTypeText)
val expr = new ChooseTypeTextExpression(texts, ScTypeText(declaredType))
IntentionUtil.startTemplate(te, context, expr, editor)
}
}
override def removeFromFunction(function: ScFunctionDefinition): Unit = {
for {
edit <- editor
te <- function.returnTypeElement
body <- function.body
tp <- body.getType()
declared <- te.getType()
} doTemplate(te, declared, tp, function.getParent, edit)
}
override def removeFromValue(value: ScPatternDefinition): Unit = {
for {
edit <- editor
te <- value.typeElement
body <- value.expr
tp <- body.getType()
declared <- te.getType()
} doTemplate(te, declared, tp, value.getParent, edit)
}
override def removeFromVariable(variable: ScVariableDefinition): Unit = {
for {
edit <- editor
te <- variable.typeElement
body <- variable.expr
tp <- body.getType()
declared <- te.getType()
} doTemplate(te, declared, tp, variable.getParent, edit)
}
override def addToPattern(pattern: ScBindingPattern): Unit = ()
override def addToWildcardPattern(pattern: ScWildcardPattern): Unit = ()
override def addToValue(value: ScPatternDefinition): Unit = ()
override def addToFunction(function: ScFunctionDefinition): Unit = ()
override def removeFromPattern(pattern: ScTypedPattern): Unit = ()
override def addToVariable(variable: ScVariableDefinition): Unit = ()
override def removeFromParameter(param: ScParameter): Unit = ()
override def addToParameter(param: ScParameter): Unit = ()
}
object MakeTypeMoreSpecificStrategy {
def computeBaseTypes(declaredType: ScType, dynamicType: ScType)
(implicit typeSystem: TypeSystem) : Seq[ScType] = {
val baseTypes = dynamicType +: BaseTypes.get(dynamicType)
baseTypes.filter(t => t.conforms(declaredType) && !t.equiv(declaredType))
}
} | katejim/intellij-scala | src/org/jetbrains/plugins/scala/codeInsight/intention/types/MakeTypeMoreSpecificIntention.scala | Scala | apache-2.0 | 5,798 |
package objsets
import common._
import TweetReader._
/**
* A class to represent tweets.
*/
class Tweet(val user: String, val text: String, val retweets: Int) {
override def toString: String =
"User: " + user + "\n" +
"Text: " + text + " [" + retweets + "]"
}
/**
* This represents a set of objects of type `Tweet` in the form of a binary search
* tree. Every branch in the tree has two children (two `TweetSet`s). There is an
* invariant which always holds: for every branch `b`, all elements in the left
* subtree are smaller than the tweet at `b`. The elements in the right subtree are
* larger.
*
* Note that the above structure requires us to be able to compare two tweets (we
* need to be able to say which of two tweets is larger, or if they are equal). In
* this implementation, the equality / order of tweets is based on the tweet's text
* (see `def incl`). Hence, a `TweetSet` could not contain two tweets with the same
* text from different users.
*
*
* The advantage of representing sets as binary search trees is that the elements
* of the set can be found quickly. If you want to learn more you can take a look
* at the Wikipedia page [1], but this is not necessary in order to solve this
* assignment.
*
* [1] http://en.wikipedia.org/wiki/Binary_search_tree
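*
* As a small illustrative sketch (the tweets are invented for this example), note that equality
* and ordering are based on the tweet text only:
* {{{
*   val set = new Empty().incl(new Tweet("a", "hello", 3)).incl(new Tweet("b", "world", 5))
*   set.contains(new Tweet("c", "hello", 0))   // true: same text, different user and retweets
*   set.contains(new Tweet("a", "bye", 3))     // false: different text
* }}}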
*/
abstract class TweetSet {
/**
* This method takes a predicate and returns a subset of all the elements
* in the original set for which the predicate is true.
*
* Question: Can we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def filter(p: Tweet => Boolean): TweetSet = ???
/**
* This is a helper method for `filter` that propagates the accumulated tweets.
*/
def filterAcc(p: Tweet => Boolean, acc: TweetSet): TweetSet
/**
* Returns a new `TweetSet` that is the union of `TweetSet`s `this` and `that`.
*
* Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def union(that: TweetSet): TweetSet = ???
/**
* Returns the tweet from this set which has the greatest retweet count.
*
* Calling `mostRetweeted` on an empty set should throw an exception of
* type `java.util.NoSuchElementException`.
*
* Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def mostRetweeted(): Tweet = ???
/**
* Returns a list containing all tweets of this set, sorted by retweet count
* in descending order. In other words, the head of the resulting list should
* have the highest retweet count.
*
* Hint: the method `remove` on TweetSet will be very useful.
* Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def descendingByRetweet: TweetList = ???
/**
* The following methods are already implemented
*/
/**
* Returns a new `TweetSet` which contains all elements of this set, and the
* the new element `tweet` in case it does not already exist in this set.
*
* If `this.contains(tweet)`, the current set is returned.
*/
def incl(tweet: Tweet): TweetSet
/**
* Returns a new `TweetSet` which excludes `tweet`.
*/
def remove(tweet: Tweet): TweetSet
/**
* Tests if `tweet` exists in this `TweetSet`.
*/
def contains(tweet: Tweet): Boolean
/**
* This method takes a function and applies it to every element in the set.
*/
def foreach(f: Tweet => Unit): Unit
/**
* Checks if the set is empty
*/
def isEmpty(): Boolean
}
class Empty extends TweetSet {
def filterAcc(p: Tweet => Boolean, acc: TweetSet): TweetSet = acc
override def filter(p: Tweet => Boolean): TweetSet = new Empty
override def union(that: TweetSet): TweetSet = {
that
}
override def mostRetweeted: Tweet = new Tweet("","",0)
override def descendingByRetweet: TweetList = Nil
override def isEmpty():Boolean = true
/**
* The following methods are already implemented
*/
def contains(tweet: Tweet): Boolean = false
def incl(tweet: Tweet): TweetSet = new NonEmpty(tweet, new Empty, new Empty)
def remove(tweet: Tweet): TweetSet = this
def foreach(f: Tweet => Unit): Unit = ()
}
class NonEmpty(elem: Tweet, left: TweetSet, right: TweetSet) extends TweetSet {
override def filter(p: Tweet => Boolean): TweetSet = filterAcc(p,new Empty)
def filterAcc(p: Tweet => Boolean, acc: TweetSet): TweetSet = {
var newAcc = acc
if(p(elem)) newAcc = acc.incl(elem)
if(left.isEmpty && right.isEmpty){
newAcc
}else{
if(left.isEmpty){
right.filterAcc(p,newAcc)
}else if(right.isEmpty){
left.filterAcc(p,newAcc)
}else right.filterAcc(p,left.filterAcc(p,newAcc))
}
}
override def isEmpty():Boolean = false
override def union(that: TweetSet): TweetSet = {
((left union right) union that).incl(elem)
//(that.incl(elem)) union (left union right) -> Why does it cause stack overflow?
}
override def mostRetweeted: Tweet = {
def bigger(t1: Tweet, t2: Tweet, t3: Tweet): Tweet = {
def x1 = t1.retweets
def x2 = t2.retweets
def x3 = t3.retweets
if(x1 >= x2 && x1>=x3)t1
else if(x2 >= x1 && x2>=x3)t2
else t3
}
bigger(elem,left.mostRetweeted,right.mostRetweeted)
}
override def descendingByRetweet: TweetList = {
def createList(list: TweetList, tree: TweetSet):TweetList ={
val mostRet = tree.mostRetweeted
new Cons(mostRet,remove(mostRet).descendingByRetweet)
}
/*def createList(list: TweetList, tree: TweetSet):TweetList ={
val mostRet = tree.mostRetweeted
val cuttedTree = tree.remove(mostRet)
val newList = new Cons(mostRet,list)
createList(new Cons(mostRet,list),tree.remove(mostRet))
}*/
createList(Nil,this)
}
/**
* The following methods are already implemented
*/
def contains(x: Tweet): Boolean =
if (x.text < elem.text) left.contains(x)
else if (elem.text < x.text) right.contains(x)
else true
def incl(x: Tweet): TweetSet = {
if (x.text < elem.text) new NonEmpty(elem, left.incl(x), right)
else if (elem.text < x.text) new NonEmpty(elem, left, right.incl(x))
else this
}
def remove(tw: Tweet): TweetSet =
if (tw.text < elem.text) new NonEmpty(elem, left.remove(tw), right)
else if (elem.text < tw.text) new NonEmpty(elem, left, right.remove(tw))
else left.union(right)
def foreach(f: Tweet => Unit): Unit = {
f(elem)
left.foreach(f)
right.foreach(f)
}
}
trait TweetList {
def head: Tweet
def tail: TweetList
def isEmpty: Boolean
def foreach(f: Tweet => Unit): Unit =
if (!isEmpty) {
f(head)
tail.foreach(f)
}
}
object Nil extends TweetList {
def head = throw new java.util.NoSuchElementException("head of EmptyList")
def tail = throw new java.util.NoSuchElementException("tail of EmptyList")
def isEmpty = true
}
class Cons(val head: Tweet, val tail: TweetList) extends TweetList {
def isEmpty = false
}
object GoogleVsApple {
val google = List("android", "Android", "galaxy", "Galaxy", "nexus", "Nexus")
val apple = List("ios", "iOS", "iphone", "iPhone", "ipad", "iPad")
lazy val googleTweets: TweetSet = TweetReader.allTweets.filter(tweet => google.exists(word => tweet.text.contains(word)))
lazy val appleTweets: TweetSet = TweetReader.allTweets.filter(tweet => apple.exists(word => tweet.text.contains(word)))
/**
* A list of all tweets mentioning a keyword from either apple or google,
* sorted by the number of retweets.
*/
lazy val trending: TweetList = googleTweets.union(appleTweets).descendingByRetweet
}
object Main extends App {
// Print the trending tweets
GoogleVsApple.trending foreach println
}
| JoaoGFarias/MOOCs-Assigments | Coursera - Functional Programming Principles in Scala/Week 3/src/main/scala/objsets/TweetSet.scala | Scala | mit | 7,975 |
package rest
case class TsConnectionDetail(connectionType: String,
connectionId: String,
//properties,
companyName: String,
country: String,
identifiers: List[String],
//addresslines,
dispatchChannelId: String,
email: String) {
} | anderssonfilip/tradeshift-external-api-scala | container/src/main/scala/rest/TsConnectionDetail.scala | Scala | mit | 470 |
package sorm.test.types
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.matchers.ShouldMatchers
import sorm._
import sorm.test.MultiInstanceSuite
@RunWith(classOf[JUnitRunner])
class SeqOfSeqsSupportSuite extends FunSuite with ShouldMatchers with MultiInstanceSuite {
import SeqOfSeqsSupportSuite._
def entities = Set() + Entity[A]()
instancesAndIds foreach { case (db, dbId) =>
val a1 = db.save(A( Seq() ))
val a2 = db.save(A( Seq( Seq(2, 3), Seq(), Seq(7) ) ))
val a3 = db.save(A( Seq( Seq() ) ))
val a4 = db.save(A( Seq( Seq(78) ) ))
val a5 = db.save(A( Seq() ))
test(dbId + " - Empty Seq matches empty Seq and not Seq of empty Seq"){
db.query[A]
.whereEqual("a", Seq())
.fetch()
.should(
contain (a1) and
contain (a5) and
not contain (a3)
)
}
test(dbId + " - An empty item Seq does not match a nonexistent one"){
db.query[A]
.whereEqual("a.item", Seq())
.fetch()
.should(
not contain (a1) and
not contain (a5)
)
}
test(dbId + " - A partially matching seq with container seq containing other seq of same size"){
db.query[A]
.whereEqual("a.item", Seq(2))
.fetch()
.should( not contain (a2) )
}
}
}
object SeqOfSeqsSupportSuite {
case class A ( a : Seq[Seq[Int]] ) extends Persistable
} | cllu/sorm2 | src/test/scala/sorm/test/types/SeqOfSeqsSupportSuite.scala | Scala | mit | 1,474 |
import numbers.finite.Complex
import numbers.finite.PolarComplex
import numbers.finite.RectComplex
import sounder.Sounder._
import sounder.Util._
import scala.math.sin
import scala.math.sqrt
import scala.math.min
import scala.math.max
import scala.math.floor
import scala.math.round
import scala.math.ceil
import scala.math.pow
import scala.math.Pi
//returns the square of a number
def sq(x : Double) = x*x
print("Plug in the active band pass filter and press ENTER")
System.in.read
println("Playing a sequence of tones and recording input")
val Fs = 44100
val P = 1.0/Fs //sample period
def f(k : Int) = round(110.0*pow(2.0,k/2.0))
val ks = 0 to 12
val discard = 10000 //the number of samples we will discard to account for distortion
val L = 8820 //the number of samples we will keep (corresponds to 200ms of signal)
val j = RectComplex(0,1) //the square root of -1
//for each k = 0,...,12 compute the estimated spectrum value and collect the results
//into the sequence Q
val Q : Seq[Complex] = ks.map { k =>
print(f(k) + "Hz ")
def xtrue(t : Double) = sin(2*Pi*f(k)*t)
val (ys, xs) = playRecord(xtrue, 0, 1.0, Fs) //play for 1 second and record
if(min(xs.length, ys.length) < (discard+L)) throw new java.lang.ArrayIndexOutOfBoundsException("Number of samples recorded isn't enough for some reason")
//chop off first discard=10000 samples to avoid distortion when the soundcard
//starts up and take the following L=8820
val x = xs.slice(discard, discard+L)
val y = ys.slice(discard, discard+L)
val d = 2*Pi*f(k)*P
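// A and B below are single-bin DFT-style correlations of the recorded input x and output y
// with exp(-j*d*ell); C compensates for leakage from the conjugate (negative-frequency)
// component of the sinusoid, so Qk estimates the filter's frequency response at f(k).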
val C = PolarComplex(1,-d*(L+1))*sin(d*L)/sin(d)/L
val A = x.indices.foldLeft(Complex.zero)( (s,ell) => s + PolarComplex(1,-d*ell)*x(ell) )
val B = y.indices.foldLeft(Complex.zero)( (s,ell) => s + PolarComplex(1,-d*ell)*y(ell) )
val Qk = (B - B.conjugate*C)/(A -A.conjugate*C)
Qk //return estimate of the spectrum
}
//circuit parameters
val R1 = 3300
val C1 = 100e-9
val R2 = 15000
val C2 = 10e-9
val a = -R2*C1
val b = R2*C2 + R1*C1
val c = C1*R1*C2*R2
//hypothesised transfer function
def lambda(s : Complex) = s*a/(s*s*c + s*b + 1)
//hypothesised spectrum
def Lambda(f : Double) = lambda(j*2*Pi*f)
//write output to files
{
println("Writing hypothesised spectrum to file hypothesised.csv")
val fmin = 0.001
val fmax = f(ks.max) + 500.0
val file = new java.io.FileWriter("hypothesised.csv")
for(f <- fmin to fmax by 20.0) { //fairly high resolution plot of spectrum
file.write(f.toString.replace('E', 'e') + "\t" +
Lambda(f).magnitude.toString.replace('E', 'e') + "\t" +
mod2pi(Lambda(f).angle).toString.replace('E', 'e')+ "\n")
}
file.close
}
{
println("Writing measured spectrum to file measured.csv")
val file = new java.io.FileWriter("measured.csv")
for(k <- ks) {
file.write(f(k).toString.replace('E', 'e') + "\t" +
Q(k).magnitude.toString.replace('E', 'e') + "\t" +
mod2pi(Q(k).angle).toString.replace('E', 'e') + "\n")
}
file.close
}
println("Scala finished")
| robbymckilliam/testablelinearsystems | tests/activebandpass/spectrumtest.scala | Scala | agpl-3.0 | 3,000 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn.onnx
import scala.reflect.ClassTag
import com.intel.analytics.bigdl.dllib.nn
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.Table
/**
* Reshape the input tensor similar to numpy.reshape.
* First input is the data tensor, second input is a shape tensor which specifies the output shape.
* It outputs the reshaped tensor.
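*
* A minimal usage sketch (shapes and values chosen purely for illustration):
* {{{
*   val reshape = Reshape[Float](Array(2, 3))
*   val out = reshape.forward(Tensor[Float](6).rand()) // 6 elements reshaped to 2 x 3
* }}}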
* @param shape the target output shape; may instead be supplied at runtime as the second input tensor
* @param ev tensor numeric implicit for element type T
* @tparam T The numeric type of this module's parameters.
*/
class Reshape[T: ClassTag](var shape: Array[Int] = null)(implicit ev: TensorNumeric[T])
extends AbstractModule[Activity, Tensor[T], T] {
override def updateOutput(input: Activity): Tensor[T] = {
var dataTensor: Tensor[T] = null
if (input.isTable) {
val inputTable = input.toTable
require(inputTable.length() == 2)
dataTensor = inputTable.get[Tensor[T]](1).get
shape = inputTable.get[Tensor[T]](2).get.squeeze().toArray().map(ev.toType[Int])
} else if (input.isTensor) {
dataTensor = input.toTensor[T]
} else {
throw new IllegalArgumentException()
}
require(shape != null, "shape should not be null")
val innerReshaper = nn.Reshape(shape, batchMode = Option(false))
output = innerReshaper.forward(dataTensor)
output
}
override def updateGradInput(input: Activity, gradOutput: Tensor[T]): Activity = {
val inputTensor = if (input.isTable) {
input.toTable.get[Tensor[T]](1).get
} else if (input.isTensor) {
input.toTensor[T]
} else {
throw new IllegalArgumentException()
}
gradInput = inputTensor.zero()
gradInput
}
}
object Reshape {
def apply[T: ClassTag](shape: Array[Int] = null)
(implicit ev: TensorNumeric[T]): Reshape[T] = {
new Reshape[T](shape)
}
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/onnx/Reshape.scala | Scala | apache-2.0 | 2,577 |
See full implementation in JSON.scala
| ud3sh/coursework | functional-programming-in-scala-textbook/answerkey/parsing/09.answer.scala | Scala | unlicense | 38 |
/*
* Copyright 2011 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.scrooge.frontend
import com.twitter.scrooge.ast._
import scala.collection.mutable.ArrayBuffer
import scala.util.parsing.input.{NoPosition, Positional}
class PositionalException(message: String, node: Positional)
extends Exception(s"$message\n${node.pos.longString}")
case class TypeNotFoundException(name: String, node: Positional) extends PositionalException(name, node)
case class UndefinedConstantException(name: String, node: Positional) extends PositionalException(name, node)
case class UndefinedSymbolException(name: String, node: Positional) extends PositionalException(name, node)
case class TypeMismatchException(name: String, node: Positional) extends PositionalException(name, node)
case class QualifierNotFoundException(name: String, node: Positional) extends PositionalException(name, node)
case class ResolvedDocument(document: Document, resolver: TypeResolver) {
/**
* Given an ID, produce its FQN (e.g. a Java FQN) by appending the namespace.
*/
def qualifySimpleID(
sid: SimpleID,
language: String,
defaultNamespace: String,
fallbackToJavaNamespace: Boolean = true
): Identifier = {
val fallback = if (fallbackToJavaNamespace) document.namespace("java") else None
val namespace = document.namespace(language).orElse(fallback).getOrElse(SimpleID(defaultNamespace))
sid.addScope(namespace)
}
/**
* Given a type, produce its FQN (e.g. a Java FQN) by appending the namespace.
*/
def qualifyName(
name: NamedType,
language: String,
defaultNamespace: String
): Identifier = {
name.scopePrefix match {
case Some(filename) =>
resolver.includeMap(filename.name).qualifySimpleID(name.sid, language, defaultNamespace)
case None =>
qualifySimpleID(name.sid, language, defaultNamespace)
}
}
/**
* Collect the chain of services extended by the given service.
* Returns pairs (resolvedDoc, service) -- resolvedDoc contains service
* and should be used to qualify types used by the service.
*/
def collectParentServices(service: Service): Seq[(ResolvedDocument, Service)] = {
service.parent match {
case None => Nil
case Some(ServiceParent(sid, None)) =>
val parentService = resolver.resolveService(sid)
(this, parentService) +: collectParentServices(parentService)
case Some(ServiceParent(sid, Some(filename))) =>
val doc: ResolvedDocument = resolver.includeMap(filename.name)
val parentService = doc.resolver.resolveService(sid)
(doc, parentService) +: doc.collectParentServices(parentService)
}
}
/**
* Collect and resolve services extended by the given service.
* @return a list of [[ResolvedService ResolvedServices]] that contain FQNs for the parent services.
*/
def resolveParentServices(
service: Service,
namespaceLanguage: String,
defaultNamespace: String
): Seq[ResolvedService] = {
val resolvedServices: Seq[(ResolvedDocument, Service)] = collectParentServices(service)
resolvedServices.map { case (rdoc, svc) =>
ResolvedService(
rdoc.qualifySimpleID(svc.sid.toTitleCase, namespaceLanguage, defaultNamespace),
svc
)
}
}
}
case class ResolvedService(serviceID: Identifier, service: Service)
case class ResolvedDefinition(definition: Definition, resolver: TypeResolver)
case class TypeResolver(
typeMap: Map[String, FieldType] = Map.empty,
constMap: Map[String, ConstDefinition] = Map.empty,
serviceMap: Map[String, Service] = Map.empty,
includeMap: Map[String, ResolvedDocument] = Map.empty) {
protected def getResolver(includePath: String, pos: Positional = new Positional { pos = NoPosition }): TypeResolver = {
includeMap.get(includePath).getOrElse(throw new QualifierNotFoundException(includePath, pos)).resolver
}
def resolveFieldType(id: Identifier): FieldType = id match {
case SimpleID(name, _) => typeMap.get(name).getOrElse(throw new TypeNotFoundException(name, id))
case qid: QualifiedID => getResolver(qid.names.head, qid).resolveFieldType(qid.tail)
}
def resolveServiceParent(parent: ServiceParent): Service =
parent.filename match {
case None => resolveService(parent.sid)
case Some(filename) => getResolver(filename.name).resolveService(parent.sid)
}
def resolveService(sid: SimpleID): Service = serviceMap.get(sid.name).getOrElse(
throw new UndefinedSymbolException(sid.name, sid))
def resolveConst(id: Identifier): (FieldType, RHS) = id match {
case SimpleID(name, _) =>
val const = constMap.get(name).getOrElse(throw new UndefinedConstantException(name, id))
(const.fieldType, const.value)
case qid: QualifiedID => getResolver(qid.names.head).resolveConst(qid.tail)
}
/**
* Returns a new TypeResolver with the given include mapping added.
*/
def withInclude(inc: Include): TypeResolver = {
val resolver = TypeResolver()
val resolvedDocument = resolver(inc.document, Some(inc.prefix))
copy(includeMap = includeMap + (inc.prefix.name -> resolvedDocument))
}
/**
* Returns a new TypeResolver with the given type mapping added.
*/
def withType(name: String, fieldType: FieldType): TypeResolver = {
copy(typeMap = typeMap + (name -> fieldType))
}
/**
* Returns a new TypeResolver with the given constant added.
*/
def withConst(const: ConstDefinition): TypeResolver = {
copy(constMap = constMap + (const.sid.name -> const))
}
/**
* Returns a new TypeResolver with the given service added.
*/
def withService(service: Service): TypeResolver = {
copy(serviceMap = serviceMap + (service.sid.name -> service))
}
/**
* Resolves all types in the given document.
* @param scopePrefix the scope of the document if the document is an include
*/
def apply(doc: Document, scopePrefix: Option[SimpleID] = None): ResolvedDocument = {
var resolver = this
val includes = doc.headers.collect { case i: Include => i }
val defBuf = new ArrayBuffer[Definition](doc.defs.size)
for (i <- includes) {
try {
resolver = resolver.withInclude(i)
} catch {
case ex: Throwable =>
throw new FileParseException(filename = i.filePath, cause = ex)
}
}
for (d <- doc.defs) {
val ResolvedDefinition(d2, r2) = resolver(d, scopePrefix)
resolver = r2
defBuf += d2
}
ResolvedDocument(doc.copy(defs = defBuf.toSeq), resolver)
}
/**
* Resolves types in the given definition according to the current
* typeMap, and then returns an updated TypeResolver with the new
* definition bound, plus the resolved definition.
*/
def apply(definition: Definition, scopePrefix: Option[SimpleID]): ResolvedDefinition = {
definition match {
case d @ Typedef(sid, t, _) =>
val resolved = apply(t)
ResolvedDefinition(
d.copy(fieldType = resolved),
withType(sid.name, resolved))
case s @ Struct(sid, _, fs, _, _) =>
val resolved = s.copy(fields = fs.map(apply))
ResolvedDefinition(
resolved,
withType(sid.name, StructType(resolved, scopePrefix)))
case u @ Union(sid, _, fs, _, _) =>
val resolved = u.copy(fields = fs.map(apply))
ResolvedDefinition(
resolved,
withType(sid.name, StructType(resolved, scopePrefix)))
case e @ Exception_(sid, _, fs, _) =>
val resolved = e.copy(fields = fs.map(apply))
ResolvedDefinition(
resolved,
withType(sid.name, StructType(resolved, scopePrefix)))
case c @ ConstDefinition(_, t, v, _) =>
val fieldType = apply(t)
val resolved = c.copy(fieldType = fieldType, value = apply(v, fieldType))
ResolvedDefinition(resolved, withConst(resolved))
case s @ Service(sid, parent, fs, _) =>
// No need to modify Service, but check that we can resolve parent.
parent.foreach { serviceParent => resolveServiceParent(serviceParent) }
val resolved = s.copy(functions = fs.map(apply))
ResolvedDefinition(resolved, withService(resolved))
case e @ Enum(sid, _, _, _) =>
ResolvedDefinition(e, withType(sid.name, EnumType(e, scopePrefix)))
case s @ Senum(sid, _) =>
ResolvedDefinition(s, withType(sid.name, TString))
case d: EnumField => ResolvedDefinition(d, this)
case d: FunctionArgs => ResolvedDefinition(d, this)
case d: FunctionResult => ResolvedDefinition(d, this)
}
}
def apply(f: Function): Function = f match {
case Function(_, _, t, as, ts, _) =>
f.copy(funcType = apply(t), args = as.map(apply), throws = ts.map(apply))
}
def apply(f: Field): Field = {
val fieldType = apply(f.fieldType)
f.copy(
fieldType = fieldType,
default = f.default.map { const => apply(const, fieldType) })
}
def apply(t: FunctionType): FunctionType = t match {
case Void => Void
case OnewayVoid => OnewayVoid
case t: FieldType => apply(t)
}
def apply(t: FieldType): FieldType = t match {
case ReferenceType(id) => resolveFieldType(id)
case m @ MapType(k, v, _) => m.copy(keyType = apply(k), valueType = apply(v))
case s @ SetType(e, _) => s.copy(eltType = apply(e))
case l @ ListType(e, _) => l.copy(eltType = apply(e))
case b: BaseType => b
case e: EnumType => e
case s: StructType => s
}
def apply(c: RHS, fieldType: FieldType): RHS = c match {
// list values and map values look the same in Thrift, but different in Java and Scala
    // So we need type information in order to generate correct code.
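    // Illustrative example: a Thrift literal like [1, 2] parses to a ListRHS whether the declared
    // field type is list<i32> or set<i32>; the fieldType passed in here is what decides whether it
    // stays a ListRHS or is converted to a SetRHS below.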
case l @ ListRHS(elems) =>
fieldType match {
case ListType(eltType, _) => l.copy(elems = elems.map(e => apply(e, eltType)))
case SetType(eltType, _) => SetRHS(elems.map(e => apply(e, eltType)).toSet)
case _ => throw new TypeMismatchException("Expecting " + fieldType + ", found " + l, c)
}
case m @ MapRHS(elems) =>
fieldType match {
case MapType(keyType, valType, _) =>
m.copy(elems = elems.map { case (k, v) => (apply(k, keyType), apply(v, valType)) })
case st @ StructType(structLike: StructLike, _) =>
val fieldMultiMap: Map[String, Seq[(String, RHS)]] = elems.collect {
case (StringLiteral(fieldName), value) => (fieldName, value)
}.groupBy { case (fieldName, _) => fieldName }
val fieldMap: Map[String, RHS] = fieldMultiMap.collect {
case (fieldName: String, values: Seq[(String, RHS)]) if values.length == 1 =>
values.head
case (fieldName: String, _: Seq[(String, RHS)]) =>
throw new TypeMismatchException(s"Duplicate default values for ${fieldName} found for $fieldType", m)
// Can't have 0 elements here because fieldMultiMap is built by groupBy.
}
structLike match {
case u: Union =>
val definedFields = u.fields.collect {
case field if fieldMap.contains(field.sid.name) =>
(field, fieldMap(field.sid.name))
}
if (definedFields.length == 0)
throw new UndefinedConstantException(s"Constant value missing for union ${u.originalName}", m)
if (definedFields.length > 1)
throw new UndefinedConstantException(s"Multiple constant values for union ${u.originalName}", m)
val (field, rhs) = definedFields.head
val resolvedRhs = apply(rhs, field.fieldType)
UnionRHS(sid = st.sid, field = field, initializer = resolvedRhs)
case struct: StructLike =>
val structMap = Map.newBuilder[Field, RHS]
struct.fields.foreach { field =>
val fieldName = field.sid.name
if (fieldMap.contains(fieldName)) {
val resolvedRhs = apply(fieldMap(fieldName), field.fieldType)
structMap += field -> resolvedRhs
} else if (!field.requiredness.isOptional && field.default.isEmpty) {
throw new TypeMismatchException(s"Value required for ${fieldName} in $fieldType", m)
}
}
StructRHS(sid = st.sid, elems = structMap.result())
}
case _ => throw new TypeMismatchException("Expecting " + fieldType + ", found " + m, m)
}
case i @ IdRHS(id) => {
val (constFieldType, constRHS) = id match {
case sid: SimpleID =>
// When the rhs value is a simpleID, it can only be a constant
// defined in the same file
resolveConst(sid)
case qid @ QualifiedID(names) =>
fieldType match {
case EnumType(enum, _) =>
val resolvedFieldType = resolveFieldType(qid.qualifier)
val value = enum.values.find(_.sid.name == names.last).getOrElse(
throw new UndefinedSymbolException(qid.fullName, qid))
(resolvedFieldType, EnumRHS(enum, value))
case t => resolveConst(qid)
}
}
if (constFieldType != fieldType)
throw new TypeMismatchException(
s"Type mismatch: Expecting $fieldType, found ${id.fullName}: $constFieldType",
id
)
constRHS
}
case _ => c
}
}
| benhoyt/scrooge | scrooge-generator/src/main/scala/com/twitter/scrooge/frontend/TypeResolver.scala | Scala | apache-2.0 | 13,955 |
package archery
import org.scalacheck.Arbitrary._
import org.scalatest._
import prop._
import Check._
class GeomCheck extends PropSpec with Matchers with GeneratorDrivenPropertyChecks {
property("point invariants") {
forAll { (p: Point, g: Geom) =>
p.x2 shouldBe p.x
p.y2 shouldBe p.y
p.height shouldBe 0F
p.width shouldBe 0F
p.area shouldBe 0F
p.wraps(g) shouldBe false
}
}
property("geom isFinite") {
forAll { (x: Float, y: Float, x2: Float, y2: Float) =>
def ok(n: Float): Boolean = !(n.isNaN || n.isInfinite)
Point(x, y).isFinite shouldBe ok(x) && ok(y)
Box(x, y, x2, y2).isFinite shouldBe ok(x) && ok(y) && ok(x2) && ok(y2)
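      // Note: && binds more tightly than the alphanumeric `shouldBe`, so the two assertions above
      // compare isFinite to the conjunction of the ok(...) checks, i.e. `a shouldBe (b && c)`,
      // not `(a shouldBe b) && c`.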
}
}
property("geom height") {
forAll { (g: Geom) =>
g.height should be >= 0F
g.height shouldBe (g.y2 - g.y)
}
}
property("geom width") {
forAll { (g: Geom) =>
g.width should be >= 0F
g.width shouldBe (g.x2 - g.x)
}
}
property("geom area") {
forAll { (g: Geom) =>
g.area should be >= 0F
g.area shouldBe g.width * g.height
}
}
property("geom edges") {
forAll { (g: Geom) =>
g.lowerLeft shouldBe Point(g.x, g.y)
g.upperRight shouldBe Point(g.x2, g.y2)
}
}
}
| non/archery | core/src/test/scala/archery/GeomCheck.scala | Scala | mit | 1,267 |
package chana
import chana.avpath.Evaluator.Ctx
import org.apache.avro.generic.IndexedRecord
import scala.util.Failure
import scala.util.Success
import scala.util.Try
package object avpath {
def select(data: IndexedRecord, path: String): Try[List[Ctx]] = select(new Parser())(data, path)
def select(parser: Parser)(data: IndexedRecord, path: String): Try[List[Ctx]] = {
try {
val ast = parser.parse(path)
val ctxs = Evaluator.select(data, ast)
Success(ctxs)
} catch {
case ex: Throwable => Failure(ex)
}
}
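  // Illustrative usage sketch (the "/someField" path is hypothetical; the real syntax is whatever
  // Parser accepts, which is not shown in this file):
  //   select(record, "/someField") match {
  //     case Success(ctxs) => ctxs.foreach(println)
  //     case Failure(ex)   => ex.printStackTrace()
  //   }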
def update(data: IndexedRecord, path: String, value: Any): Try[List[Ctx]] = update(new Parser())(data, path, value)
def update(parser: Parser)(data: IndexedRecord, path: String, value: Any): Try[List[Ctx]] = {
try {
val ast = parser.parse(path)
val ctxs = Evaluator.update(data, ast, value)
Success(ctxs)
} catch {
case ex: Throwable => Failure(ex)
}
}
def updateJson(data: IndexedRecord, path: String, value: String): Try[List[Ctx]] = updateJson(new Parser())(data, path, value)
def updateJson(parser: Parser)(data: IndexedRecord, path: String, value: String): Try[List[Ctx]] = {
try {
val ast = parser.parse(path)
val ctxs = Evaluator.updateJson(data, ast, value)
Success(ctxs)
} catch {
case ex: Throwable => Failure(ex)
}
}
/**
   * Applies to arrays/maps only
*/
def insert(data: IndexedRecord, path: String, value: Any): Try[List[Ctx]] = insert(new Parser())(data, path, value)
def insert(parser: Parser)(data: IndexedRecord, path: String, value: Any): Try[List[Ctx]] = {
try {
val ast = parser.parse(path)
val ctxs = Evaluator.insert(data, ast, value)
Success(ctxs)
} catch {
case ex: Throwable => Failure(ex)
}
}
/**
   * Applies to arrays/maps only
*/
def insertJson(data: IndexedRecord, path: String, value: String): Try[List[Ctx]] = insertJson(new Parser())(data, path, value)
def insertJson(parser: Parser)(data: IndexedRecord, path: String, value: String): Try[List[Ctx]] = {
try {
val ast = parser.parse(path)
val ctxs = Evaluator.insertJson(data, ast, value)
Success(ctxs)
} catch {
case ex: Throwable => Failure(ex)
}
}
/**
   * Applies to arrays/maps only
*/
def insertAll(data: IndexedRecord, path: String, values: java.util.Collection[_]): Try[List[Ctx]] = insertAll(new Parser())(data, path, values)
def insertAll(parser: Parser)(data: IndexedRecord, path: String, values: java.util.Collection[_]): Try[List[Ctx]] = {
try {
val ast = parser.parse(path)
val ctxs = Evaluator.insertAll(data, ast, values)
Success(ctxs)
} catch {
case ex: Throwable => Failure(ex)
}
}
/**
   * Applies to arrays/maps only
*/
def insertAllJson(data: IndexedRecord, path: String, value: String): Try[List[Ctx]] = insertAllJson(new Parser())(data, path, value)
def insertAllJson(parser: Parser)(data: IndexedRecord, path: String, value: String): Try[List[Ctx]] = {
try {
val ast = parser.parse(path)
val ctxs = Evaluator.insertAllJson(data, ast, value)
Success(ctxs)
} catch {
case ex: Throwable => Failure(ex)
}
}
/**
   * Applies to array/map elements only
*/
def delete(data: IndexedRecord, path: String): Try[List[Ctx]] = delete(new Parser())(data, path)
def delete(parser: Parser)(data: IndexedRecord, path: String): Try[List[Ctx]] = {
try {
val ast = parser.parse(path)
val ctxs = Evaluator.delete(data, ast)
Success(ctxs)
} catch {
case ex: Throwable => Failure(ex)
}
}
/**
   * Applies to arrays/maps only
*/
def clear(data: IndexedRecord, path: String): Try[List[Ctx]] = clear(new Parser())(data, path)
def clear(parser: Parser)(data: IndexedRecord, path: String): Try[List[Ctx]] = {
try {
val ast = parser.parse(path)
val ctxs = Evaluator.clear(data, ast)
Success(ctxs)
} catch {
case ex: Throwable => Failure(ex)
}
}
}
| matthewtt/chana | avpath/src/main/scala/chana/avpath/package.scala | Scala | apache-2.0 | 4,043 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.regression
import org.json4s.{DefaultFormats, JValue}
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.rdd.RDD
@Since("0.8.0")
@Experimental
trait RegressionModel extends Serializable {
/**
* Predict values for the given data set using the model trained.
*
* @param testData RDD representing data points to be predicted
* @return RDD[Double] where each entry contains the corresponding prediction
*
*/
@Since("1.0.0")
def predict(testData: RDD[Vector]): RDD[Double]
/**
* Predict values for a single data point using the model trained.
*
* @param testData array representing a single data point
* @return Double prediction from the trained model
*
*/
@Since("1.0.0")
def predict(testData: Vector): Double
/**
* Predict values for examples stored in a JavaRDD.
* @param testData JavaRDD representing data points to be predicted
* @return a JavaRDD[java.lang.Double] where each entry contains the corresponding prediction
*
*/
@Since("1.0.0")
def predict(testData: JavaRDD[Vector]): JavaRDD[java.lang.Double] =
predict(testData.rdd).toJavaRDD().asInstanceOf[JavaRDD[java.lang.Double]]
}
private[mllib] object RegressionModel {
/**
* Helper method for loading GLM regression model metadata.
* @return numFeatures
*/
def getNumFeatures(metadata: JValue): Int = {
implicit val formats = DefaultFormats
(metadata \\ "numFeatures").extract[Int]
}
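  // Illustrative sketch: for metadata such as org.json4s.jackson.JsonMethods.parse("""{"numFeatures": 3}""")
  // (a made-up sample), getNumFeatures returns 3, matching the "numFeatures" key extracted above.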
}
| practice-vishnoi/dev-spark-1 | mllib/src/main/scala/org/apache/spark/mllib/regression/RegressionModel.scala | Scala | apache-2.0 | 2,394 |
package hash.tree
/**
* @author Simon Dirmeier { @literal [email protected]}
*/
class Leaf[T](obj: T) extends INode[T] {
def getObject: T = {
obj
}
}
| dirmeier/algorithms-and-datastructures | hash-tree/src/hash/tree/Leaf.scala | Scala | gpl-3.0 | 168 |
package utils
import utils.MyStream._
object worksheet {;import org.scalaide.worksheet.runtime.library.WorksheetSupport._; def main(args: Array[String])=$execute{;$skip(82); val res$0 =
range(1, 100, 10);System.out.println("""res0: utils.MyStream.AbstractLazyList = """ + $show(res$0))}
}
| julian-lanfranco/funcional-fcyt | lazyEvaluation/.worksheet/src/utils.worksheet.scala | Scala | gpl-2.0 | 310 |
package services
import javax.inject.{Inject, Singleton}
import play.api.Play
import play.api.Play.current
import play.api.libs.json.JsValue
import play.api.libs.ws.WSClient
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success, Try}
@Singleton
class LinkedinService @Inject()(val ws: WSClient) {
val linkedinClientId = Play.configuration.getString("linkedin.client.id").get
val linkedinClientSecret = Play.configuration.getString("linkedin.client.secret").get
val linkedinRequestedPermissions = Play.configuration.getString("linkedin.requestedPermissions").get
val linkedinState = GlobalConfig.applicationSecret
val linkedinAuthUri = Play.configuration.getString("linkedin.authUri").get
val linkedinRedirectUriSignIn = Play.configuration.getString("linkedin.redirectUri.signIn").get
val linkedinRedirectUriOrderStepAssessmentInfo = Play.configuration.getString("linkedin.redirectUri.order.assessmentInfo").get
val linkedinRedirectUriOrderStepAccountCreation = Play.configuration.getString("linkedin.redirectUri.order.accountCreation").get
val linkedinAccessTokenUri = Play.configuration.getString("linkedin.accessTokenUri").get
val linkedinProfileDataUri = Play.configuration.getString("linkedin.profileDataUri").get
var authCode: Option[String] = None
var accessToken: Option[String] = None
def getAuthCodeRequestUrl(redirectUri: String): String = {
linkedinAuthUri +
"""?response_type=code""" +
"""&client_id=""" + linkedinClientId +
"""&redirect_uri=""" + redirectUri +
"""&state=""" + linkedinState +
"""&scope=""" + linkedinRequestedPermissions
}
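  // Flow sketch implied by the methods below (summary, not part of the original source):
  //   1. send the user to getAuthCodeRequestUrl(redirectUri) and store the returned code in authCode,
  //   2. requestAccessToken(redirectUri) exchanges authCode for accessToken,
  //   3. getProfile fetches the member profile using that bearer token.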
def requestAccessToken(redirectUri: String) {
val wsCallParams = Map(
"grant_type" -> Seq("authorization_code"),
"code" -> Seq(authCode.get),
"redirect_uri" -> Seq(redirectUri),
"client_id" -> Seq(linkedinClientId),
"client_secret" -> Seq(linkedinClientSecret))
val futureAccessToken: Future[String] = ws.url(linkedinAccessTokenUri)
.withHeaders("Content-Type" -> "application/x-www-form-urlencoded")
.post(wsCallParams)
.map { response => (response.json \ "access_token").as[String]}
val accessTokenResult: Try[String] = Await.ready(futureAccessToken, Duration.Inf).value.get
accessTokenResult match {
case Failure(e) => throw e
case Success(token) => accessToken = Some(token)
}
}
def getProfile: JsValue = {
accessToken match {
case None => throw new Exception("Cannot get profile without access token first")
case Some(token) =>
val futureProfile: Future[JsValue] = ws.url(linkedinProfileDataUri)
.withHeaders("Connection" -> "Keep-Alive")
.withHeaders("Authorization" -> ("Bearer " + token))
.withHeaders("Accept-Language" -> "sv-SE, en-US")
.get()
.map { response => response.json}
val profileResult: Try[JsValue] = Await.ready(futureProfile, Duration.Inf).value.get
profileResult match {
case Failure(e) => throw e
case Success(profileJson) => profileJson
}
}
}
def invalidateAccessToken() {
accessToken = None
}
}
| PanzerKunst/redesigned-cruited.com-frontend | website/app/services/LinkedinService.scala | Scala | gpl-3.0 | 3,291 |
package com.idyria.osi.vui.implementation.javafx.factories
import javafx.scene.Group
import javafx.scene.Node
import javafx.scene.Parent
import javafx.scene.Scene
import javafx.scene.layout.Pane
import javafx.scene.layout.StackPane
import javafx.stage.Stage
import com.idyria.osi.vui.core.definitions.VUIFrame
import com.idyria.osi.vui.core.definitions.VUISGNode
import com.idyria.osi.vui.core.utils.UtilsTrait
import com.idyria.osi.vui.implementation.javafx.JavaFXUtilsTrait
import javafx.event.EventHandler
import javafx.stage.WindowEvent
trait JFXFrameFactory extends com.idyria.osi.vui.core.definitions.VUIFrameFactory[Node] with JavaFXUtilsTrait {
// Class Fields
//---------------------
// Methods
//------------------
override def createFrame: com.idyria.osi.vui.core.definitions.VUIFrame[Node, VUIFrame[Node, _]] = {
new Stage() with VUIFrame[Stage, VUIFrame[Stage, _]] {
// Init
//---------------
//-- Create a Default Scene with Group
//var topGroup = new Group
this.setScene(new Scene(new Group))
//-- Per default close, don't hide
// Members declared in com.idyria.osi.vui.core.components.scenegraph.SGGroup
//-------------------
/**
* Override Node Method to add children to Top Group
*/
this.onMatch("child.added") {
// Adding A group in the scene replaces the top group
/*case g: VUISGNode[_,_] =>
println("Adding Group: " + g.base)
this.getScene().setRoot(g.base.asInstanceOf[javafx.scene.Parent])*/
        // Adding nodes only adds to the top node
case n: VUISGNode[_, _] =>
          // Create a Pane to host the new node
n.base match {
case node: Pane => this.getScene().setRoot(node)
case node =>
println(s"Adding simple node")
var p = new StackPane
this.getScene().setRoot(p)
p.getChildren.add(n.base.asInstanceOf[Node])
}
// this.getScene().getRoot().asInstanceOf[Group].getChildren().add()
}
/**
* Remove scene content by setting an empty group
*/
override def clear: Unit = {
this.sceneProperty().get() match {
case null =>
case scene => scene.getRoot() match {
case g: Group => g.getChildren().clear()
case _ =>
}
}
super.clear
}
/**
* Does nothing
*/
/* def children: Seq[com.idyria.osi.vui.core.components.scenegraph.SGNode[javafx.stage.Stage]] = {
Nil
}
/**
* Does nothing
*/
override def removeChild(c: com.idyria.osi.vui.core.components.scenegraph.SGNode[Stage]): Unit = {
}*/
// Members declared in com.idyria.osi.vui.core.components.scenegraph.SGNode
//-------------------
def base: javafx.stage.Stage = this
/**
       * Revalidate requests a new layout pass
*/
def revalidate: Unit = this.base.getScene().getRoot().requestLayout()
/**
* Name maps to top group id
*/
override def setName(str: String): Unit = this.base.getScene.getRoot.setId(str)
// Members declared in com.idyria.osi.vui.core.components.main.VuiFrame
def height(height: Int): Unit = this.base.setHeight(height)
def width(width: Int): Unit = this.base.setWidth(width)
override def size_=(v: Tuple2[Double, Double]) = {
super.size_=(v)
this.base.setHeight(v._2)
this.base.setWidth(v._1)
}
override def title_=(title: String): Unit = {
this.base.setTitle(title)
}
override def close() = {
onUIThread(super.close)
}
/*override def show() = {
onUIThread(super.show())
}*/
override def visible_=(v: Boolean) = {
super.visible = (v)
v match {
case true => onUIThread(this.base.show())
          case false => onUIThread(this.base.close())
}
}
// Events
//---------------------
/**
* When the Window gets closed
*/
def onClose(cl: => Unit) = {
this.setOnCloseRequest(new EventHandler[WindowEvent] {
def handle(e: WindowEvent) = {
cl
}
})
}
this.onClose(this.@->("close"))
}.asInstanceOf[VUIFrame[Node, VUIFrame[Node, _]]]
}
// Imported Content
//----------------------
}
| richnou/vui2 | vui2-javafx/src/main/scala/com/idyria/osi/vui/implementation/javafx/factories/JFXFrameFactory.scala | Scala | agpl-3.0 | 4,683 |
// package formless
// trait FormRenderer {
// def render(form: Form): Component
// }
| underscoreio/formless | core/src/main/scala/formless/FormRenderer.scala | Scala | apache-2.0 | 89 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.examples.scala.basics
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
/**
* Simple example for demonstrating the use of SQL on a Stream Table in Scala.
*
* <p>Usage: <code>StreamSQLExample --planner <blink|flink></code><br>
*
* <p>This example shows how to:
* - Convert DataStreams to Tables
* - Register a Table under a name
* - Run a StreamSQL query on the registered Table
*
*/
object StreamSQLExample {
// *************************************************************************
// PROGRAM
// *************************************************************************
def main(args: Array[String]): Unit = {
val params = ParameterTool.fromArgs(args)
val planner = if (params.has("planner")) params.get("planner") else "blink"
// set up execution environment
val env = StreamExecutionEnvironment.getExecutionEnvironment
val tEnv = if (planner == "blink") { // use blink planner in streaming mode
val settings = EnvironmentSettings.newInstance()
.useBlinkPlanner()
.inStreamingMode()
.build()
StreamTableEnvironment.create(env, settings)
} else if (planner == "flink") { // use flink planner in streaming mode
val settings = EnvironmentSettings.newInstance()
.useOldPlanner()
.inStreamingMode()
.build()
StreamTableEnvironment.create(env, settings)
} else {
System.err.println("The planner is incorrect. Please run 'StreamSQLExample --planner <planner>', " +
"where planner (it is either flink or blink, and the default is blink) indicates whether the " +
"example uses flink planner or blink planner.")
return
}
val orderA: DataStream[Order] = env.fromCollection(Seq(
Order(1L, "beer", 3),
Order(1L, "diaper", 4),
Order(3L, "rubber", 2)))
val orderB: DataStream[Order] = env.fromCollection(Seq(
Order(2L, "pen", 3),
Order(2L, "rubber", 3),
Order(4L, "beer", 1)))
// convert DataStream to Table
val tableA = tEnv.fromDataStream(orderA, $"user", $"product", $"amount")
// register DataStream as Table
tEnv.createTemporaryView("OrderB", orderB, $"user", $"product", $"amount")
// union the two tables
val result = tEnv.sqlQuery(
s"""
|SELECT * FROM $tableA WHERE amount > 2
|UNION ALL
|SELECT * FROM OrderB WHERE amount < 2
""".stripMargin)
result.toAppendStream[Order].print()
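    // Expected rows with the inputs above (ordering and the "n>" sink prefix depend on parallelism):
    // Order(1,beer,3) and Order(1,diaper,4) from the first branch (amount > 2),
    // plus Order(4,beer,1) from OrderB (amount < 2).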
env.execute()
}
// *************************************************************************
// USER DATA TYPES
// *************************************************************************
case class Order(user: Long, product: String, amount: Int)
}
| clarkyzl/flink | flink-examples/flink-examples-table/src/main/scala/org/apache/flink/table/examples/scala/basics/StreamSQLExample.scala | Scala | apache-2.0 | 3,809 |
import scala.swing._
import scala.swing.Swing._
import java.awt.Color
import javax.swing.table._
class UsersView extends ScrollPane{
preferredSize = (1100,20 + 20 * GameAdmin.player )
background = Color.blue
val Cols:List[String] = List[String]("名前","資金") ::: Aquire.Hotels.toList
val tableModel = new DefaultTableModel(){
override def isCellEditable(r:Int,c:Int) = {
false
}
}
val t = new Table()
t.peer.setModel(tableModel)
contents = t
t.rowHeight = 20
for(c <- Cols){
tableModel.addColumn(c)
}
GameAdmin.players .foreach(p => tableModel.addRow(Array[java.lang.Object]()))
override def paint(g:Graphics2D) = {
super.paint(g)
update
}
def update() = {
var i = 0
for(p <- GameAdmin.players){
tableModel.setValueAt(p.name, i, 0)
tableModel.setValueAt(p.money, i, 1)
for(j <- 2 to tableModel.getColumnCount-1){
tableModel.setValueAt(p.stocks(tableModel.getColumnName(j)), i, j)
}
i += 1
}
}
}
| ksk9687/Aquire | src/main/scala/UsersView.scala | Scala | mit | 963 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package reflect
package api
import java.io.ObjectStreamException
/**
* A `TypeTag[T]` encapsulates the runtime type representation of some type `T`.
* Like [[scala.reflect.Manifest]], the prime use case of `TypeTag`s is to give access
* to erased types. However, `TypeTag`s should be considered to be a richer
* replacement of the pre-2.10 notion of a [[scala.reflect.Manifest Manifest]], that
* are, in addition, fully integrated with Scala reflection.
*
* There exist three different types of `TypeTags`:
*
* <ul>
* <li>[[scala.reflect.api.TypeTags#TypeTag]]. <br/>A full type descriptor of a Scala type.
* For example, a `TypeTag[List[String]]` contains all type information,
* in this case, of type `scala.List[String]`.</li>
*
* <li>[[scala.reflect.ClassTag]]. <br/>A partial type descriptor of a Scala type. For
* example, a `ClassTag[List[String]]` contains only the erased class
* type information, in this case, of type `scala.collection.immutable.List`.
* `ClassTag`s provide access only to the runtime class of a type.
* Analogous to [[scala.reflect.ClassManifest]]</li>
*
* <li>[[scala.reflect.api.TypeTags#WeakTypeTag]]. <br/>A type descriptor for abstract
* types (see description below).</li>
* </ul>
*
* Like [[scala.reflect.Manifest Manifest]]s, `TypeTag`s are always generated by the
* compiler, and can be obtained in three ways:
*
* === #1 Via the methods [[scala.reflect.api.TypeTags#typeTag typeTag]],
* [[scala.reflect#classTag classTag]], or [[scala.reflect.api.TypeTags#weakTypeTag weakTypeTag]] ===
*
* For example:
* {{{
* import scala.reflect.runtime.universe._
* val tt = typeTag[Int]
*
* import scala.reflect._
* val ct = classTag[String]
* }}}
*
* Each of these methods constructs a `TypeTag[T]` or `ClassTag[T]` for the given
* type argument `T`.
*
* === #2 Using an implicit parameter of type `TypeTag[T]`, `ClassTag[T]`, or `WeakTypeTag[T]` ===
*
* For example:
* {{{
* import scala.reflect.runtime.universe._
*
* def paramInfo[T](x: T)(implicit tag: TypeTag[T]): Unit = {
* val targs = tag.tpe match { case TypeRef(_, _, args) => args }
* println(s"type of \\$x has type arguments \\$targs")
* }
*
* scala> paramInfo(42)
* type of 42 has type arguments List()
*
* scala> paramInfo(List(1, 2))
* type of List(1, 2) has type arguments List(Int)
* }}}
*
* === #3 Context bound of a type parameter ===
*
* ...on methods or classes. The above example can be implemented as follows:
*
* {{{
* import scala.reflect.runtime.universe._
*
* def paramInfo[T: TypeTag](x: T): Unit = {
* val targs = typeOf[T] match { case TypeRef(_, _, args) => args }
* println(s"type of \\$x has type arguments \\$targs")
* }
*
* scala> paramInfo(42)
* type of 42 has type arguments List()
*
* scala> paramInfo(List(1, 2))
* type of List(1, 2) has type arguments List(Int)
* }}}
*
* === `WeakTypeTag`s ===
*
*`WeakTypeTag[T]` generalizes `TypeTag[T]`. Unlike a regular `TypeTag`, components of
* its type representation can be references to type parameters or abstract types.
* However, `WeakTypeTag[T]` tries to be as concrete as possible, i.e. if type tags
* are available for the referenced type arguments or abstract types, they are used to
* embed the concrete types into the `WeakTypeTag[T]`.
*
* Continuing the example above:
* {{{
* def weakParamInfo[T](x: T)(implicit tag: WeakTypeTag[T]): Unit = {
* val targs = tag.tpe match { case TypeRef(_, _, args) => args }
* println(s"type of \\$x has type arguments \\$targs")
* }
*
* scala> def foo[T] = weakParamInfo(List[T]())
* foo: [T]=> Unit
*
* scala> foo[Int]
* type of List() has type arguments List(T)
* }}}
*
* === TypeTags and Manifests ===
*
* `TypeTag`s correspond loosely to the pre-2.10 notion of
* [[scala.reflect.Manifest]]s. While [[scala.reflect.ClassTag]] corresponds to
* [[scala.reflect.ClassManifest]] and [[scala.reflect.api.TypeTags#TypeTag]] mostly
* corresponds to [[scala.reflect.Manifest]], other pre-2.10 `Manifest` types do not
* have a direct correspondence with a 2.10 "`Tag`" type.
*
* <ul>
* <li>'''[[scala.reflect.OptManifest]] is not supported.''' <br/>This is because `Tag`s
* can reify arbitrary types, so they are always available.<li>
*
* <li>'''There is no equivalent for [[scala.reflect.AnyValManifest]].''' <br/>Instead, one
* can compare their `Tag` with one of the base `Tag`s (defined in the corresponding
* companion objects) in order to find out whether or not it represents a primitive
* value class. Additionally, it's possible to simply use
* `<tag>.tpe.typeSymbol.isPrimitiveValueClass`.</li>
*
* <li>'''There are no replacement for factory methods defined in the `Manifest`
* companion objects'''. <br/>Instead, one could generate corresponding types using the
* reflection APIs provided by Java (for classes) and Scala (for types).</li>
*
* <li>'''Certain manifest operations(i.e., <:<, >:> and typeArguments) are not
* supported.''' <br/>Instead, one could use the reflection APIs provided by Java (for
* classes) and Scala (for types).</li>
*</ul>
*
* In Scala 2.10, [[scala.reflect.ClassManifest]]s are deprecated, and it is planned
* to deprecate [[scala.reflect.Manifest]] in favor of `TypeTag`s and `ClassTag`s in
* an upcoming point release. Thus, it is advisable to migrate any `Manifest`-based
* APIs to use `Tag`s.
*
* For more information about `TypeTag`s, see the
* [[https://docs.scala-lang.org/overviews/reflection/typetags-manifests.html Reflection Guide: TypeTags]]
*
* @see [[scala.reflect.ClassTag]], [[scala.reflect.api.TypeTags#TypeTag]], [[scala.reflect.api.TypeTags#WeakTypeTag]]
* @group ReflectionAPI
*/
trait TypeTags { self: Universe =>
import definitions._
/**
* If an implicit value of type `WeakTypeTag[T]` is required, the compiler will create one,
* and the reflective representation of `T` can be accessed via the `tpe` field.
* Components of `T` can be references to type parameters or abstract types. Note that `WeakTypeTag`
* makes an effort to be as concrete as possible, i.e. if `TypeTag`s are available for the referenced type arguments
* or abstract types, they are used to embed the concrete types into the WeakTypeTag. Otherwise the WeakTypeTag will
* contain a reference to an abstract type. This behavior can be useful, when one expects `T` to be perhaps be partially
* abstract, but requires special care to handle this case. However, if `T` is expected to be fully known, use
* [[scala.reflect.api.TypeTags#TypeTag]] instead, which statically guarantees this property.
*
* For more information about `TypeTag`s, see the
* [[https://docs.scala-lang.org/overviews/reflection/typetags-manifests.html Reflection Guide: TypeTags]]
*
* @see [[scala.reflect.api.TypeTags]]
* @group TypeTags
*/
@annotation.implicitNotFound(msg = "No WeakTypeTag available for ${T}")
trait WeakTypeTag[T] extends Equals with Serializable {
/**
* The underlying `Mirror` of this type tag.
*/
val mirror: Mirror
/**
* Migrates the expression into another mirror, jumping into a different universe if necessary.
*
* Migration means that all symbolic references to classes/objects/packages in the expression
* will be re-resolved within the new mirror (typically using that mirror's classloader).
*/
def in[U <: Universe with Singleton](otherMirror: scala.reflect.api.Mirror[U]): U # WeakTypeTag[T]
/**
* Reflective representation of type T.
*/
def tpe: Type
override def canEqual(x: Any) = x.isInstanceOf[WeakTypeTag[_]]
override def equals(x: Any) = x.isInstanceOf[WeakTypeTag[_]] && this.mirror == x.asInstanceOf[WeakTypeTag[_]].mirror && this.tpe == x.asInstanceOf[WeakTypeTag[_]].tpe
override def hashCode = mirror.hashCode * 31 + tpe.hashCode
override def toString = "WeakTypeTag[" + tpe + "]"
}
/**
* Type tags corresponding to primitive types and constructor/extractor for WeakTypeTags.
* @group TypeTags
*/
object WeakTypeTag {
val Byte : WeakTypeTag[scala.Byte] = TypeTag.Byte
val Short : WeakTypeTag[scala.Short] = TypeTag.Short
val Char : WeakTypeTag[scala.Char] = TypeTag.Char
val Int : WeakTypeTag[scala.Int] = TypeTag.Int
val Long : WeakTypeTag[scala.Long] = TypeTag.Long
val Float : WeakTypeTag[scala.Float] = TypeTag.Float
val Double : WeakTypeTag[scala.Double] = TypeTag.Double
val Boolean : WeakTypeTag[scala.Boolean] = TypeTag.Boolean
val Unit : WeakTypeTag[scala.Unit] = TypeTag.Unit
val Any : WeakTypeTag[scala.Any] = TypeTag.Any
val AnyVal : WeakTypeTag[scala.AnyVal] = TypeTag.AnyVal
val AnyRef : WeakTypeTag[scala.AnyRef] = TypeTag.AnyRef
val Object : WeakTypeTag[java.lang.Object] = TypeTag.Object
val Nothing : WeakTypeTag[scala.Nothing] = TypeTag.Nothing
val Null : WeakTypeTag[scala.Null] = TypeTag.Null
def apply[T](mirror1: scala.reflect.api.Mirror[self.type], tpec1: TypeCreator): WeakTypeTag[T] =
new WeakTypeTagImpl[T](mirror1.asInstanceOf[Mirror], tpec1)
def unapply[T](ttag: WeakTypeTag[T]): Option[Type] = Some(ttag.tpe)
}
/* @group TypeTags */
private class WeakTypeTagImpl[T](val mirror: Mirror, val tpec: TypeCreator) extends WeakTypeTag[T] {
lazy val tpe: Type = tpec(mirror)
def in[U <: Universe with Singleton](otherMirror: scala.reflect.api.Mirror[U]): U # WeakTypeTag[T] = {
val otherMirror1 = otherMirror.asInstanceOf[scala.reflect.api.Mirror[otherMirror.universe.type]]
otherMirror.universe.WeakTypeTag[T](otherMirror1, tpec)
}
@throws(classOf[ObjectStreamException])
private def writeReplace(): AnyRef = new SerializedTypeTag(tpec, concrete = false)
}
/**
* A `TypeTag` is a [[scala.reflect.api.TypeTags#WeakTypeTag]] with the additional
* static guarantee that all type references are concrete, i.e. it does <b>not</b> contain any references to
* unresolved type parameters or abstract types.
*
* @see [[scala.reflect.api.TypeTags]]
* @group TypeTags
*/
@annotation.implicitNotFound(msg = "No TypeTag available for ${T}")
trait TypeTag[T] extends WeakTypeTag[T] with Equals with Serializable {
/**
* @inheritdoc
*/
override def in[U <: Universe with Singleton](otherMirror: scala.reflect.api.Mirror[U]): U # TypeTag[T]
override def canEqual(x: Any) = x.isInstanceOf[TypeTag[_]]
override def equals(x: Any) = x.isInstanceOf[TypeTag[_]] && this.mirror == x.asInstanceOf[TypeTag[_]].mirror && this.tpe == x.asInstanceOf[TypeTag[_]].tpe
override def hashCode = mirror.hashCode * 31 + tpe.hashCode
override def toString = "TypeTag[" + tpe + "]"
}
/**
* Type tags corresponding to primitive types and constructor/extractor for WeakTypeTags.
* @group TypeTags
*/
object TypeTag {
val Byte: TypeTag[scala.Byte] = new PredefTypeTag[scala.Byte] (ByteTpe, _.TypeTag.Byte)
val Short: TypeTag[scala.Short] = new PredefTypeTag[scala.Short] (ShortTpe, _.TypeTag.Short)
val Char: TypeTag[scala.Char] = new PredefTypeTag[scala.Char] (CharTpe, _.TypeTag.Char)
val Int: TypeTag[scala.Int] = new PredefTypeTag[scala.Int] (IntTpe, _.TypeTag.Int)
val Long: TypeTag[scala.Long] = new PredefTypeTag[scala.Long] (LongTpe, _.TypeTag.Long)
val Float: TypeTag[scala.Float] = new PredefTypeTag[scala.Float] (FloatTpe, _.TypeTag.Float)
val Double: TypeTag[scala.Double] = new PredefTypeTag[scala.Double] (DoubleTpe, _.TypeTag.Double)
val Boolean: TypeTag[scala.Boolean] = new PredefTypeTag[scala.Boolean] (BooleanTpe, _.TypeTag.Boolean)
val Unit: TypeTag[scala.Unit] = new PredefTypeTag[scala.Unit] (UnitTpe, _.TypeTag.Unit)
val Any: TypeTag[scala.Any] = new PredefTypeTag[scala.Any] (AnyTpe, _.TypeTag.Any)
val AnyVal: TypeTag[scala.AnyVal] = new PredefTypeTag[scala.AnyVal] (AnyValTpe, _.TypeTag.AnyVal)
val AnyRef: TypeTag[scala.AnyRef] = new PredefTypeTag[scala.AnyRef] (AnyRefTpe, _.TypeTag.AnyRef)
val Object: TypeTag[java.lang.Object] = new PredefTypeTag[java.lang.Object] (ObjectTpe, _.TypeTag.Object)
val Nothing: TypeTag[scala.Nothing] = new PredefTypeTag[scala.Nothing] (NothingTpe, _.TypeTag.Nothing)
val Null: TypeTag[scala.Null] = new PredefTypeTag[scala.Null] (NullTpe, _.TypeTag.Null)
def apply[T](mirror1: scala.reflect.api.Mirror[self.type], tpec1: TypeCreator): TypeTag[T] = {
(mirror1: AnyRef) match {
case m: scala.reflect.runtime.JavaMirrors#MirrorImpl
if cacheMaterializedTypeTags && tpec1.getClass.getName.contains("$typecreator")
&& tpec1.getClass.getDeclaredFields.length == 0 => // excludes type creators that splice in bound types.
m.typeTag(tpec1).asInstanceOf[TypeTag[T]]
case _ =>
new TypeTagImpl[T](mirror1.asInstanceOf[Mirror], tpec1)
}
}
def unapply[T](ttag: TypeTag[T]): Option[Type] = Some(ttag.tpe)
private val cacheMaterializedTypeTags = !java.lang.Boolean.getBoolean("scala.reflect.runtime.disable.typetag.cache")
}
private[reflect] def TypeTagImpl[T](mirror: Mirror, tpec: TypeCreator): TypeTag[T] = new TypeTagImpl[T](mirror, tpec)
/* @group TypeTags */
private class TypeTagImpl[T](mirror: Mirror, tpec: TypeCreator) extends WeakTypeTagImpl[T](mirror, tpec) with TypeTag[T] {
override def in[U <: Universe with Singleton](otherMirror: scala.reflect.api.Mirror[U]): U # TypeTag[T] = {
val otherMirror1 = otherMirror.asInstanceOf[scala.reflect.api.Mirror[otherMirror.universe.type]]
otherMirror.universe.TypeTag[T](otherMirror1, tpec)
}
@throws(classOf[ObjectStreamException])
private def writeReplace(): AnyRef = new SerializedTypeTag(tpec, concrete = true)
}
/* @group TypeTags */
// This class only exists to silence MIMA complaining about a binary incompatibility.
// Only the top-level class (api.PredefTypeCreator) should be used.
@deprecated("This class only exists to silence MIMA complaining about a binary incompatibility.", since="forever")
@annotation.unused
private class PredefTypeCreator[T](copyIn: Universe => Universe#TypeTag[T]) extends TypeCreator {
def apply[U <: Universe with Singleton](m: scala.reflect.api.Mirror[U]): U # Type = {
copyIn(m.universe).asInstanceOf[U # TypeTag[T]].tpe
}
}
/* @group TypeTags */
private class PredefTypeTag[T](_tpe: Type, copyIn: Universe => Universe#TypeTag[T]) extends TypeTagImpl[T](rootMirror, new api.PredefTypeCreator(copyIn)) {
override lazy val tpe: Type = _tpe
@throws(classOf[ObjectStreamException])
private def writeReplace(): AnyRef = new SerializedTypeTag(tpec, concrete = true)
}
/**
* Shortcut for `implicitly[WeakTypeTag[T]]`
* @group TypeTags
*/
def weakTypeTag[T](implicit attag: WeakTypeTag[T]) = attag
/**
* Shortcut for `implicitly[TypeTag[T]]`
* @group TypeTags
*/
def typeTag[T](implicit ttag: TypeTag[T]) = ttag
// big thanks to Viktor Klang for this brilliant idea!
/**
* Shortcut for `implicitly[WeakTypeTag[T]].tpe`
* @group TypeTags
*/
def weakTypeOf[T](implicit attag: WeakTypeTag[T]): Type = attag.tpe
/**
* Shortcut for `implicitly[TypeTag[T]].tpe`
* @group TypeTags
*/
def typeOf[T](implicit ttag: TypeTag[T]): Type = ttag.tpe
/**
* Type symbol of `x` as derived from a type tag.
* @group TypeTags
*/
def symbolOf[T: WeakTypeTag]: TypeSymbol
}
// This class should be final, but we can't do that in Scala 2.11.x without breaking
// binary incompatibility.
@SerialVersionUID(1L)
private[scala] class SerializedTypeTag(var tpec: TypeCreator, var concrete: Boolean) extends Serializable {
import scala.reflect.runtime.universe.{TypeTag, WeakTypeTag, runtimeMirror}
@throws(classOf[ObjectStreamException])
private def readResolve(): AnyRef = {
val loader: ClassLoader = try {
Thread.currentThread().getContextClassLoader()
} catch {
case se: SecurityException => null
}
val m = runtimeMirror(loader)
if (concrete) TypeTag(m, tpec)
else WeakTypeTag(m, tpec)
}
}
/* @group TypeTags */
private class PredefTypeCreator[T](copyIn: Universe => Universe#TypeTag[T]) extends TypeCreator {
def apply[U <: Universe with Singleton](m: scala.reflect.api.Mirror[U]): U # Type = {
copyIn(m.universe).asInstanceOf[U # TypeTag[T]].tpe
}
}
| lrytz/scala | src/reflect/scala/reflect/api/TypeTags.scala | Scala | apache-2.0 | 17,033 |
package com.scaledaction.weatherservice.client.service
import akka.actor.{ ActorSystem, Props }
import com.scaledaction.core.akka.HttpServerApp
import com.scaledaction.core.cassandra.HasCassandraConfig
import com.scaledaction.core.spark.HasSparkConfig
import com.scaledaction.core.spark.SparkUtils
object ClientServiceApp extends App with HttpServerApp with HasCassandraConfig with HasSparkConfig {
val cassandraConfig = getCassandraConfig
val sparkConfig = getSparkConfig
//TODO - Need to add ApplicationConfig and replace the hard-coded "sparkAppName" value with application.app-name
val sc = SparkUtils.getActiveOrCreateSparkContext(cassandraConfig, sparkConfig.master, "WeatherService")
implicit val system = ActorSystem("client-service")
val service = system.actorOf(Props(new ClientService(sc)), "client-service")
startServer(service)
sys addShutdownHook {
sc.stop
}
}
| Qyoom/WeatherStation | client-service/src/main/scala/com/scaledaction/weatherservice/client/service/ClientServiceApp.scala | Scala | apache-2.0 | 910 |
package com.socrata.datacoordinator.resources.collocation
import java.util.UUID
import com.socrata.datacoordinator.id.{DatasetId, DatasetInternalName}
import com.socrata.datacoordinator.service.collocation._
import com.socrata.http.server.{HttpRequest, HttpResponse}
case class SecondaryManifestsMoveJobResource(storeGroup: String,
jobId: String,
provider: CoordinatorProvider with MetricProvider) extends CollocationSodaResource {
override def get = doGetSecondaryMoveJobs
import provider._
private def doGetSecondaryMoveJobs(req: HttpRequest): HttpResponse = {
withJobId(jobId, req) { id =>
try {
val result = coordinator.secondaryGroups(storeGroup).map { group =>
secondaryMoveJobsForStoreGroup(group, id)
}.reduce(_ + _)
responseOK(result)
} catch {
case StoreGroupNotFound(group) => storeGroupNotFound(group)
}
}
}
// throws ErrorResult
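  // Sketch of the logic below: for each instance in the collocation group, load the move jobs for this
  // job id, look up the max cost of every involved dataset in the requested store group, keep only moves
  // whose source store belongs to that group, and report Completed once every remaining move has finished
  // both its from-store and to-store phases.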
private def secondaryMoveJobsForStoreGroup(storeGroup: String, jobId: UUID): CollocationResult = {
coordinator.secondaryGroupConfigs.get(storeGroup) match {
case Some(groupConfig) =>
val moves = metric.collocationGroup.flatMap { instance =>
val moveJobs = coordinator.secondaryMoveJobs(instance, jobId).fold(throw _, _.moves)
val datasetCostMap = moveJobs.map(_.datasetId).toSet.map { dataset: DatasetId =>
val internalName = DatasetInternalName(instance, dataset)
val cost = metric.datasetMaxCost(storeGroup, internalName).fold(throw _, identity)
(dataset, cost)
}.toMap
moveJobs.filter { moveJob =>
groupConfig.instances.contains(moveJob.fromStoreId)
}.map { moveJob =>
Move(
datasetInternalName = DatasetInternalName(instance, moveJob.datasetId),
storeIdFrom = moveJob.fromStoreId,
storeIdTo = moveJob.toStoreId,
cost = datasetCostMap(moveJob.datasetId),
complete = Some(moveJob.moveFromStoreComplete && moveJob.moveToStoreComplete)
)
}
}
val status = if (moves.forall(_.complete.get)) Completed else InProgress
CollocationResult(
id = Some(jobId),
status = status,
cost = Move.totalCost(moves),
moves = moves.toSeq
)
case None => throw StoreGroupNotFound(storeGroup)
}
}
}
| socrata-platform/data-coordinator | coordinator/src/main/scala/com/socrata/datacoordinator/resources/collocation/SecondaryManifestsMoveJobResource.scala | Scala | apache-2.0 | 2,506 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.catnap
package cancelables
import cats.effect.IO
import minitest.SimpleTestSuite
import monix.execution.exceptions.{CompositeException, DummyException}
object SingleAssignCancelableFSuite extends SimpleTestSuite {
test("cancel") {
var effect = 0
val s = SingleAssignCancelableF[IO].unsafeRunSync()
val b = BooleanCancelableF.unsafeApply(IO { effect += 1 })
s.set(b).unsafeRunSync()
assert(!s.isCanceled.unsafeRunSync(), "!s.isCanceled")
s.cancel.unsafeRunSync()
assert(s.isCanceled.unsafeRunSync(), "s.isCanceled")
assert(b.isCanceled.unsafeRunSync())
assert(effect == 1)
s.cancel.unsafeRunSync()
assert(effect == 1)
}
test("cancel (plus one)") {
var effect = 0
val extra = BooleanCancelableF.unsafeApply(IO { effect += 1 })
val b = BooleanCancelableF.unsafeApply(IO { effect += 2 })
val s = SingleAssignCancelableF.plusOne(extra).unsafeRunSync()
s.set(b).unsafeRunSync()
s.cancel.unsafeRunSync()
assert(s.isCanceled.unsafeRunSync())
assert(b.isCanceled.unsafeRunSync())
assert(extra.isCanceled.unsafeRunSync())
assert(effect == 3)
s.cancel.unsafeRunSync()
assert(effect == 3)
}
test("cancel on single assignment") {
val s = SingleAssignCancelableF[IO].unsafeRunSync()
s.cancel.unsafeRunSync()
assert(s.isCanceled.unsafeRunSync())
var effect = 0
val b = BooleanCancelableF.unsafeApply(IO { effect += 1 })
s.set(b).unsafeRunSync()
assert(b.isCanceled.unsafeRunSync())
assert(effect == 1)
s.cancel.unsafeRunSync()
assert(effect == 1)
}
test("cancel on single assignment (plus one)") {
var effect = 0
val extra = BooleanCancelableF.unsafeApply(IO { effect += 1 })
val s = SingleAssignCancelableF.plusOne(extra).unsafeRunSync()
s.cancel.unsafeRunSync()
assert(s.isCanceled.unsafeRunSync(), "s.isCanceled")
assert(extra.isCanceled.unsafeRunSync(), "extra.isCanceled")
assert(effect == 1)
val b = BooleanCancelableF.unsafeApply(IO { effect += 1 })
s.set(b).unsafeRunSync()
assert(b.isCanceled.unsafeRunSync())
assert(effect == 2)
s.cancel.unsafeRunSync()
assert(effect == 2)
}
test("throw exception on multi assignment") {
val s = SingleAssignCancelableF[IO].unsafeRunSync()
val b1 = CancelableF.empty[IO]
s.set(b1).unsafeRunSync()
intercept[IllegalStateException] {
s.set(CancelableF.empty[IO]).unsafeRunSync()
}
()
}
test("throw exception on multi assignment when canceled") {
val s = SingleAssignCancelableF[IO].unsafeRunSync()
s.cancel.unsafeRunSync()
val b1 = CancelableF.empty[IO]
s.set(b1).unsafeRunSync()
intercept[IllegalStateException] {
s.set(CancelableF.empty[IO]).unsafeRunSync()
}
()
}
test("cancel when both reference and `extra` throw") {
var effect = 0
val dummy1 = DummyException("dummy1")
val extra = CancelableF.unsafeApply[IO](IO { effect += 1; throw dummy1 })
val s = SingleAssignCancelableF.plusOne(extra).unsafeRunSync()
val dummy2 = DummyException("dummy2")
val b = CancelableF.unsafeApply[IO](IO { effect += 1; throw dummy2 })
s.set(b).unsafeRunSync()
try {
s.cancel.unsafeRunSync()
fail("should have thrown")
} catch {
case CompositeException((_: DummyException) :: (_: DummyException) :: Nil) =>
()
case other: Throwable =>
throw other
}
assertEquals(effect, 2)
}
}
| alexandru/monifu | monix-catnap/shared/src/test/scala/monix/catnap/cancelables/SingleAssignCancelableFSuite.scala | Scala | apache-2.0 | 4,164 |
package com.nutomic.ensichat.core.messages.body
import java.nio.ByteBuffer
import com.nutomic.ensichat.core.messages.Message
import com.nutomic.ensichat.core.util.BufferUtils
object UserInfo {
val Type = 7
/**
* Constructs [[UserInfo]] instance from byte array.
*/
def read(array: Array[Byte]): UserInfo = {
val bb = ByteBuffer.wrap(array)
new UserInfo(getValue(bb), getValue(bb))
}
private def getValue(bb: ByteBuffer): String = {
val length = BufferUtils.getUnsignedInt(bb).toInt
val bytes = new Array[Byte](length)
bb.get(bytes, 0, length)
new String(bytes, Message.Charset)
}
}
/**
* Holds display name and status of the sender.
*/
final case class UserInfo(name: String, status: String) extends MessageBody {
override def protocolType = -1
override def contentType = UserInfo.Type
override def write: Array[Byte] = {
val b = ByteBuffer.allocate(length)
put(b, name)
put(b, status)
b.array()
}
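  // Byte layout implied by write/put above and UserInfo.read:
  //   [4-byte unsigned length][name bytes][4-byte unsigned length][status bytes],
  // with both strings encoded via Message.Charset -- hence the fixed 8-byte overhead in length below.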
def put(b: ByteBuffer, value: String): ByteBuffer = {
val bytes = value.getBytes(Message.Charset)
BufferUtils.putUnsignedInt(b, bytes.length)
b.put(bytes)
}
override def length = 8 + name.getBytes(Message.Charset).length +
status.getBytes(Message.Charset).length
}
| Nutomic/ensichat | core/src/main/scala/com/nutomic/ensichat/core/messages/body/UserInfo.scala | Scala | mpl-2.0 | 1,267 |
package io.buoyant.linkerd
import com.fasterxml.jackson.annotation.{JsonIgnore, JsonSubTypes, JsonTypeInfo}
import com.twitter.finagle.Stack
import com.twitter.finagle.buoyant.PathMatcher
import io.buoyant.config.PolymorphicConfig
import io.buoyant.router.StackRouter.Client.{PathParams, PerPathParams}
/**
* Svc is the polymorphic type the jackson will use to deserialize the
* `service` section of a linkerd config.
*/
@JsonTypeInfo(
use = JsonTypeInfo.Id.NAME,
include = JsonTypeInfo.As.EXISTING_PROPERTY,
property = "kind",
visible = true,
defaultImpl = classOf[DefaultSvcImpl]
)
@JsonSubTypes(Array(
new JsonSubTypes.Type(value = classOf[DefaultSvcImpl], name = "io.l5d.global"),
new JsonSubTypes.Type(value = classOf[StaticSvcImpl], name = "io.l5d.static")
))
abstract class Svc extends PolymorphicConfig {
@JsonIgnore
def pathParams: PerPathParams
}
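// Note on the annotations above: the `kind` property selects the concrete type -- "io.l5d.static"
// deserializes to StaticSvcImpl, while "io.l5d.global" (or an absent kind, via defaultImpl)
// deserializes to DefaultSvcImpl.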
/**
* DefaultSvc mixes in SvcConfig so that path-stack configuration properties
* can be specified directly on the `service` object in the linkerd config.
* This is a trait so that it can be mixed in to protocol specific versions.
*/
trait DefaultSvc extends SvcConfig { self: Svc =>
kind = "io.l5d.global"
@JsonIgnore
private[this] val matchAll = PathMatcher("/")
@JsonIgnore
private[this] val mk: Map[String, String] => Stack.Params = { vars =>
params(vars)
}
@JsonIgnore
def pathParams = PerPathParams(Seq(PathParams(matchAll, mk)))
}
class DefaultSvcImpl extends Svc with DefaultSvc
/**
* StaticSvc consists of a list of PrefixConfigs. This is a trait so that it
* can be mixed in to protocol specific versions.
*/
trait StaticSvc { self: Svc =>
val configs: Seq[SvcPrefixConfig]
@JsonIgnore
def pathParams = PerPathParams(configs.map { config =>
PathParams(config.prefix, config.params)
})
}
class StaticSvcImpl(val configs: Seq[SvcPrefixConfig]) extends Svc with StaticSvc
class SvcPrefixConfig(val prefix: PathMatcher) extends SvcConfig
| denverwilliams/linkerd | linkerd/core/src/main/scala/io/buoyant/linkerd/Svc.scala | Scala | apache-2.0 | 1972 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.carbondata.api.CarbonStore
/**
* delete segments by id list
*/
// scalastyle:off
object DeleteSegmentById {
def extractSegmentIds(segmentIds: String): Seq[String] = {
segmentIds.split(",").toSeq
}
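  // e.g. extractSegmentIds("0,1,2") yields Seq("0", "1", "2") (sample ids for illustration).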
def deleteSegmentById(spark: SparkSession, dbName: String, tableName: String,
segmentIds: Seq[String]): Unit = {
TableAPIUtil.validateTableExists(spark, dbName, tableName)
val carbonTable = CarbonEnv.getCarbonTable(Some(dbName), tableName)(spark)
CarbonStore.deleteLoadById(segmentIds, dbName, tableName, carbonTable)
}
def main(args: Array[String]): Unit = {
if (args.length < 3) {
System.err.println(
"Usage: DeleteSegmentByID <store path> <table name> <segment id list>")
System.exit(1)
}
val storePath = TableAPIUtil.escape(args(0))
val (dbName, tableName) = TableAPIUtil.parseSchemaName(TableAPIUtil.escape(args(1)))
val segmentIds = extractSegmentIds(TableAPIUtil.escape(args(2)))
val spark = TableAPIUtil.spark(storePath, s"DeleteSegmentById: $dbName.$tableName")
CarbonEnv.getInstance(spark).carbonMetastore.
checkSchemasModifiedTimeAndReloadTable(TableIdentifier(tableName, Some(dbName)))
deleteSegmentById(spark, dbName, tableName, segmentIds)
}
}
| sgururajshetty/carbondata | integration/spark2/src/main/scala/org/apache/spark/util/DeleteSegmentById.scala | Scala | apache-2.0 | 2,214 |
package drt.client.logger
import scala.annotation.elidable
import scala.annotation.elidable._
trait Logger {
/*
* Use @elidable annotation to completely exclude functions from the compiler generated byte-code based on
* the specified level. In a production build most logging functions will simply disappear with no runtime
* performance penalty.
*
* Specify level as a compiler parameter
* > scalac -Xelide-below INFO
*/
@elidable(FINEST) def trace(msg: String, e: Exception): Unit
@elidable(FINEST) def trace(msg: String): Unit
@elidable(FINE) def debug(msg: String, e: Exception): Unit
@elidable(FINE) def debug(msg: String): Unit
@elidable(INFO) def info(msg: String, e: Exception): Unit
@elidable(INFO) def info(msg: String): Unit
@elidable(WARNING) def warn(msg: String, e: Exception): Unit
@elidable(WARNING) def warn(msg: String): Unit
@elidable(SEVERE) def error(msg: String, e: Exception): Unit
@elidable(SEVERE) def error(msg: String): Unit
@elidable(SEVERE) def fatal(msg: String, e: Exception): Unit
@elidable(SEVERE) def fatal(msg: String): Unit
def enableServerLogging(url: String): Unit
def disableServerLogging(): Unit
}
object LoggerFactory {
private[logger] def createLogger(name: String) = {}
lazy val consoleAppender = new BrowserConsoleAppender
lazy val popupAppender = new PopUpAppender
/**
* Create a logger that outputs to browser console
*/
def getLogger(name: String): Logger = {
val nativeLogger = Log4JavaScript.log4javascript.getLogger(name)
nativeLogger.addAppender(consoleAppender)
new L4JSLogger(nativeLogger)
}
/**
* Create a logger that outputs to a separate popup window
*/
def getPopUpLogger(name: String): Logger = {
val nativeLogger = Log4JavaScript.log4javascript.getLogger(name)
nativeLogger.addAppender(popupAppender)
new L4JSLogger(nativeLogger)
}
}
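// Minimal usage sketch; the "AppMain" logger name is illustrative and not part of the
// original client code. With `scalac -Xelide-below INFO`, the debug call below is
// removed from the generated bytecode thanks to the @elidable annotations on Logger.
object LoggerUsageExample {
  private val log: Logger = LoggerFactory.getLogger("AppMain")

  def demo(): Unit = {
    log.debug("elided entirely in a production build")
    log.info("visible at INFO and above")
  }
}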
| somanythings/drt-scalajs-spa-exploration | client/src/main/scala/spatutorial/client/logger/LoggerFactory.scala | Scala | apache-2.0 | 1,904 |
package scavlink.link
import akka.util.Timeout
import com.typesafe.config.Config
import scavlink.settings.SettingsCompanion
import scala.concurrent.duration.FiniteDuration
case class VehicleSettings(apiTimeout: Timeout,
channelOverrideInterval: FiniteDuration,
autoloadParameters: Boolean,
autoloadMission: Boolean,
autostartTelemetry: Boolean)
object VehicleSettings extends SettingsCompanion[VehicleSettings]("vehicle") {
def fromSubConfig(config: Config): VehicleSettings =
VehicleSettings(
getDuration(config, "api-timeout"),
getDuration(config, "channel-override-interval"),
config.getBoolean("autoload-parameters"),
config.getBoolean("autoload-mission"),
config.getBoolean("autostart-telemetry")
)
}
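// Sketch of the HOCON block this companion is expected to read (the section name comes
// from the "vehicle" key passed to SettingsCompanion; the values below are illustrative):
//
//   vehicle {
//     api-timeout = 5s
//     channel-override-interval = 100ms
//     autoload-parameters = true
//     autoload-mission = true
//     autostart-telemetry = false
//   }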
| nickolasrossi/scavlink | src/main/scala/scavlink/link/VehicleSettings.scala | Scala | mit | 858 |
package controllers
import java.util.{Date, UUID}
import javax.inject.Inject
import model.{KillProcessRequest, ProcessStatusType, TaskBackoff}
import org.joda.time.DateTime
import com.hbc.svc.sundial.v2
import com.hbc.svc.sundial.v2.models.json._
import dao.SundialDaoFactory
import play.api.libs.json.Json
import play.api.mvc._
class Processes @Inject()(daoFactory: SundialDaoFactory)
extends InjectedController {
def get(processDefinitionName: Option[String],
startTime: Option[DateTime],
endTime: Option[DateTime],
maxRecords: Option[Int],
validStatuses: List[v2.models.ProcessStatus]) = Action {
val validStatusTypes = {
if (validStatuses.isEmpty) {
None
} else {
Some(validStatuses.map(ModelConverter.toInternalProcessStatusType))
}
}
val result: Seq[v2.models.Process] = daoFactory.withSundialDao {
implicit dao =>
val processes = dao.processDao.findProcesses(processDefinitionName,
startTime.map(_.toDate()),
endTime.map(_.toDate()),
validStatusTypes,
maxRecords)
processes.map(ModelConverter.toExternalProcess)
}
Ok(Json.toJson(result))
}
def getByProcessId(processId: UUID) = Action {
val resultOpt = daoFactory.withSundialDao { implicit dao =>
dao.processDao
.loadProcess(processId)
.map(ModelConverter.toExternalProcess)
}
resultOpt match {
case Some(result) => Ok(Json.toJson(result))
case _ => NotFound
}
}
def postRetryByProcessId(processId: UUID) = Action {
daoFactory.withSundialDao { dao =>
val processOpt = dao.processDao.loadProcess(processId)
processOpt match {
case None => NotFound
case Some(process) =>
if (process.status.statusType == ProcessStatusType.Running || process.status.statusType == ProcessStatusType.Failed) {
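            // Retry strategy: grant each failed task one extra attempt and clear its
            // backoff, then flip the process back to Running so it gets rescheduled.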
val tasksToRetry =
dao.processDao.loadTasksForProcess(processId).filter { task =>
task.status match {
case model.TaskStatus.Failure(_, _) => true
case _ => false
}
}
val taskDefinitions =
dao.processDefinitionDao.loadTaskDefinitions(process.id)
taskDefinitions.foreach { taskDefinition =>
if (tasksToRetry.exists(
_.taskDefinitionName == taskDefinition.name)) {
val updatedDefinition = taskDefinition.copy(
limits = taskDefinition.limits.copy(
maxAttempts = taskDefinition.limits.maxAttempts + 1),
backoff = TaskBackoff(0, 0))
dao.processDefinitionDao.saveTaskDefinition(updatedDefinition)
}
}
val newProcess =
process.copy(status = model.ProcessStatus.Running())
dao.processDao.saveProcess(newProcess)
Created
} else {
BadRequest(
"Cannot retry a process that is has successfully completed")
}
}
}
}
def postKillByProcessId(processId: UUID) = Action {
daoFactory.withSundialDao { dao =>
dao.triggerDao.saveKillProcessRequest(
KillProcessRequest(UUID.randomUUID(), processId, new Date()))
}
Created
}
}
| gilt/sundial | app/controllers/Processes.scala | Scala | mit | 3,567 |
package models.daos
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.daos.DelegableAuthInfoDAO
import com.mohiva.play.silhouette.impl.providers.OAuth1Info
import com.mongodb.casbah.Imports
import com.mongodb.casbah.Imports._
import models.Database
/**
* The DAO to store the OAuth1 information.
*
*/
class OAuth1InfoDAO extends DelegableAuthInfoDAO[OAuth1Info] with AuthInfoDAO[OAuth1Info] {
override def data: Imports.MongoCollection = Database.db("oAuth1Info")
override def convertToDB(loginInfo: LoginInfo, authInfo: OAuth1Info) : DBObject = {
MongoDBObject(
"providerID" -> loginInfo.providerID,
"providerKey" -> loginInfo.providerKey,
"token" -> authInfo.token,
"secret" -> authInfo.secret
)
}
override def loadFromDB(dbAuthInfo : DBObject) : OAuth1Info = {
OAuth1Info(
Database.loadString(dbAuthInfo, "token"),
Database.loadString(dbAuthInfo, "secret")
)
}
}
| OpenCompare/OpenCompare | org.opencompare/play-app/app/models/daos/OAuth1InfoDAO.scala | Scala | apache-2.0 | 973 |
// Copyright 2015-2016 Ricardo Gladwell.
// Licensed under the GNU Affero General Public License.
// See the LICENSE file for more information.
package com.is_hosted_by.api
import java.net.InetAddress
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import net._
trait Network {
this: Dns =>
val ipRanges: Seq[IpPrefix]
val name: String
def inNetwork(address: String) : Future[Boolean] = {
for {
ip <- resolve(address)
} yield ipRanges.exists{ prefix => prefix.inRange(ip) }
}
}
| rgladwell/is-aws-api | src/main/scala/com/is_hosted_by/api/Network.scala | Scala | agpl-3.0 | 550 |
package com.twitter.finatra.json.tests.internal.streaming
import com.fasterxml.jackson.databind.JsonNode
import com.twitter.finatra.conversions.buf._
import com.twitter.finatra.json.internal.streaming.{ParsingState, JsonArrayChunker}
import com.twitter.finatra.json.{JsonDiff, FinatraObjectMapper}
import com.twitter.finatra.json.internal.streaming.ParsingState._
import com.twitter.inject.Test
import com.twitter.io.Buf
class JsonObjectDecoderTest extends Test {
"decode" in {
val decoder = new JsonArrayChunker()
assertDecode(
decoder,
input = "[1",
output = Seq(),
remainder = "1",
pos = 1,
openBraces = 1,
parsingState = InsideArray)
assertDecode(
decoder,
input = ",2",
output = Seq("1"),
remainder = "2",
pos = 1)
assertDecode(
decoder,
input = ",3",
output = Seq("2"),
remainder = "3",
pos = 1)
assertDecode(
decoder,
input = "]",
output = Seq("3"),
remainder = "",
pos = 0,
openBraces = 0,
done = true)
}
val mapper = FinatraObjectMapper.create()
"decode with nested objects" in {
val jsonObj = """
{
"sub_object": {
"msg": "hi"
}
}
"""
assertSingleJsonParse(jsonObj)
}
"decode json inside a string" in {
val jsonObj = """{"foo": "bar"}"""
assertSingleJsonParse(jsonObj)
}
"Caling decode when already finished" in {
val decoder = new JsonArrayChunker()
decoder.decode(Buf.Utf8("[]"))
intercept[Exception] {
decoder.decode(Buf.Utf8("{}"))
}
}
private def assertDecode(
decoder: JsonArrayChunker,
input: String,
output: Seq[String],
remainder: String,
pos: Int,
openBraces: Int = 1,
parsingState: ParsingState = InsideArray,
done: Boolean = false): Unit = {
val result = decoder.decode(Buf.Utf8(input))
result map { _.utf8str } should equal(output)
val copiedByteBuffer = decoder.copiedByteBuffer.duplicate()
copiedByteBuffer.position(0)
val recvBuf = Buf.ByteBuffer.Shared(copiedByteBuffer)
println("Result remainder: " + recvBuf.utf8str)
recvBuf.utf8str should equal(remainder)
decoder.copiedByteBuffer.position() should equal(pos)
decoder.openBraces should equal(openBraces)
decoder.parsingState should equal(parsingState)
decoder.done should equal(done)
}
def assertSingleJsonParse(jsonObj: String): Unit = {
val decoder = new JsonArrayChunker()
val result = decoder.decode(Buf.Utf8("[" + jsonObj + "]"))
val nodes = result map mapper.parse[JsonNode]
nodes.size should be(1)
JsonDiff.jsonDiff(nodes.head, jsonObj)
}
}
| syamantm/finatra | jackson/src/test/scala/com/twitter/finatra/json/tests/internal/streaming/JsonObjectDecoderTest.scala | Scala | apache-2.0 | 2,704 |
/*
* Copyright (C) 2015 Noorq, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.typesafe.scalalogging.slf4j.LazyLogging
import play.api.Application
import play.api.GlobalSettings
import play.api.mvc.WithFilters
import filters.BasicAuthFilter
object Global extends WithFilters(BasicAuthFilter) with GlobalSettings with LazyLogging {
override def onStart(app: Application) {
logger.info("Application has started")
}
override def onStop(app: Application) {
logger.info("Application shutdown...")
}
} | mailrest/mailrest | app/Global.scala | Scala | apache-2.0 | 1,093 |
package com.bisphone.launcher
import scala.concurrent.Future
import com.bisphone.util.{ArgumentExtractor, ValueExtractor}
object Task {
sealed trait Result
object Result {
case class Successful(message: String) extends Result
case class Unsuccessful(code: Int, message: String ,cause: Option[Throwable]) extends Result
val Done = 0x0
val Error = 0x1
val Usage = 0x10
val Config = 0x20
val Enviroment = 0x30
val Remote = 0x40
val Internal = 0x50
def successful(message: String) = Result.Successful(message)
def usageError(message: String) = Unsuccessful(Usage, message, None)
def usageError(message: String, cause: Throwable) = Unsuccessful(Usage, message, Some(cause))
def configError(message: String) = Unsuccessful(Config, message, None)
def configError(message: String, cause: Throwable)= Unsuccessful(Config, message, Some(cause))
def environmentError(message: String) = Unsuccessful(Enviroment, message, None)
def environmentError(message: String, cause: Throwable) = Unsuccessful(Enviroment, message, Some(cause))
def remoteError(message: String) = Unsuccessful(Remote, message, None)
def remoteError(message: String, cause: Throwable) = Unsuccessful(Remote, message, Some(cause))
def internalError(message: String) = Unsuccessful(Internal, message, None)
def internalError(message: String, cause: Throwable) = Unsuccessful(Internal, message, Some(cause))
}
case class Props(key: String, usage: String)
trait Syntax extends ArgumentExtractor.Syntax { self =>
import Result._
@inline protected def successful(message: String): Result = Result.Successful(message)
@inline protected def usageError(message: String): Result = Unsuccessful(Usage, message, None)
@inline protected def usageError(message: String, cause: Throwable): Result = Unsuccessful(Usage, message, Some(cause))
@inline protected def configError(message: String): Result = Unsuccessful(Config, message, None)
@inline protected def configError(message: String, cause: Throwable): Result = Unsuccessful(Config, message, Some(cause))
@inline protected def environmentError(message: String): Result = Unsuccessful(Enviroment, message, None)
@inline protected def environmentError(message: String, cause: Throwable): Result = Unsuccessful(Enviroment, message, Some(cause))
@inline protected def remoteError(message: String): Result = Unsuccessful(Remote, message, None)
@inline protected def remoteError(message: String, cause: Throwable): Result = Unsuccessful(Remote, message, Some(cause))
@inline protected def internalError(message: String): Result = Unsuccessful(Internal, message, None)
@inline protected def internalError(message: String, cause: Throwable): Result = Unsuccessful(Internal, message, Some(cause))
@inline protected def debug(message: String)(implicit context: LauncherContext.Flat): Unit = context.logger.debug(message)
@inline protected def debug(message: String, exception: Throwable)(implicit context: LauncherContext.Flat): Unit = context.logger.debug(message, exception)
@inline protected def trace(message: String)(implicit context: LauncherContext.Flat): Unit = context.logger.trace(message)
@inline protected def trace(message: String, exception: Throwable)(implicit context: LauncherContext.Flat): Unit = context.logger.trace(message, exception)
@inline protected def info(message: String)(implicit context: LauncherContext.Flat): Unit = context.logger.info(message)
@inline protected def info(message: String, exception: Throwable)(implicit context: LauncherContext.Flat): Unit = context.logger.info(message, exception)
@inline protected def warn(message: String)(implicit context: LauncherContext.Flat): Unit = context.logger.warn(message)
@inline protected def warn(message: String, exception: Throwable)(implicit context: LauncherContext.Flat): Unit = context.logger.warn(message, exception)
@inline protected def error(message: String)(implicit context: LauncherContext.Flat): Unit = context.logger.error(message)
        @inline protected def error(message: String, exception: Throwable)(implicit context: LauncherContext.Flat): Unit = context.logger.error(message, exception)
}
}
trait Task extends Task.Syntax {
type In
type Fn = LauncherContext.Flat => In => Future[Task.Result]
def props: Task.Props
def extract: LauncherContext.Flat => ValueExtractor.Result[In]
def run: Fn
protected implicit def syntax$taskresult$to$future(rsl: Task.Result): Future[Task.Result] = Future successful rsl
protected def func(fn: => Fn): Fn = fn
override def toString(): String = {
val ref = super.toString.replace(getClass.getName, "")
s"${getClass.getName}(${props},${ref})"
}
} | reza-samei/bisphone-std | src/main/scala/com/bisphone/launcher/Task.scala | Scala | mit | 5,011 |
package com.sksamuel.elastic4s
import org.elasticsearch.action.get.GetResponse
import org.elasticsearch.client.{ Client, Requests }
import org.elasticsearch.index.VersionType
import org.elasticsearch.search.fetch.source.FetchSourceContext
import scala.concurrent.Future
import scala.language.implicitConversions
/** @author Stephen Samuel */
trait GetDsl extends IndexesTypesDsl {
def get(id: Any) = new GetWithIdExpectsFrom(id.toString)
implicit def any2get(id: Any): GetWithIdExpectsFrom = new GetWithIdExpectsFrom(id.toString)
class GetWithIdExpectsFrom(id: String) {
@deprecated("type is not used for get requests, remove the type name", "1.5.5")
def from(index: IndexesTypes): GetDefinition = new GetDefinition(index, id)
@deprecated("type is not used for get requests, remove the type name", "1.5.5")
def from(index: IndexType): GetDefinition = new GetDefinition(index.index, id)
@deprecated("type is not used for get requests, remove the type name", "1.5.5")
def from(index: String, `type`: String): GetDefinition = from(IndexesTypes(index, `type`))
def from(index: String): GetDefinition = new GetDefinition(index, id)
}
implicit object GetDslExecutable extends Executable[GetDefinition, GetResponse] {
override def apply(c: Client, t: GetDefinition): Future[GetResponse] = {
injectFuture(c.get(t.build, _))
}
}
}
case class GetDefinition(indexesTypes: IndexesTypes, id: String) {
private val _builder = Requests.getRequest(indexesTypes.index).`type`(indexesTypes.typ.orNull).id(id)
def build = _builder
def fetchSourceContext(context: Boolean) = {
_builder.fetchSourceContext(new FetchSourceContext(context))
this
}
def fetchSourceContext(context: FetchSourceContext) = {
_builder.fetchSourceContext(context)
this
}
def fields(fields: String*) = {
_builder.fields(fields: _*)
this
}
def parent(p: String) = {
_builder.parent(p)
this
}
def ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields: Boolean) = {
_builder.ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields)
this
}
def version(version: Long) = {
_builder.version(version)
this
}
def versionType(versionType: VersionType) = {
_builder.versionType(versionType)
this
}
def preference(pref: Preference): GetDefinition = preference(pref.elastic)
def preference(pref: String): GetDefinition = {
_builder.preference(pref)
this
}
def realtime(r: Boolean) = {
_builder.realtime(r)
this
}
def refresh(refresh: Boolean) = {
_builder.refresh(refresh)
this
}
def routing(r: String) = {
_builder.routing(r)
this
}
}
object GetDefinitionExecutable extends Executable[GetDefinition, GetResponse] {
override def apply(client: Client, t: GetDefinition): Future[GetResponse] = injectFuture(client.get(t.build, _))
}
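// A hedged usage sketch of the DSL above; the id, index name, and field names are made
// up for illustration. Execution against a cluster goes through the implicit
// GetDslExecutable once a Client is in scope.
object GetDslUsageExample extends GetDsl {
  val request: GetDefinition =
    (get(1234) from "bands").fields("name", "genre").realtime(true)
}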
| l15k4/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/GetDsl.scala | Scala | apache-2.0 | 2,895 |
package com.bryanjswift.model
import org.joda.time.DateTime
sealed case class User(
id:Option[String], name:Option[String], username:String
, password:String, created:DateTime, modified:DateTime
)
object User {
def apply(username:String, password:String):User = {
val now = new DateTime
User(None, None, username, password, now, now)
}
}
| bryanjswift/bryanjswift.com | src/main/scala/model/User.scala | Scala | mit | 359 |
package aecor.example.transaction
import java.util.UUID
import aecor.example.common.Timestamp
import aecor.example.transaction.transaction.Transactions
import aecor.runtime.Eventsourced
import aecor.runtime.akkapersistence.AkkaPersistenceRuntime
import aecor.util.Clock
import cats.implicits._
import cats.effect.{ ContextShift, Effect }
import scodec.codecs.implicits._
object deployment {
def deploy[F[_]: Effect: ContextShift](runtime: AkkaPersistenceRuntime[UUID],
clock: Clock[F]): F[Transactions[F]] =
runtime
.deploy(
"Transaction",
EventsourcedAlgebra.behavior[F].enrich(clock.instant.map(Timestamp(_))),
EventsourcedAlgebra.tagging
)
.map(Eventsourced.Entities.rejectable(_))
}
| notxcain/aecor | modules/example/src/main/scala/aecor/example/transaction/deployment.scala | Scala | mit | 778 |
package sangria.validation.rules
import sangria.ast
import sangria.ast.AstVisitorCommand
import sangria.validation._
import scala.collection.mutable.{Set => MutableSet}
/** Unique operation names
*
* A GraphQL document is only valid if all defined operations have unique names.
*/
class UniqueOperationNames extends ValidationRule {
override def visitor(ctx: ValidationContext) = new AstValidatingVisitor {
val knownOpNames = MutableSet[String]()
override val onEnter: ValidationVisit = {
case ast.OperationDefinition(_, Some(name), _, _, _, _, _, pos) =>
if (knownOpNames contains name)
Left(Vector(DuplicateOperationNameViolation(name, ctx.sourceMapper, pos.toList)))
else {
knownOpNames += name
AstVisitorCommand.RightContinue
}
}
}
}
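// For illustration, a document this rule rejects because two operations share a name
// (the operation and field names are made up):
//
//   query getDogName { dog { name } }
//   query getDogName { dog { owner { name } } }
//
// Visiting the second definition finds "getDogName" already in knownOpNames and reports
// a DuplicateOperationNameViolation.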
| sangria-graphql/sangria | modules/core/src/main/scala/sangria/validation/rules/UniqueOperationNames.scala | Scala | apache-2.0 | 824 |
package com.daodecode.scalax
object NonEmptyString extends (String => Option[String]) {
/**
   * @param s a String to check for non-emptiness
* @return `None` if `s` is `null` or `""`, `Some(s)` otherwise
* @since 0.2.1
*
* Example
* {{{
* scala> NonEmptyString(null)
* res0: Option[String] = None
* scala> NonEmptyString("")
* res1: Option[String] = None
* scala> NonEmptyString(" boo ")
* res2: Option[String] = Some( boo )
* }}}
*/
@inline
def apply(s: String): Option[String] =
if (s == null || s.isEmpty) None else Some(s)
/**
* Extractor for non-empty strings
*
   * @param s String to check for non-emptiness
* @return `None` if `s` is `null` or `""`, `Some(s)` otherwise
* @since 0.2.1
*
* Example
* {{{
* scala> null match {
* | case NonEmptyString(_) => "no way!"
* | case _ => "works!"
* |}
* res0: String = works!
* scala> "" match {
* | case NonEmptyString(_) => "no way!"
* | case _ => "works!"
* |}
* res1: String = works!
* scala> "works!" match {
* | case NonEmptyString(s) => s
* | case _ => "no way!"
* |}
* res2: String = works!
* }}}
*/
@inline
def unapply(s: String): Option[String] = apply(s)
}
| jozic/scalax-collection | src/main/scala/com/daodecode/scalax/NonEmptyString.scala | Scala | bsd-3-clause | 1,591 |
import scala.collection.immutable.TreeSet
import scala.io.Source
object ManasaAndStones extends App {
val console = Source.stdin.bufferedReader()
val t = console.readLine().toInt
(1 to t).foreach {
i =>
val n = console.readLine().toInt
val a = console.readLine().toInt
val b = console.readLine().toInt
println(treasures(Set(0), 1, n, a, b).mkString(" "))
}
def treasures(l: Set[Int], depth: Int, n: Int, a: Int, b: Int): Set[Int] = {
if(depth == n) l
else treasures(TreeSet((l.map(_ + a).toList ++ l.map(_ + b).toList) :_*), depth + 1, n, a, b)
}
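  // Worked example: n = 3, a = 1, b = 2
  //   depth 1: {0}
  //   depth 2: {1, 2}
  //   depth 3: {2, 3, 4}  -> printed as "2 3 4"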
} | PaulNoth/hackerrank | practice/algorithms/implementation/manasa_and_stones/ManasaAndStones.scala | Scala | mit | 598 |
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package au.com.cba.omnia.maestro.example
import java.io.File
import org.joda.time.{DateTime, DateTimeZone}
import com.twitter.scalding.{Config, Execution}
import au.com.cba.omnia.parlour.ParlourImportOptions
import au.com.cba.omnia.maestro.api._, Maestro._
import au.com.cba.omnia.maestro.example.thrift.Customer
/** Configuration for `CustomerSqoopImportExecution` */
case class CustomerImportConfig(config: Config) {
val maestro = MaestroConfig(
conf = config,
source = "sales",
domain = "books",
tablename = "customer_import"
)
val sqoopImport = maestro.sqoopImport(
initialOptions = Some(ParlourImportDsl().splitBy("id"))
)
val load = maestro.load[Customer](
none = "null"
)
val catTable = maestro.partitionedHiveTable[Customer, String](
partition = Partition.byField(Fields[Customer].Cat),
tablename = "by_cat"
)
}
/** Example customer execution, importing data via Sqoop */
object CustomerSqoopImportExecution {
/** Create an example customer sqoop import execution */
def execute: Execution[CustomerImportStatus] = for {
conf <- Execution.getConfig.map(CustomerImportConfig(_))
(path, sqoopCount) <- sqoopImport(conf.sqoopImport)
(pipe, loadInfo) <- load[Customer](conf.load, List(path))
loadSuccess <- loadInfo.withSuccess
hiveCount <- viewHive(conf.catTable, pipe)
} yield (CustomerImportStatus(sqoopCount, loadSuccess.written, hiveCount))
}
case class CustomerImportStatus(sqoopCount: Long, loadCount: Long, hiveCount: Long)
| toddmowen/maestro | maestro-example/src/main/scala/au/com/cba/omnia/maestro/example/CustomerSqoopImportExecution.scala | Scala | apache-2.0 | 2,234 |
package reactivemongo.api
import scala.language.higherKinds
import scala.collection.generic.CanBuildFrom
import scala.concurrent.{ ExecutionContext, Future }
/**
* '''EXPERIMENTAL:''' Base class to implement test-only/mocked [[Cursor]].
*
* All functions failed future by default,
* make sure to override the required functions with appropriate results
* to execute the tests.
*
* {{{
* import scala.concurrent.{ ExecutionContext, Future }
*
* import reactivemongo.api.{ Cursor, TestCursor }
*
* final class MyTestCursor[T](h: T) extends TestCursor[T] {
* override def head(implicit ctx: ExecutionContext): Future[T] =
* Future.successful(h)
* }
*
* val cursor: Cursor[String] = new MyTestCursor("foo")
*
* def foo(implicit ec: ExecutionContext) {
* cursor.headOption // Future.failed by default
*
* cursor.head // Future.successful("foo")
*
* ()
* }
* }}}
*/
class TestCursor[T] extends Cursor[T] {
import Cursor.NoSuchResultException
def collect[M[_]](maxDocs: Int, err: Cursor.ErrorHandler[M[T]])(
implicit
cbf: CanBuildFrom[M[_], T, M[T]],
ec: ExecutionContext): Future[M[T]] = Future.failed[M[T]](NoSuchResultException)
def foldBulks[A](z: => A, maxDocs: Int)(suc: (A, Iterator[T]) => Cursor.State[A], err: Cursor.ErrorHandler[A])(
implicit
ctx: ExecutionContext): Future[A] = Future.failed[A](NoSuchResultException)
def foldBulksM[A](z: => A, maxDocs: Int)(
suc: (A, Iterator[T]) => Future[Cursor.State[A]],
err: Cursor.ErrorHandler[A])(implicit ctx: ExecutionContext): Future[A] =
Future.failed[A](NoSuchResultException)
def foldWhile[A](z: => A, maxDocs: Int)(suc: (A, T) => Cursor.State[A], err: Cursor.ErrorHandler[A])(
implicit
ctx: ExecutionContext): Future[A] = Future.failed[A](NoSuchResultException)
def foldWhileM[A](z: => A, maxDocs: Int)(suc: (A, T) => Future[Cursor.State[A]], err: Cursor.ErrorHandler[A])(
implicit
ctx: ExecutionContext): Future[A] = Future.failed[A](NoSuchResultException)
def head(implicit ctx: ExecutionContext): Future[T] =
Future.failed[T](NoSuchResultException)
def headOption(implicit ctx: ExecutionContext): Future[Option[T]] =
Future.failed[Option[T]](NoSuchResultException)
def peek[M[_]](
maxDocs: Int)(implicit cbf: CanBuildFrom[M[_], T, M[T]], ec: ExecutionContext): Future[Cursor.Result[M[T]]] = Future.failed[Cursor.Result[M[T]]](NoSuchResultException)
}
| ReactiveMongo/ReactiveMongo | test/src/main/scala-2.13-/api/TestCursor.scala | Scala | apache-2.0 | 2,436 |
package edu.gemini.model.p1.visibility
import edu.gemini.model.p1.immutable.TargetVisibility
import edu.gemini.spModel.core.Angle.{DMS, HMS}
import edu.gemini.spModel.core.{Declination, RightAscension}
private case class Degrees(raw: Double) extends Ordered[Degrees] {
val deg: Double = ((raw % 360.0) + 360.0) % 360.0 // [0, 360)
def compare(that: Degrees): Int = deg.compare(that.deg)
}
private sealed trait CoordinateDegrees[T] {
def toDegrees(obj: T): Degrees
}
private object CoordinateDegrees {
implicit object RaDegrees extends CoordinateDegrees[RightAscension] {
def toDegrees(hms: RightAscension) = Degrees(hms.toAngle.toDegrees)
}
implicit object DecDegrees extends CoordinateDegrees[Declination] {
def toDegrees(dms: Declination) = Degrees(dms.toDegrees)
}
}
private case class DegRange(start: Degrees, end: Degrees) {
def this(s: Double, e: Double) = this(Degrees(s), Degrees(e))
private val comp: Degrees => Boolean =
if (start < end)
d => (start <= d) && (d < end)
else
d => (start <= d) || (0 <= d.deg && d < end)
def includes[T](coord: T)(implicit ev: CoordinateDegrees[T]): Boolean =
comp(ev.toDegrees(coord))
}
private case class VisibilityRange(v: TargetVisibility, r: DegRange)
private[visibility] case class VisibilityRangeList(ranges: List[VisibilityRange]) {
def visibility[T : CoordinateDegrees](coord: T): TargetVisibility =
ranges.find(_.r.includes(coord)).map(_.v).getOrElse(TargetVisibility.Bad)
}
private[visibility] object VisibilityRangeList {
// Folded across a specification to produce a List[VisibilityRange].
private case class F(lst: List[VisibilityRange], start: Double, vis: TargetVisibility)
/** Creates a range list that covers the full 0-360 range according to specification. */
def deg(visSeq: (Double, TargetVisibility)*): VisibilityRangeList =
visSeq.toList match {
case Nil => VisibilityRangeList(Nil)
case h :: t =>
val (startDeg, startVis) = h
val res = (F(Nil, startDeg, startVis)/:t) {
(f, tup) => {
val (curDeg, curVis) = tup
val range = VisibilityRange(f.vis, new DegRange(f.start, curDeg))
F(range :: f.lst, curDeg, curVis)
}
}
VisibilityRangeList(VisibilityRange(res.vis, new DegRange(res.start, startDeg)) :: res.lst)
}
/**
* Creates a range list from 0 to 24 hours. Converts the hours to degrees.
*/
def hr(vis: (Double, TargetVisibility)*): VisibilityRangeList =
deg(vis.map { case (h, v) => (h*15.0, v) }: _*)
}
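// Hedged usage sketch; TargetVisibility.Good is assumed from the p1 model (only Bad is
// referenced in this file), and the boundaries are made up:
//
//   val vis = VisibilityRangeList.hr(
//     2.0  -> TargetVisibility.Good,  // Good from RA 2h (30 deg) ...
//     10.0 -> TargetVisibility.Bad)   // ... up to RA 10h (150 deg), Bad elsewhere
//   vis.visibility(ra)                // Good iff ra falls in [30, 150) degrees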
| fnussber/ocs | bundle/edu.gemini.model.p1/src/main/scala/edu/gemini/model/p1/visibility/VisibilityRange.scala | Scala | bsd-3-clause | 2,575 |
object ch12_6 {
import ch12.Applicative
sealed trait Validation[+E, +A]
case class Failure[E](head: E, tail: Vector[E] = Vector())
extends Validation[E, Nothing] {
    def merge[EE >: E](failure: Failure[EE]): Failure[EE] =
      Failure(head, tail ++ Vector(failure.head) ++ failure.tail)
}
case class Success[A](a: A) extends Validation[Nothing, A]
def validationApplicative[E] = new Applicative[({type f[x] = Validation[E,x]})#f] {
def unit[A](a: => A) = Success(a)
override def apply[A,B](fab: Validation[E,A => B])(fa: Validation[E,A]) =
(fab, fa) match {
      case (f1@Failure(_,_), f2@Failure(_,_)) => f1 merge f2
case (f1@Failure(_,_), _) => f1
case (_, f2@Failure(_,_)) => f2
case (Success(f), Success(a)) => Success(f(a))
}
}
}
import ch12_6._
/*
From the REPL you can test this by typing:
:load src/main/scala/fpinscala/lib/Monad.scala
:load src/main/scala/fpinscala/ch12/Applicative.scala
:load src/main/scala/fpinscala/ch12/Exercise6.scala
*/
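/*
 A hedged sketch of the accumulating behaviour (assumes the ch12 Applicative trait
 loaded above derives map2 from apply/unit, as in the book):

   val v = ch12_6.validationApplicative[String]
   v.map2(Failure("empty name"), Failure("bad age"))((_, _))
   // a single Failure carrying both messages
   v.map2(Success("Bob"), Success(42))((_, _))
   // Success(("Bob", 42))
*/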
| rucka/fpinscala | src/main/scala/fpinscala/ch12/Exercise6.scala | Scala | gpl-2.0 | 1,021 |
/*
Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package de.hpi.ingestion.framework
import de.hpi.ingestion.deduplication.models.config.{AttributeConfig, SimilarityMeasureConfig}
import de.hpi.ingestion.deduplication.similarity.{ExactMatchString, JaroWinkler, MongeElkan}
import scala.xml.{Node, XML}
// scalastyle:off line.size.limit
object TestData {
def parsedScoreConfig: List[AttributeConfig] = List(
AttributeConfig(
"name",
1.0,
List(
SimilarityMeasureConfig(similarityMeasure = MongeElkan, weight = 0.5333),
SimilarityMeasureConfig(similarityMeasure = JaroWinkler, weight = 0.4667)
)
)
)
def parsedScoreConfigWithoutWeights: List[AttributeConfig] = List(
AttributeConfig(
"name",
0.5,
List(
SimilarityMeasureConfig(similarityMeasure = MongeElkan, weight = 0.5),
SimilarityMeasureConfig(similarityMeasure = JaroWinkler, weight = 0.5)
)
),
AttributeConfig(
"category",
0.5,
List(
SimilarityMeasureConfig(similarityMeasure = ExactMatchString, weight = 1.0)
)
)
)
def parsedSettings: Map[String, String] = {
Map(
"key1" -> "val 1",
"key2" -> "val 2",
"key3" -> "val 3",
"key4" -> "val 4")
}
def configXML: Node = {
XML.load(getClass.getResource("/framework/test.xml"))
}
def configWithoutSettingsXML: Node = {
XML.load(getClass.getResource("/framework/test2.xml"))
}
def configsWithoutWeightsXML: Node = {
XML.load(getClass.getResource("/framework/test3.xml"))
}
def importConfigXML: Node = {
XML.load(getClass.getResource("/datalake/normalization.xml"))
}
def normalizationSettings: Map[String, List[String]] = {
Map(
"rootKey" -> List("root_value"),
"nestedKey1" -> List("nested_value:1.1", "nested_value:1.2", "nested_value:1.3"),
"nestedKey2" -> List("nested_value:2.1")
)
}
def sectorSettings: Map[String, List[String]] = {
Map(
"Category 1" -> List("value1.1", "value1.2"),
"Category 2" -> List("value2.1"),
"Category 3" -> List("value3.1", "value3.2")
)
}
def commitJson: String = {
"{\\"created\\":{\\"6a7b2436-255e-447f-8740-f7d353560cc3\\":{\\"name\\":\\"Test ag\\",\\"id\\":\\"6a7b2436-255e-447f-8740-f7d353560cc3\\",\\"properties\\":{}}},\\"updated\\":{},\\"deleted\\":{\\"3254650b-269e-4d20-bb2b-48ee44013c88\\":{\\"master\\":\\"3254650b-269e-4d20-bb2b-48ee44013c88\\",\\"id\\":\\"3254650b-269e-4d20-bb2b-48ee44013c88\\",\\"datasource\\":\\"master\\",\\"name\\":\\"Deutschland AG\\",\\"aliases\\":null,\\"category\\":\\"business\\",\\"properties\\":{\\"gen_legal_form\\":[\\"AG\\"],\\"id_dbpedia\\":[\\"Deutschland AG\\"],\\"id_wikidata\\":[\\"Q1206257\\"],\\"id_wikipedia\\":[\\"Deutschland AG\\"]},\\"relations\\":{\\"c177326a-8898-4bc7-8aca-a040824aa87c\\":{\\"master\\":\\"1.0\\"}},\\"selected\\":true}}}"
}
def base64Commit: String = {
"eyJjcmVhdGVkIjp7IjZhN2IyNDM2LTI1NWUtNDQ3Zi04NzQwLWY3ZDM1MzU2MGNjMyI6eyJuYW1lIjoiVGVzdCBhZyIsImlkIjoiNmE3YjI0MzYtMjU1ZS00NDdmLTg3NDAtZjdkMzUzNTYwY2MzIiwicHJvcGVydGllcyI6e319fSwidXBkYXRlZCI6e30sImRlbGV0ZWQiOnsiMzI1NDY1MGItMjY5ZS00ZDIwLWJiMmItNDhlZTQ0MDEzYzg4Ijp7Im1hc3RlciI6IjMyNTQ2NTBiLTI2OWUtNGQyMC1iYjJiLTQ4ZWU0NDAxM2M4OCIsImlkIjoiMzI1NDY1MGItMjY5ZS00ZDIwLWJiMmItNDhlZTQ0MDEzYzg4IiwiZGF0YXNvdXJjZSI6Im1hc3RlciIsIm5hbWUiOiJEZXV0c2NobGFuZCBBRyIsImFsaWFzZXMiOm51bGwsImNhdGVnb3J5IjoiYnVzaW5lc3MiLCJwcm9wZXJ0aWVzIjp7Imdlbl9sZWdhbF9mb3JtIjpbIkFHIl0sImlkX2RicGVkaWEiOlsiRGV1dHNjaGxhbmQgQUciXSwiaWRfd2lraWRhdGEiOlsiUTEyMDYyNTciXSwiaWRfd2lraXBlZGlhIjpbIkRldXRzY2hsYW5kIEFHIl19LCJyZWxhdGlvbnMiOnsiYzE3NzMyNmEtODg5OC00YmM3LThhY2EtYTA0MDgyNGFhODdjIjp7Im1hc3RlciI6IjEuMCJ9fSwic2VsZWN0ZWQiOnRydWV9fX0="
}
}
// scalastyle:on line.size.limit
| bpn1/ingestion | src/test/scala/de/hpi/ingestion/framework/TestData.scala | Scala | apache-2.0 | 4,552 |
/*
* Copyright (c) 2014 Dufresne Management Consulting LLC.
*/
package com.nickelsoftware.bettercare4me.hedis.hedis2014
import scala.util.Random
import org.joda.time.DateTime
import org.joda.time.Interval
import com.nickelsoftware.bettercare4me.hedis.HEDISRule
import com.nickelsoftware.bettercare4me.hedis.HEDISRuleBase
import com.nickelsoftware.bettercare4me.hedis.Scorecard
import com.nickelsoftware.bettercare4me.models.Claim
import com.nickelsoftware.bettercare4me.models.MedClaim
import com.nickelsoftware.bettercare4me.models.Patient
import com.nickelsoftware.bettercare4me.models.PatientHistory
import com.nickelsoftware.bettercare4me.models.PersistenceLayer
import com.nickelsoftware.bettercare4me.models.Provider
import com.nickelsoftware.bettercare4me.models.RuleConfig
import com.nickelsoftware.bettercare4me.utils.Utils
object W34 {
val name = "W34-HEDIS-2014"
val wellChildVisit = "Well Child Visit"
// CPT for well child visit
val cptA = List("99382", "99383", "99392", "99393")
val cptAS = cptA.toSet
// ICD D for well child visit
val icdDA = List("V20.2", "V70.0", "V70.3", "V70.5", "V70.6", "V70.8", "V70.9")
val icdDAS = icdDA.toSet
}
/**
* Well-Child Visits in the Third, Fourth, Fifth and Sixth Years of Life
*
* The percentage of members 3–6 years of age who received one or more well-child visits with a PCP during
* the measurement year.
*
* NUMERATOR:
* At least one well-child visit with a PCP during the measurement year. The PCP does
* not have to be the practitioner assigned to the child.
*
*/
class W34_Rule(config: RuleConfig, hedisDate: DateTime) extends HEDISRuleBase(config, hedisDate) {
val name = W34.name
val fullName = "Well-Child Visits in the Third, Fourth, Fifth and Sixth Years of Life"
val description = "The percentage of members 3–6 years of age who received one or more well-child visits with a PCP during " +
"the measurement year."
override def isPatientMeetDemographic(patient: Patient): Boolean = {
val age = patient.age(hedisDate)
age >= 3 && age <= 6
}
import W34._
  // This rule has 100% eligibility when the demographics are met
override val eligibleRate: Int = 100
  // This rule has 0% exclusion when the demographics are met
override val exclusionRate: Int = 0
override def scorePatientExcluded(scorecard: Scorecard, patient: Patient, ph: PatientHistory): Scorecard = scorecard.addScore(name, fullName, HEDISRule.excluded, false)
override def generateMeetMeasureClaims(pl: PersistenceLayer, patient: Patient, provider: Provider): List[Claim] = {
val days = Utils.daysBetween(hedisDate.minusYears(1), hedisDate)
val dos = hedisDate.minusDays(Random.nextInt(days))
pickOne(List(
// One possible set of claims based on cpt
() => List(pl.createMedClaim(patient.patientID, patient.firstName, patient.lastName, provider.providerID, provider.firstName, provider.lastName, dos, dos, cpt = pickOne(cptA))),
// Another possible set of claims based on ICD D
() => List(pl.createMedClaim(patient.patientID, patient.firstName, patient.lastName, provider.providerID, provider.firstName, provider.lastName, dos, dos, icdDPri = pickOne(icdDA)))))()
}
override def scorePatientMeetMeasure(scorecard: Scorecard, patient: Patient, ph: PatientHistory): Scorecard = {
val measurementInterval = getIntervalFromYears(1)
def rules = List[(Scorecard) => Scorecard](
(s: Scorecard) => {
val claims1 = filterClaims(ph.cpt, cptAS, { claim: MedClaim => measurementInterval.contains(claim.dos) })
val claims2 = filterClaims(ph.icdD, icdDAS, { claim: MedClaim => measurementInterval.contains(claim.dos) })
val claims = List.concat(claims1, claims2)
s.addScore(name, fullName, HEDISRule.meetMeasure, wellChildVisit, claims)
})
applyRules(scorecard, rules)
}
}
| reactivecore01/bettercare4.me | play/app/com/nickelsoftware/bettercare4me/hedis/hedis2014/W34_Rule.scala | Scala | apache-2.0 | 3,876 |
// scalac: -Xfatal-warnings
//
class Test {
object severity extends Enumeration
class Severity(val id: Int) extends severity.Value
val INFO = new Severity(0)
val WARNING = new Severity(1)
(0: Int) match {
case WARNING.id =>
case INFO.id => // reachable
case WARNING.id => // unreachable
}
}
| scala/scala | test/files/neg/virtpatmat_unreach_select.scala | Scala | apache-2.0 | 319 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.broker.kafka
import akka.actor.ActorSystem
import akka.stream.Materializer
import com.lightbend.lagom.internal.scaladsl.api.broker.TopicFactory
import com.lightbend.lagom.internal.scaladsl.api.broker.TopicFactoryProvider
import com.lightbend.lagom.internal.scaladsl.broker.kafka.KafkaTopicFactory
import com.lightbend.lagom.scaladsl.api.ServiceInfo
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.typesafe.config.Config
import scala.concurrent.ExecutionContext
trait LagomKafkaClientComponents extends TopicFactoryProvider {
def serviceInfo: ServiceInfo
def actorSystem: ActorSystem
def materializer: Materializer
def executionContext: ExecutionContext
def serviceLocator: ServiceLocator
def config: Config
lazy val topicFactory: TopicFactory =
new KafkaTopicFactory(serviceInfo, actorSystem, serviceLocator, config)(materializer, executionContext)
override def optionalTopicFactory: Option[TopicFactory] = Some(topicFactory)
}
| lagom/lagom | service/scaladsl/kafka/client/src/main/scala/com/lightbend/lagom/scaladsl/broker/kafka/LagomKafkaClientComponents.scala | Scala | apache-2.0 | 1,075 |
package sublimeSpam
abstract class Entity(val time: Long) {
override def toString: String
val typeChar: Char
} | Berthur/SublimeSpam | src/sublimeSpam/Entity.scala | Scala | gpl-3.0 | 121 |
package beam.calibration
import java.io.File
import beam.experiment.ExperimentApp
import beam.tags.Periodic
import com.sigopt.Sigopt
import com.sigopt.exception.APIConnectionError
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.util.{Failure, Success, Try}
class BeamSigoptTunerSpec extends WordSpecLike with Matchers with BeforeAndAfterAll {
override protected def beforeAll(): Unit = {
super.beforeAll()
Sigopt.clientToken = Option {
System.getenv("SIGOPT_DEV_API_TOKEN")
}.getOrElse(
throw new APIConnectionError(
"Correct developer client token must be present in environment as SIGOPT_DEV_API Token "
)
)
}
val TEST_BEAM_EXPERIMENT_LOC = "test/input/beamville/example-calibration/experiment.yml"
val TEST_BEAM_BENCHMARK_DATA_LOC = "test/input/beamville/example-calibration/benchmark.csv"
"BeamSigoptTuner" ignore {
"create a proper experiment def from the test experiment specification file" taggedAs Periodic in {
wrapWithTestExperiment { experimentData =>
val header = experimentData.experimentDef.header
header.title equals "Example-Experiment"
header.beamTemplateConfPath equals "test/input/sf-light/sf-light-0.5k.conf"
}
}
"create an experiment in the SigOpt API" taggedAs Periodic in {
wrapWithTestExperiment { experimentData =>
{
val expParams = experimentData.experiment.getParameters
// First is the rideHailParams
val rideHailParams = expParams.iterator.next
rideHailParams.getName equals "beam.agentsim.agents.rideHail.initialization.procedural.numDriversAsFractionOfPopulation"
rideHailParams.getBounds.getMax equals 0.1
rideHailParams.getBounds.getMin equals 0.001
// Second is transitCapacityParams
val transitCapacityParams = expParams.iterator.next
transitCapacityParams.getName equals "beam.agentsim.agents.rideHail.initialization.procedural.numDriversAsFractionOfPopulation"
transitCapacityParams.getBounds.getMax equals 0.1
transitCapacityParams.getBounds.getMin equals 0.001
}
}
}
"create an experiment and run for 2 iterations" taggedAs Periodic in {
wrapWithTestExperiment { implicit experimentData =>
val runner = ExperimentRunner()
runner.runExperiment(2)
}
}
}
private def wrapWithTestExperiment(experimentDataFunc: SigoptExperimentData => Any): Unit = {
Try {
val file = new File(TEST_BEAM_BENCHMARK_DATA_LOC)
SigoptExperimentData(
ExperimentApp.loadExperimentDefs(file),
TEST_BEAM_BENCHMARK_DATA_LOC,
"None",
development = true
)
} match {
case Success(e) => experimentDataFunc(e)
case Failure(t) => t.printStackTrace()
}
}
}
| colinsheppard/beam | src/test/scala/beam/calibration/BeamSigoptTunerSpec.scala | Scala | gpl-3.0 | 2,857 |
/*
Copyright (c) 2016, Robby, Kansas State University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sireum.pilar.test.parser
import org.sireum.pilar.ast.Builder
import org.sireum.pilar.parser.FastParser
import org.sireum.pilar.test.parser.FastParserTestDefProvider._
import org.sireum.test._
import org.sireum.util._
final class Antlr4PilarParserTestDefProvider(tf: TestFramework)
extends TestDefProvider {
override def testDefs: ISeq[TestDef] = ivector(
EqualTest("Model1",
Builder(model1), FastParser(model1))
,
EqualTest("Model2",
Builder(model2), FastParser(model2))
,
EqualTest("Model3",
Builder(model3), FastParser(model3))
)
}
| sireum/v3 | pilar/jvm/src/test/scala/org/sireum/pilar/test/parser/Antlr4PilarParserTestDefProvider.scala | Scala | bsd-2-clause | 1,937 |
package sorra.lanka.core
import org.eclipse.jdt.core.dom._
import scala.reflect.ClassTag
import scala.collection.mutable.ArrayBuffer
class Selector[T <: ASTNode: ClassTag](nodeVisit: T => Boolean) extends ASTVisitor {
private val clazz = implicitly[ClassTag[T]].runtimeClass
private var started = false
private val hitNodes = ArrayBuffer[T]()
def start(scope: ASTNode) = {
started = true
scope.accept(this)
this
}
def results: Seq[T] = {
if (started) hitNodes
else throw new IllegalStateException
}
protected def add(node: T) {
hitNodes += node
}
override def preVisit2(node: ASTNode): Boolean = {
preVisit(node)
if (clazz.isInstance(node)) nodeVisit(node.asInstanceOf[T])
else true
}
}
object Selector {
/**
* nodeVisit gets the node, returns true:CONTINUE or false:STOP
*/
//TODO
def apply[T <: ASTNode: ClassTag](nodeVisit: T => Boolean): Selector[T] = {
new Selector[T](nodeVisit)
}
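  // Usage sketch (the surrounding compilation-unit setup is assumed, not shown):
  //
  //   val names = scala.collection.mutable.ArrayBuffer[String]()
  //   val selector = Selector[MethodInvocation] { mi =>
  //     names += mi.getName.getIdentifier
  //     true // keep visiting
  //   }
  //   selector.start(compilationUnit)
  //   // names now holds every method name invoked within the visited scope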
} | sorra/Lanka | src/sorra/lanka/core/Selector.scala | Scala | apache-2.0 | 972 |
package mesosphere.marathon.core.task.update.impl.steps
import com.google.inject.Inject
import mesosphere.marathon.core.task.TaskStateOp
import mesosphere.marathon.core.task.bus.MarathonTaskStatus
import mesosphere.marathon.core.task.bus.TaskChangeObservables.TaskChanged
import mesosphere.marathon.core.task.update.TaskUpdateStep
import mesosphere.marathon.health.HealthCheckManager
import scala.concurrent.Future
/**
* Notify the health check manager of this update.
*/
class NotifyHealthCheckManagerStepImpl @Inject() (healthCheckManager: HealthCheckManager) extends TaskUpdateStep {
override def name: String = "notifyHealthCheckManager"
override def processUpdate(taskChanged: TaskChanged): Future[_] = {
taskChanged.stateOp match {
// forward health changes to the health check manager
case TaskStateOp.MesosUpdate(task, MarathonTaskStatus.WithMesosStatus(mesosStatus), _) =>
// it only makes sense to handle health check results for launched tasks
task.launched.foreach { launched =>
healthCheckManager.update(mesosStatus, launched.appVersion)
}
case _ =>
// not interested in other task updates
}
Future.successful(())
}
}
| ss75710541/marathon | src/main/scala/mesosphere/marathon/core/task/update/impl/steps/NotifyHealthCheckManagerStepImpl.scala | Scala | apache-2.0 | 1,217 |
package filodb.prom.downsample
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, Uri}
import akka.http.scaladsl.model.Uri.Query
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.StrictLogging
import de.heikoseeberger.akkahttpcirce.FailFastCirceSupport
import filodb.query.PromCirceSupport
import filodb.query.SuccessResponse
/**
* Use this tool to validate raw data against downsampled data for gauges.
*
*
* Run as main class with following system properties:
*
* -Dquery-endpoint=https://myFiloDbEndpoint.com
* -Draw-data-promql=jvm_threads::value{_ns=\\"myApplication\\",measure=\\"daemon\\"}[@@@@s]
* -Dflush-interval=12h
* -Dquery-range=6h
*
* raw-data-promql property value should end with '}[@@@@s]'.
* The lookback window is replaced by validation tool when running the query.
*
*/
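// Illustrative invocation (endpoint, namespace, and jar path are placeholders):
//
//   java -Dquery-endpoint=https://filodb.example.com \
//        -Draw-data-promql='jvm_threads::value{_ns="myApplication",measure="daemon"}[@@@@s]' \
//        -Dflush-interval=12h -Dquery-range=6h \
//        -cp <assembly-jar> filodb.prom.downsample.GaugeDownsampleValidator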
object GaugeDownsampleValidator extends App with StrictLogging {
import FailFastCirceSupport._
  // DO NOT REMOVE the PromCirceSupport import below even though it appears unused - IntelliJ strips it during import optimization.
// Needed to override Sampl case class Encoder.
import PromCirceSupport._
import io.circe.generic.auto._
case class DownsampleValidation(name: String, rawQuery: String, dsQuery: String)
case class DownsampleLevel(step: Duration, endpoint: String)
val config = ConfigFactory.load()
val rawPromql = config.getString("raw-data-promql")
val filodbHttpEndpoint = config.getString("query-endpoint")
val flushIntervalHours = config.getDuration("flush-interval")
val queryRange = config.getDuration("query-range")
require((rawPromql.endsWith("""}[@@@@s]""")),
"""Raw Data PromQL should end with }[@@@@s]""")
// List of validations to perform
val validations = Seq (
DownsampleValidation("min", s"""min_over_time($rawPromql)""",
s"""min_over_time(${rawPromql.replace("\\"value\\"", "\\"min\\"")})"""),
DownsampleValidation("max", s"""max_over_time($rawPromql)""",
s"""max_over_time(${rawPromql.replace("\\"value\\"", "\\"max\\"")})"""),
DownsampleValidation("sum", s"""sum_over_time($rawPromql)""",
s"""sum_over_time(${rawPromql.replace("\\"value\\"", "\\"sum\\"")})"""),
DownsampleValidation("count", s"""count_over_time($rawPromql)""",
s"""sum_over_time(${rawPromql.replace("\\"value\\"", "\\"count\\"")})""")
)
val now = System.currentTimeMillis()
val endTime = (now - flushIntervalHours.toMillis) / 1000
val lastHourEnd = (endTime / 1.hour.toSeconds) * 1.hour.toSeconds
val startTime = (lastHourEnd - queryRange.toMillis / 1000)
val urlPrefixRaw = s"$filodbHttpEndpoint/promql/prometheus/api"
implicit val as = ActorSystem()
implicit val materializer = ActorMaterializer()
// TODO configure dataset name etc.
val downsampleLevels = Seq (
DownsampleLevel(1.minute, s"$filodbHttpEndpoint/promql/prometheus_ds_1m/api"),
DownsampleLevel(15.minutes, s"$filodbHttpEndpoint/promql/prometheus_ds_15m/api"),
DownsampleLevel(60.minutes, s"$filodbHttpEndpoint/promql/prometheus_ds_1hr/api"))
val params = Map( "start" -> startTime.toString, "end" -> endTime.toString)
// validation loop:
val results = for {
level <- downsampleLevels // for each downsample dataset
validation <- validations // for each validation
} yield {
val step = level.step.toSeconds
// invoke query on downsample dataset
val dsPromQLFull = validation.dsQuery.replace("@@@@", step.toString)
val dsParams = params ++ Map("step" -> step.toString, "query" -> dsPromQLFull)
val dsUrl = Uri(s"${level.endpoint}/v1/query_range").withQuery(Query(dsParams))
val dsRespFut = Http().singleRequest(HttpRequest(uri = dsUrl)).flatMap(Unmarshal(_).to[SuccessResponse])
val dsResp = try {
Some(Await.result(dsRespFut, 10.seconds))
} catch {
case e: Throwable =>
e.printStackTrace()
None
}
// invoke query on raw dataset
val rawPromQLFull = validation.rawQuery.replace("@@@@", step.toString)
val rawParams = params ++ Map("step" -> step.toString, "query" -> rawPromQLFull)
val rawUrl = Uri(s"$urlPrefixRaw/v1/query_range").withQuery(Query(rawParams))
val rawRespFut = Http().singleRequest(HttpRequest(uri = rawUrl)).flatMap(Unmarshal(_).to[SuccessResponse])
val rawResp = try {
Some(Await.result(rawRespFut, 10.seconds))
} catch {
case e: Throwable =>
e.printStackTrace()
None
}
// normalize the results by sorting the range vectors so we can do comparison
val dsNorm = dsResp.get.data.copy(result =
dsResp.get.data.result.sortWith((a, b) => a.metric("instance").compareTo(b.metric("instance")) > 0))
val rawNorm = rawResp.get.data.copy(result =
rawResp.get.data.result.sortWith((a, b) => a.metric("instance").compareTo(b.metric("instance")) > 0))
logger.info(s"Downsampler=${validation.name} step=${step}s validationResult=${dsNorm == rawNorm} " +
s"rawUrl=$rawUrl dsUrl=$dsUrl")
if (dsNorm != rawNorm) {
logger.error(s"Raw results: $rawNorm")
logger.error(s"DS results: $dsNorm")
}
dsNorm == rawNorm
}
CoordinatedShutdown(as).run(CoordinatedShutdown.UnknownReason)
if (results.exists(b => !b)) {
logger.info("Validation had a failure. See logs for details.")
System.exit(10)
}
else
logger.info("Validation was a success")
}
| tuplejump/FiloDB | http/src/test/scala/filodb/prom/downsample/GaugeDownsampleValidator.scala | Scala | apache-2.0 | 5,642 |
package com.yiguang.mqtt.provider
import org.scalatest.{Matchers, FlatSpec}
/**
* Created by yigli on 14-11-19.
*/
class EventBusSpec extends FlatSpec with Matchers {
"Publish Event" should "serialize and unSerialize" in {
// val e = PublishEvent("client","topic","messageId")
// val s = e.toBytes()
// val u = PublishEvent.fromBytes(s)
//
// assert(u.messageId == e.messageId)
// assert(u.topicName == e.topicName)
val e = PublishEvent("client", "topic", "messageId")
val json = e.toJson()
val p2 = PublishEvent.fromJson(json).getOrElse(PublishEvent("", "", ""))
assert(p2.messageId == e.messageId)
assert(p2.topicName == e.topicName)
}
}
| liyiguang/finagle-mqtt | src/test/scala/com/yiguang/mqtt/provider/EventBusSpec.scala | Scala | apache-2.0 | 712 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scouter.server.netio.service.handle;
import scouter.io.DataInputX
import scouter.io.DataOutputX
import scouter.lang.SummaryEnum
import scouter.lang.pack.MapPack
import scouter.lang.pack.SummaryPack
import scouter.lang.value.ListValue
import scouter.net.TcpFlag
import scouter.server.db.SummaryRD
import scouter.server.netio.service.anotation.ServiceHandler
import scouter.util.{HashUtil, IntKeyLinkedMap, LongKeyLinkedMap, StringKeyLinkedMap}
import scouter.net.RequestCmd
class SummaryService {
class TempObject() {
var hash: Int = 0;
var count: Int = 0;
var errorCnt: Int = 0;
var elapsedSum: Long = 0;
var cpuSum: Long = 0;
var memSum: Long = 0;
}
class TempError() {
var hash: Int = 0;
var error: Int = 0;
var service: Int = 0;
var message: Int = 0;
var count: Int = 0;
var txid: Long = 0;
var sql: Int = 0;
var apicall: Int = 0;
var fullstack: Int = 0;
}
class TempAlert() {
var hash: Int = 0;
var title: String = "";
var count: Int = 0;
var level: Byte = 0;
}
def load(stype: Byte, din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
val param = din.readMapPack();
val date = param.getText("date");
val stime = param.getLong("stime");
val etime = param.getLong("etime");
val objType = param.getText("objType");
val objHash = param.getInt("objHash");
val tempMap = new IntKeyLinkedMap[TempObject]().setMax(50000)
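    // Fold every SummaryPack stored in [stime, etime] into per-id totals
    // (count, error count, elapsed sum, and cpu/mem sums when present).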
val handler = (time: Long, data: Array[Byte]) => {
val p = new DataInputX(data).readPack().asInstanceOf[SummaryPack];
if (p.stype == stype
&& (objHash == 0 || objHash == p.objHash)
&& (objType == null || objType == "" || objType == p.objType)) {
val id = p.table.getList("id")
val count = p.table.getList("count")
val error = p.table.getList("error")
val elapsed = p.table.getList("elapsed")
val cpu = p.table.getList("cpu")
val mem = p.table.getList("mem");
for (i <- 0 to id.size() - 1) {
var tempObj = tempMap.get(id.getInt(i));
if (tempObj == null) {
tempObj = new TempObject();
tempObj.hash = id.getInt(i);
tempMap.put(id.getInt(i), tempObj);
}
tempObj.count += count.getInt(i);
tempObj.errorCnt += error.getInt(i);
tempObj.elapsedSum += elapsed.getLong(i);
if (cpu != null && mem != null) {
tempObj.cpuSum += cpu.getLong(i);
tempObj.memSum += mem.getLong(i);
}
}
}
}
SummaryRD.readByTime(stype, date, stime, etime, handler)
val map = new MapPack();
val newIdList = map.newList("id");
val newCountList = map.newList("count");
val newErrorCntList = map.newList("error"); //count
val newElapsedSumList = map.newList("elapsed"); //elapsed Time Sum
var newCpuSumList: ListValue = null;
var newMemSumList: ListValue = null;
if (stype == SummaryEnum.APP) {
newCpuSumList = map.newList("cpu"); // cpu time sum
newMemSumList = map.newList("mem"); // mem sum
}
val itr = tempMap.keys();
while (itr.hasMoreElements()) {
val hash = itr.nextInt();
val obj = tempMap.get(hash);
newIdList.add(obj.hash);
newCountList.add(obj.count);
newErrorCntList.add(obj.errorCnt);
newElapsedSumList.add(obj.elapsedSum);
if (stype == SummaryEnum.APP) {
newCpuSumList.add(obj.cpuSum)
newMemSumList.add(obj.memSum)
}
}
dout.writeByte(TcpFlag.HasNEXT);
dout.writePack(map);
}
def loadIpAndUA(stype: Byte, din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
val param = din.readMapPack();
val date = param.getText("date");
val stime = param.getLong("stime");
val etime = param.getLong("etime");
val objType = param.getText("objType");
val objHash = param.getInt("objHash");
val tempMap = new IntKeyLinkedMap[TempObject]().setMax(50000)
val handler = (time: Long, data: Array[Byte]) => {
val p = new DataInputX(data).readPack().asInstanceOf[SummaryPack];
if (p.stype == stype
&& (objHash == 0 || objHash == p.objHash)
&& (objType == null || objType == p.objType)) {
val id = p.table.getList("id")
val count = p.table.getList("count")
for (i <- 0 to id.size() - 1) {
var tempObj = tempMap.get(id.getInt(i));
if (tempObj == null) {
tempObj = new TempObject();
tempObj.hash = id.getInt(i);
tempMap.put(id.getInt(i), tempObj);
}
tempObj.count += count.getInt(i);
}
}
}
SummaryRD.readByTime(stype, date, stime, etime, handler)
val map = new MapPack();
val newIdList = map.newList("id");
val newCountList = map.newList("count");
val itr = tempMap.keys();
while (itr.hasMoreElements()) {
val hash = itr.nextInt();
val obj = tempMap.get(hash);
newIdList.add(obj.hash);
newCountList.add(obj.count);
}
dout.writeByte(TcpFlag.HasNEXT);
dout.writePack(map);
}
def loadServiceErrorSum(stype: Byte, din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
val param = din.readMapPack();
val date = param.getText("date");
val stime = param.getLong("stime");
val etime = param.getLong("etime");
val objType = param.getText("objType");
val objHash = param.getInt("objHash");
val tempMap = new LongKeyLinkedMap[TempError]().setMax(10000)
val handler = (time: Long, data: Array[Byte]) => {
val p = new DataInputX(data).readPack().asInstanceOf[SummaryPack];
if (p.stype == stype
&& (objHash == 0 || objHash == p.objHash)
&& (objType == null || objType == p.objType)) {
val id = p.table.getList("id")
val error = p.table.getList("error")
val service = p.table.getList("service")
val message = p.table.getList("message")
val count = p.table.getList("count")
val txid = p.table.getList("txid")
val sql = p.table.getList("sql")
val apicall = p.table.getList("apicall")
val fullstack = p.table.getList("fullstack")
for (i <- 0 to id.size() - 1) {
var tempObj = tempMap.get(id.getInt(i));
if (tempObj == null) {
tempObj = new TempError();
tempObj.hash = id.getInt(i);
tempMap.put(id.getInt(i), tempObj);
tempObj.error = error.getInt(i);
tempObj.service = service.getInt(i);
tempObj.txid = txid.getLong(i);
}
tempObj.count += count.getInt(i);
if (tempObj.message == 0) {
tempObj.message = message.getInt(i);
}
if (tempObj.sql == 0) {
tempObj.sql = sql.getInt(i);
}
if (tempObj.apicall == 0) {
tempObj.apicall = apicall.getInt(i);
}
if (tempObj.fullstack == 0) {
tempObj.fullstack = fullstack.getInt(i);
}
}
}
}
SummaryRD.readByTime(stype, date, stime, etime, handler)
// The summary id is composed of error + service.
val map = new MapPack();
val newIdList = map.newList("id");
val newErrorList = map.newList("error");
val newServiceList = map.newList("service");
val newMessageList = map.newList("message");
val newCountList = map.newList("count");
val newTxidList = map.newList("txid");
val newSqlList = map.newList("sql");
val newApiCallList = map.newList("apicall");
val newFullStackList = map.newList("fullstack");
val itr = tempMap.keys();
while (itr.hasMoreElements()) {
val id = itr.nextLong();
val obj = tempMap.get(id);
newIdList.add(obj.hash);
newErrorList.add(obj.error);
newServiceList.add(obj.service);
newMessageList.add(obj.message);
newCountList.add(obj.count);
newTxidList.add(obj.txid);
newSqlList.add(obj.sql);
newApiCallList.add(obj.apicall);
newFullStackList.add(obj.fullstack);
}
dout.writeByte(TcpFlag.HasNEXT);
dout.writePack(map);
}
def loadAlertSum(stype: Byte, din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
val param = din.readMapPack();
val date = param.getText("date");
val stime = param.getLong("stime");
val etime = param.getLong("etime");
val objType = param.getText("objType");
val objHash = param.getInt("objHash");
val tempMap = new StringKeyLinkedMap[TempAlert]().setMax(50000)
val handler = (time: Long, data: Array[Byte]) => {
val p = new DataInputX(data).readPack().asInstanceOf[SummaryPack];
if (p.stype == stype
&& (objHash == 0 || objHash == p.objHash)
&& (objType == null || objType == p.objType)) {
val title = p.table.getList("title")
val count = p.table.getList("count")
val level = p.table.getList("level")
for (i <- 0 to title.size() - 1) {
var tempObj = tempMap.get(title.getString(i));
if (tempObj == null) {
tempObj = new TempAlert();
tempObj.hash = HashUtil.hash(title.getString(i))
tempObj.title = title.getString(i);
tempObj.level = level.getInt(i).toByte;
tempMap.put(title.getString(i), tempObj);
}
tempObj.count += count.getInt(i);
}
}
}
SummaryRD.readByTime(stype, date, stime, etime, handler)
val map = new MapPack();
val newIdList = map.newList("id");
val newTitleList = map.newList("title");
val newCountList = map.newList("count");
val newLevelList = map.newList("level");
val itr = tempMap.keys();
while (itr.hasMoreElements()) {
val title = itr.nextString();
val obj = tempMap.get(title);
newIdList.add(obj.hash);
newTitleList.add(obj.title);
newCountList.add(obj.count);
newLevelList.add(obj.level);
}
dout.writeByte(TcpFlag.HasNEXT);
dout.writePack(map);
}
@ServiceHandler(RequestCmd.LOAD_SERVICE_SUMMARY)
def LOAD_SERVICE_SUMMARY(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
load(SummaryEnum.APP, din, dout, login);
}
@ServiceHandler(RequestCmd.LOAD_SQL_SUMMARY)
def LOAD_SQL_SUMMARY(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
load(SummaryEnum.SQL, din, dout, login);
}
@ServiceHandler(RequestCmd.LOAD_APICALL_SUMMARY)
def LOAD_APICALL_SUMMARY(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
load(SummaryEnum.APICALL, din, dout, login);
}
@ServiceHandler(RequestCmd.LOAD_IP_SUMMARY)
def LOAD_IP_SUMMARY(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
loadIpAndUA(SummaryEnum.IP, din, dout, login);
}
@ServiceHandler(RequestCmd.LOAD_UA_SUMMARY)
def LOAD_UA_SUMMARY(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
loadIpAndUA(SummaryEnum.USER_AGENT, din, dout, login);
}
@ServiceHandler(RequestCmd.LOAD_SERVICE_ERROR_SUMMARY)
def LOAD_ERROR_SUMMARY(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
loadServiceErrorSum(SummaryEnum.SERVICE_ERROR, din, dout, login);
}
@ServiceHandler(RequestCmd.LOAD_ALERT_SUMMARY)
def LOAD_ALERT_SUMMARY(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
loadAlertSum(SummaryEnum.ALERT, din, dout, login);
}
} | scouter-project/scouter | scouter.server/src/main/scala/scouter/server/netio/service/handle/SummaryService.scala | Scala | apache-2.0 | 13,882 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.ops
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import org.scalatest.{FlatSpec, Matchers}
class LogicalOrSpec extends FlatSpec with Matchers {
"LogicalOr operation" should "works correctly" in {
import com.intel.analytics.bigdl.numeric.NumericBoolean
val input =
T(
Tensor(T(true, false, true)),
Tensor(T(false, false, true))
)
val expectOutput = Tensor(T(true, false, true))
val output = LogicalOr().forward(input)
output should be(expectOutput)
}
}
class LogicalOrSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val logicalOr = LogicalOr[Float].setName("logicalOr")
val input = T(Tensor[Boolean](T(true, false)), Tensor[Boolean](T(true, false)))
runSerializationTest(logicalOr, input, logicalOr
.asInstanceOf[ModuleToOperation[Float]].module.getClass)
}
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/ops/LogicalOrSpec.scala | Scala | apache-2.0 | 1,621 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils.timer
import kafka.utils.MockTime
import scala.collection.mutable
class MockTimer extends Timer {
val time = new MockTime
private val taskQueue = mutable.PriorityQueue[TimerTaskEntry]()(Ordering[TimerTaskEntry].reverse)
def add(timerTask: TimerTask) {
if (timerTask.delayMs <= 0)
timerTask.run()
else
taskQueue.enqueue(new TimerTaskEntry(timerTask, timerTask.delayMs + time.milliseconds))
}
def advanceClock(timeoutMs: Long): Boolean = {
time.sleep(timeoutMs)
var executed = false
val now = time.milliseconds
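// Note: the comparison below is strict, so a task whose expirationMs equals the advanced
// clock value stays queued until a later advanceClock call moves the clock past it.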
while (taskQueue.nonEmpty && now > taskQueue.head.expirationMs) {
val taskEntry = taskQueue.dequeue()
if (!taskEntry.cancelled) {
val task = taskEntry.timerTask
task.run()
executed = true
}
}
executed
}
def size: Int = taskQueue.size
override def shutdown(): Unit = {}
}
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/test/scala/unit/kafka/utils/timer/MockTimer.scala | Scala | apache-2.0 | 1,728 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.communication.actors
import akka.actor.{Actor, ActorRef}
import akka.util.ByteString
import org.apache.toree.communication.{ZMQMessage, SocketManager}
import org.apache.toree.utils.LogLike
import org.zeromq.ZMQ
/**
* Represents an actor containing a dealer socket.
*
* @param connection The address to connect to
* @param listener The actor to send incoming messages back to
*/
class DealerSocketActor(connection: String, listener: ActorRef)
extends Actor with LogLike
{
logger.debug(s"Initializing dealer socket actor for $connection")
private val manager: SocketManager = new SocketManager
private val socket = manager.newDealerSocket(connection, (message: Seq[String]) => {
listener ! ZMQMessage(message.map(ByteString.apply): _*)
})
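// Illustrative note: a supervising actor could create this actor via Akka Props, e.g.
//   context.actorOf(Props(classOf[DealerSocketActor], "tcp://127.0.0.1:5555", listenerRef))
// where the endpoint and listenerRef are hypothetical.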
override def postStop(): Unit = {
manager.closeSocket(socket)
}
override def receive: Actor.Receive = {
case zmqMessage: ZMQMessage =>
val frames = zmqMessage.frames.map(byteString =>
new String(byteString.toArray, ZMQ.CHARSET))
socket.send(frames: _*)
}
}
| asorianostratio/incubator-toree | communication/src/main/scala/org/apache/toree/communication/actors/DealerSocketActor.scala | Scala | apache-2.0 | 1,891 |
package com.softwaremill.bootzooka.email
class EmailTemplates {
def registrationConfirmation(userName: String): EmailSubjectContent = {
EmailTemplateRenderer("registrationConfirmation", Map("userName" -> userName))
}
def passwordReset(userName: String, resetLink: String): EmailSubjectContent = {
EmailTemplateRenderer("resetPassword", Map("userName" -> userName, "resetLink" -> resetLink))
}
def passwordChangeNotification(userName: String): EmailSubjectContent = {
EmailTemplateRenderer("passwordChangeNotification", Map("userName" -> userName))
}
def profileDetailsChangeNotification(userName: String): EmailSubjectContent = {
EmailTemplateRenderer("profileDetailsChangeNotification", Map("userName" -> userName))
}
}
| softwaremill/bootzooka | backend/src/main/scala/com/softwaremill/bootzooka/email/EmailTemplates.scala | Scala | apache-2.0 | 757 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.stat
import java.util.Random
import org.apache.commons.math3.distribution.{ExponentialDistribution,
NormalDistribution, UniformRealDistribution}
import org.apache.commons.math3.stat.inference.KolmogorovSmirnovTest
import org.apache.spark.{SparkException, SparkFunSuite}
import org.apache.spark.mllib.linalg.{DenseVector, Matrices, Vectors}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.stat.test.ChiSqTest
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
class HypothesisTestSuite extends SparkFunSuite with MLlibTestSparkContext {
test("chi squared pearson goodness of fit") {
val observed = new DenseVector(Array[Double](4, 6, 5))
val pearson = Statistics.chiSqTest(observed)
// Results validated against the R command `chisq.test(c(4, 6, 5), p=c(1/3, 1/3, 1/3))`
assert(pearson.statistic === 0.4)
assert(pearson.degreesOfFreedom === 2)
assert(pearson.pValue ~== 0.8187 relTol 1e-4)
assert(pearson.method === ChiSqTest.PEARSON.name)
assert(pearson.nullHypothesis === ChiSqTest.NullHypothesis.goodnessOfFit.toString)
// different expected and observed sum
val observed1 = new DenseVector(Array[Double](21, 38, 43, 80))
val expected1 = new DenseVector(Array[Double](3, 5, 7, 20))
val pearson1 = Statistics.chiSqTest(observed1, expected1)
// Results validated against the R command
// `chisq.test(c(21, 38, 43, 80), p=c(3/35, 1/7, 1/5, 4/7))`
assert(pearson1.statistic ~== 14.1429 relTol 1e-4)
assert(pearson1.degreesOfFreedom === 3)
assert(pearson1.pValue ~== 0.002717 relTol 1e-4)
assert(pearson1.method === ChiSqTest.PEARSON.name)
assert(pearson1.nullHypothesis === ChiSqTest.NullHypothesis.goodnessOfFit.toString)
// Vectors with different sizes
val observed3 = new DenseVector(Array(1.0, 2.0, 3.0))
val expected3 = new DenseVector(Array(1.0, 2.0, 3.0, 4.0))
intercept[IllegalArgumentException](Statistics.chiSqTest(observed3, expected3))
// negative counts in observed
val negObs = new DenseVector(Array(1.0, 2.0, 3.0, -4.0))
intercept[IllegalArgumentException](Statistics.chiSqTest(negObs, expected1))
// count = 0.0 in expected but not observed
val zeroExpected = new DenseVector(Array(1.0, 0.0, 3.0))
val inf = Statistics.chiSqTest(observed, zeroExpected)
assert(inf.statistic === Double.PositiveInfinity)
assert(inf.degreesOfFreedom === 2)
assert(inf.pValue === 0.0)
assert(inf.method === ChiSqTest.PEARSON.name)
assert(inf.nullHypothesis === ChiSqTest.NullHypothesis.goodnessOfFit.toString)
// 0.0 in expected and observed simultaneously
val zeroObserved = new DenseVector(Array(2.0, 0.0, 1.0))
intercept[IllegalArgumentException](Statistics.chiSqTest(zeroObserved, zeroExpected))
}
test("chi squared pearson matrix independence") {
val data = Array(40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0)
// [[40.0, 56.0, 31.0, 30.0],
// [24.0, 32.0, 10.0, 15.0],
// [29.0, 42.0, 0.0, 12.0]]
val chi = Statistics.chiSqTest(Matrices.dense(3, 4, data))
// Results validated against R command
// `chisq.test(rbind(c(40, 56, 31, 30),c(24, 32, 10, 15), c(29, 42, 0, 12)))`
assert(chi.statistic ~== 21.9958 relTol 1e-4)
assert(chi.degreesOfFreedom === 6)
assert(chi.pValue ~== 0.001213 relTol 1e-4)
assert(chi.method === ChiSqTest.PEARSON.name)
assert(chi.nullHypothesis === ChiSqTest.NullHypothesis.independence.toString)
// Negative counts
val negCounts = Array(4.0, 5.0, 3.0, -3.0)
intercept[IllegalArgumentException](Statistics.chiSqTest(Matrices.dense(2, 2, negCounts)))
// Row sum = 0.0
val rowZero = Array(0.0, 1.0, 0.0, 2.0)
intercept[IllegalArgumentException](Statistics.chiSqTest(Matrices.dense(2, 2, rowZero)))
// Column sum = 0.0
val colZero = Array(0.0, 0.0, 2.0, 2.0)
// IllegalArgumentException thrown here since it's thrown on driver, not inside a task
intercept[IllegalArgumentException](Statistics.chiSqTest(Matrices.dense(2, 2, colZero)))
}
test("chi squared pearson RDD[LabeledPoint]") {
// labels: 1.0 (2 / 6), 0.0 (4 / 6)
// feature1: 0.5 (1 / 6), 1.5 (2 / 6), 3.5 (3 / 6)
// feature2: 10.0 (1 / 6), 20.0 (1 / 6), 30.0 (2 / 6), 40.0 (2 / 6)
val data = Seq(
LabeledPoint(0.0, Vectors.dense(0.5, 10.0)),
LabeledPoint(0.0, Vectors.dense(1.5, 20.0)),
LabeledPoint(1.0, Vectors.dense(1.5, 30.0)),
LabeledPoint(0.0, Vectors.dense(3.5, 30.0)),
LabeledPoint(0.0, Vectors.dense(3.5, 40.0)),
LabeledPoint(1.0, Vectors.dense(3.5, 40.0)))
for (numParts <- List(2, 4, 6, 8)) {
val chi = Statistics.chiSqTest(sc.parallelize(data, numParts))
val feature1 = chi(0)
assert(feature1.statistic === 0.75)
assert(feature1.degreesOfFreedom === 2)
assert(feature1.pValue ~== 0.6873 relTol 1e-4)
assert(feature1.method === ChiSqTest.PEARSON.name)
assert(feature1.nullHypothesis === ChiSqTest.NullHypothesis.independence.toString)
val feature2 = chi(1)
assert(feature2.statistic === 1.5)
assert(feature2.degreesOfFreedom === 3)
assert(feature2.pValue ~== 0.6823 relTol 1e-4)
assert(feature2.method === ChiSqTest.PEARSON.name)
assert(feature2.nullHypothesis === ChiSqTest.NullHypothesis.independence.toString)
}
// Test that the right number of results is returned
val numCols = 1001
val sparseData = Array(
new LabeledPoint(0.0, Vectors.sparse(numCols, Seq((100, 2.0)))),
new LabeledPoint(0.1, Vectors.sparse(numCols, Seq((200, 1.0)))))
val chi = Statistics.chiSqTest(sc.parallelize(sparseData))
assert(chi.size === numCols)
assert(chi(1000) != null) // SPARK-3087
// Detect continuous features or labels
val tooManyCategories: Int = 100000
assert(tooManyCategories > ChiSqTest.maxCategories, "This unit test requires that " +
"tooManyCategories be large enough to cause ChiSqTest to throw an exception.")
val random = new Random(11L)
val continuousLabel = Seq.fill(tooManyCategories)(
LabeledPoint(random.nextDouble(), Vectors.dense(random.nextInt(2))))
intercept[SparkException] {
Statistics.chiSqTest(sc.parallelize(continuousLabel, 2))
}
val continuousFeature = Seq.fill(tooManyCategories)(
LabeledPoint(random.nextInt(2), Vectors.dense(random.nextDouble())))
intercept[SparkException] {
Statistics.chiSqTest(sc.parallelize(continuousFeature, 2))
}
}
test("1 sample Kolmogorov-Smirnov test: apache commons math3 implementation equivalence") {
// Create theoretical distributions
val stdNormalDist = new NormalDistribution(0, 1)
val expDist = new ExponentialDistribution(0.6)
val unifDist = new UniformRealDistribution()
// set seeds
val seed = 10L
stdNormalDist.reseedRandomGenerator(seed)
expDist.reseedRandomGenerator(seed)
unifDist.reseedRandomGenerator(seed)
// Sample data from the distributions and parallelize it
val n = 100000
val sampledNorm = sc.parallelize(stdNormalDist.sample(n), 10)
val sampledExp = sc.parallelize(expDist.sample(n), 10)
val sampledUnif = sc.parallelize(unifDist.sample(n), 10)
// Use a local Apache Commons Math KS test to verify calculations
val ksTest = new KolmogorovSmirnovTest()
val pThreshold = 0.05
// Comparing a standard normal sample to a standard normal distribution
val result1 = Statistics.kolmogorovSmirnovTest(sampledNorm, "norm", 0, 1)
val referenceStat1 = ksTest.kolmogorovSmirnovStatistic(stdNormalDist, sampledNorm.collect())
val referencePVal1 = 1 - ksTest.cdf(referenceStat1, n)
// Verify vs apache math commons ks test
assert(result1.statistic ~== referenceStat1 relTol 1e-4)
assert(result1.pValue ~== referencePVal1 relTol 1e-4)
// Cannot reject null hypothesis
assert(result1.pValue > pThreshold)
// Comparing an exponential sample to a standard normal distribution
val result2 = Statistics.kolmogorovSmirnovTest(sampledExp, "norm", 0, 1)
val referenceStat2 = ksTest.kolmogorovSmirnovStatistic(stdNormalDist, sampledExp.collect())
val referencePVal2 = 1 - ksTest.cdf(referenceStat2, n)
// verify vs apache math commons ks test
assert(result2.statistic ~== referenceStat2 relTol 1e-4)
assert(result2.pValue ~== referencePVal2 relTol 1e-4)
// reject null hypothesis
assert(result2.pValue < pThreshold)
// Testing the use of a user provided CDF function
// Distribution is not serializable, so will have to create in the lambda
val expCDF = (x: Double) => new ExponentialDistribution(0.2).cumulativeProbability(x)
// Comparing an exponential sample with mean X to an exponential distribution with mean Y
// Where X != Y
val result3 = Statistics.kolmogorovSmirnovTest(sampledExp, expCDF)
val referenceStat3 = ksTest.kolmogorovSmirnovStatistic(new ExponentialDistribution(0.2),
sampledExp.collect())
val referencePVal3 = 1 - ksTest.cdf(referenceStat3, sampledNorm.count().toInt)
// verify vs apache math commons ks test
assert(result3.statistic ~== referenceStat3 relTol 1e-4)
assert(result3.pValue ~== referencePVal3 relTol 1e-4)
// reject null hypothesis
assert(result3.pValue < pThreshold)
}
test("1 sample Kolmogorov-Smirnov test: R implementation equivalence") {
/*
Comparing results with R's implementation of Kolmogorov-Smirnov for 1 sample
> sessionInfo()
R version 3.2.0 (2015-04-16)
Platform: x86_64-apple-darwin13.4.0 (64-bit)
> set.seed(20)
> v <- rnorm(20)
> v
[1] 1.16268529 -0.58592447 1.78546500 -1.33259371 -0.44656677 0.56960612
[7] -2.88971761 -0.86901834 -0.46170268 -0.55554091 -0.02013537 -0.15038222
[13] -0.62812676 1.32322085 -1.52135057 -0.43742787 0.97057758 0.02822264
[19] -0.08578219 0.38921440
> ks.test(v, pnorm, alternative = "two.sided")
One-sample Kolmogorov-Smirnov test
data: v
D = 0.18874, p-value = 0.4223
alternative hypothesis: two-sided
*/
val rKSStat = 0.18874
val rKSPVal = 0.4223
val rData = sc.parallelize(
Array(
1.1626852897838, -0.585924465893051, 1.78546500331661, -1.33259371048501,
-0.446566766553219, 0.569606122374976, -2.88971761441412, -0.869018343326555,
-0.461702683149641, -0.555540910137444, -0.0201353678515895, -0.150382224136063,
-0.628126755843964, 1.32322085193283, -1.52135057001199, -0.437427868856691,
0.970577579543399, 0.0282226444247749, -0.0857821886527593, 0.389214404984942
)
)
val rCompResult = Statistics.kolmogorovSmirnovTest(rData, "norm", 0, 1)
assert(rCompResult.statistic ~== rKSStat relTol 1e-4)
assert(rCompResult.pValue ~== rKSPVal relTol 1e-4)
}
}
| akopich/spark | mllib/src/test/scala/org/apache/spark/mllib/stat/HypothesisTestSuite.scala | Scala | apache-2.0 | 11,772 |
package vultura.factor.generation
import org.specs2.Specification
import vultura.factor.generation.graph._
class GraphGeneratorTest extends Specification {
def is =
s2"""Check constant degree of some graphs
|
| toroidal 2d lattice has only nodes of degree 4 ${allDegreesAre(_ == 4)(lattice(3 -> true, 3 -> true))}
| toroidal 3d lattice has only nodes of degree 6 ${allDegreesAre(_ == 6)(lattice(3 -> true, 3 -> true, 3 -> true))}
|
| complete graph 6 has only nodes of degree 5 ${allDegreesAre(_ == 5)(complete(6))}
| empty graph 6 has only nodes of degree 0 ${allDegreesAre(_ == 0)(unconnected(6))}
|
| 2d lattice with one wrapping dim has degrees of 3 and 4 ${allDegreesAre(Set(3,4))(lattice(3 -> true, 3 -> false))}
|
| 2d grid 3x4 has 12 nodes ${lattice(3 -> false, 4 -> false).nodes.size === 12}
|""".stripMargin
def degreesOf[N](g: Graph[N]): Seq[Int] = g.nodes.toSeq.map(g.neighboursOf).map(_.size)
def allDegreesAre[N](cond: Int => Boolean)(g: Graph[N]) = forall(degreesOf(g))(cond)
}
| ziggystar/vultura-factor | src/test/scala/vultura/factor/generation/GraphGeneratorTest.scala | Scala | mit | 1,100 |
package org.vaadin.hezamu.dungeongame
import rx.lang.scala.Observable
import rx.lang.scala.Observer
import vaadin.scala._
import vaadin.scala.server.FontAwesome
import org.vaadin.hezamu.dungeongame.model._
import Implicits._
class DungeonGameUI extends UI(title = "Dungeon Game", theme = "dungeongame") {
content = new VerticalLayout {
margin = true
val board = add(new GameBoard, ratio = 1, alignment = Alignment.TopCenter)
val up = add(new Button {
icon = FontAwesome.ArrowUp
clickShortcut = KeyShortcut(KeyCode.ArrowUp)
}, alignment = Alignment.BottomCenter)
val left = add(new Button {
icon = FontAwesome.ArrowLeft
clickShortcut = KeyShortcut(KeyCode.ArrowLeft)
})
val right = add(new Button {
icon = FontAwesome.ArrowRight
clickShortcut = KeyShortcut(KeyCode.ArrowRight)
})
add(new HorizontalLayout {
add(left)
add(right)
}, alignment = Alignment.MiddleCenter)
val down = add(new Button {
icon = FontAwesome.ArrowDown
clickShortcut = KeyShortcut(KeyCode.ArrowDown)
}, alignment = Alignment.TopCenter)
setupUILogic(board, up, down, left, right)
}
def setupUILogic(board: GameBoard, up: Button, down: Button, left: Button, right: Button) = {
def tryMove(delta: Cell) = board.dungeon.playerPosition map { cell =>
cell + delta
} filter board.dungeon.canMoveTo
val movesColl = Vector(
up.clickEvents map { e => tryMove(Cell(0, -1)) },
down.clickEvents map { e => tryMove(Cell(0, 1)) },
left.clickEvents map { e => tryMove(Cell(-1, 0)) },
right.clickEvents map { e => tryMove(Cell(1, 0)) })
// Emit an Option[Cell] every time the player tries to move
val moves = Observable from movesColl flatten
// Map a legal destination cell to the set of visible cells after the move
val visibleCells = moves collect {
case Some(cell) =>
board.dungeon.playerOpt foreach { board.dungeon.put(_, cell) }
board.dungeon.visibleIlluminatedCells
}
// Subscribe the game board instance to the stream of legal moves.
// This will call board.onNext() with a set of visible cells whenever
// the player performs a legal move.
visibleCells subscribe board
// Subscribe to the illegal move stream to show a notification every
// time the player tries an illegal move.
moves collect { case None => None } subscribe { none =>
Notification.show("That direction is blocked", Notification.Type.Tray)
}
}
}
class GameBoard extends GridLayout with Observer[Set[Cell]] {
val dungeon = new Dungeon {
floors = (for {
x <- 1 to 15
y <- 1 to 15
} yield Cell(x, y)).toSet
entities = entities.updated(new Player, randomFreeCell)
0 to 6 foreach { i => entities = entities.updated(new NPC, randomFreeCell) }
}
onNext(dungeon.visibleIlluminatedCells) // Initial draw
override def onNext(cells: Set[Cell]) {
removeAllComponents
rows = 17
columns = 17
drawBorders
cells foreach { cell =>
val style = dungeon.entityAt(cell) map {
_._1 match {
case _: Player => "player"
case _: NPC => "monster"
}
} getOrElse {
"floor"
}
add(new Label {
styleName = s"dungeoncell $style"
}, cell.x, cell.y, alignment = Alignment.MiddleCenter)
}
}
private def drawBorders {
val horizBorders = for (c <- 0 until columns; r <- List(0, rows - 1)) yield (c, r)
val vertBorders = for (r <- 1 until rows - 1; c <- List(0, columns - 1)) yield (c, r)
(horizBorders ++ vertBorders) foreach {
case (c, r) => add(new Label {
styleName = "dungeoncell wall"
}, c, r, alignment = Alignment.MiddleCenter)
}
}
}
| hezamu/DungeonGame | src/main/scala/org/vaadin/hezamu/dungeongame/DungeonGameUI.scala | Scala | apache-2.0 | 3,771 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio
import com.google.api.client.http.javanet.NetHttpTransport
import com.google.api.client.http.{GenericUrl, HttpRequest, HttpRequestInitializer}
import com.google.api.client.json.JsonObjectParser
import com.google.api.client.json.jackson2.JacksonFactory
import org.apache.beam.sdk.util.ReleaseInfo
import org.apache.beam.sdk.{PipelineResult, PipelineRunner}
import org.slf4j.LoggerFactory
import scala.io.AnsiColor._
import scala.jdk.CollectionConverters._
import scala.collection.mutable
import scala.util.Try
private[scio] object VersionUtil {
case class SemVer(major: Int, minor: Int, rev: Int, suffix: String) extends Ordered[SemVer] {
def compare(that: SemVer): Int =
Ordering[(Int, Int, Int, String)].compare(SemVer.unapply(this).get, SemVer.unapply(that).get)
}
private[this] val Timeout = 3000
private[this] val Url = "https://api.github.com/repos/spotify/scio/releases"
/**
* Example versions:
*   version = "0.10.0-beta1+42-828dca9a-SNAPSHOT"
*   version = "0.10.0-beta1"
*   version = "0.10.0-SNAPSHOT"
*/
private[this] val Pattern = """v?(\d+)\.(\d+).(\d+)((-\w+)?(\+\d+-\w+(\+\d+-\d+)?(-\w+)?)?)?""".r
private[this] val Logger = LoggerFactory.getLogger(this.getClass)
private[this] val MessagePattern: (String, String) => String = (version, url) => s"""
| $YELLOW>$BOLD Scio $version introduced some breaking changes in the API.$RESET
| $YELLOW>$RESET Follow the migration guide to upgrade: $url.
| $YELLOW>$RESET Scio provides automatic migration rules (See migration guide).
""".stripMargin
private[this] val NewerVersionPattern: (String, String) => String = (current, v) => s"""
| $YELLOW>$BOLD A newer version of Scio is available: $current -> $v$RESET
| $YELLOW>$RESET Use `-Dscio.ignoreVersionWarning=true` to disable this check.$RESET
|""".stripMargin
private lazy val latest: Option[String] = Try {
val transport = new NetHttpTransport()
val response = transport
.createRequestFactory(new HttpRequestInitializer {
override def initialize(request: HttpRequest): Unit = {
request.setConnectTimeout(Timeout)
request.setReadTimeout(Timeout)
request.setParser(new JsonObjectParser(new JacksonFactory))
()
}
})
.buildGetRequest(new GenericUrl(Url))
.execute()
.parseAs(classOf[java.util.List[java.util.Map[String, AnyRef]]])
response.asScala
.filter(node => !node.get("prerelease").asInstanceOf[Boolean])
.find(node => !node.get("draft").asInstanceOf[Boolean])
.map(latestNode => latestNode.get("tag_name").asInstanceOf[String])
}.toOption.flatten
private def parseVersion(version: String): SemVer = {
val m = Pattern.findFirstMatchIn(version).get
// When there is no suffix, use a high sentinel so that final releases compare greater
// than "-SNAPSHOT" (and other suffixed) builds of the same numeric version
val snapshot = if (!m.group(4).isEmpty()) m.group(4).toUpperCase else "\uffff"
SemVer(m.group(1).toInt, m.group(2).toInt, m.group(3).toInt, snapshot)
}
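// For reference, the pattern and sentinel above yield, for example:
//   parseVersion("0.10.0")          == SemVer(0, 10, 0, "\uffff")
//   parseVersion("0.10.0-SNAPSHOT") == SemVer(0, 10, 0, "-SNAPSHOT")
//   parseVersion("0.10.0-beta1")    == SemVer(0, 10, 0, "-BETA1")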
private[scio] def ignoreVersionCheck: Boolean =
sys.props.get("scio.ignoreVersionWarning").exists(_.trim == "true")
private def messages(current: SemVer, latest: SemVer): Option[String] = (current, latest) match {
case (SemVer(0, minor, _, _), SemVer(0, 7, _, _)) if minor < 7 =>
Some(
MessagePattern("0.7", "https://spotify.github.io/scio/migrations/v0.7.0-Migration-Guide")
)
case (SemVer(0, minor, _, _), SemVer(0, 8, _, _)) if minor < 8 =>
Some(
MessagePattern("0.8", "https://spotify.github.io/scio/migrations/v0.8.0-Migration-Guide")
)
case (SemVer(0, minor, _, _), SemVer(0, 9, _, _)) if minor < 9 =>
Some(
MessagePattern("0.9", "https://spotify.github.io/scio/migrations/v0.9.0-Migration-Guide")
)
case (SemVer(0, minor, _, _), SemVer(0, 10, _, _)) if minor < 10 =>
Some(
MessagePattern("0.10", "https://spotify.github.io/scio/migrations/v0.10.0-Migration-Guide")
)
case _ => None
}
def checkVersion(
current: String,
latestOverride: Option[String] = None,
ignore: Boolean = ignoreVersionCheck
): Seq[String] =
if (ignore) {
Nil
} else {
val buffer = mutable.Buffer.empty[String]
val v1 = parseVersion(current)
if (v1.suffix.contains("-SNAPSHOT")) {
buffer.append(s"Using a SNAPSHOT version of Scio: $current")
}
latestOverride.orElse(latest).foreach { v =>
val v2 = parseVersion(v)
if (v2 > v1) {
buffer.append(NewerVersionPattern(current, v))
messages(v1, v2).foreach(buffer.append(_))
}
}
buffer.toSeq
}
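// For example, checkVersion("0.9.5", Some("v0.10.0"), ignore = false) returns the
// newer-version banner followed by the 0.10 migration-guide message.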
def checkVersion(): Unit = checkVersion(BuildInfo.version).foreach(Logger.warn)
def checkRunnerVersion(runner: Class[_ <: PipelineRunner[_ <: PipelineResult]]): Unit = {
val name = runner.getSimpleName
val version = ReleaseInfo.getReleaseInfo.getVersion
if (version != BuildInfo.beamVersion) {
Logger.warn(
s"Mismatched version for $name, expected: ${BuildInfo.beamVersion}, actual: $version"
)
}
}
}
| spotify/scio | scio-core/src/main/scala/com/spotify/scio/VersionUtil.scala | Scala | apache-2.0 | 5,695 |
package edu.ucsc.dbtune.cli
import java.sql.Connection
import edu.ucsc.dbtune.DatabaseSystem
import edu.ucsc.dbtune.metadata.Catalog
import edu.ucsc.dbtune.metadata.ColumnOrdering
import edu.ucsc.dbtune.metadata.Index
import edu.ucsc.dbtune.optimizer.ExplainedSQLStatement
import edu.ucsc.dbtune.optimizer.Optimizer
import edu.ucsc.dbtune.util.Environment
import edu.ucsc.dbtune.util.MetadataUtils
import com.google.common.collect.Sets
import java.util.HashSet
import java.util.Properties
import java.util.Set
import edu.ucsc.dbtune.DatabaseSystem._
import edu.ucsc.dbtune.util.Environment.getInstance
import edu.ucsc.dbtune.util.EnvironmentProperties.IBG
import edu.ucsc.dbtune.util.EnvironmentProperties.INUM
import edu.ucsc.dbtune.util.EnvironmentProperties.JDBC_URL
import edu.ucsc.dbtune.util.EnvironmentProperties.USERNAME
import edu.ucsc.dbtune.util.EnvironmentProperties.OPTIMIZER
import edu.ucsc.dbtune.util.EnvironmentProperties.PASSWORD
/** The Scala-fied interface to a DBMS.
*/
class Database(dbms: DatabaseSystem) extends Catalog(dbms.getCatalog) {
val DBMS = dbms
/** Recommends indexes for the given SQL statement
*
* @param sql
* sql statement
* @return
* a configuration
*/
def recommend(sql:String) : Set[Index] = {
dbms.getOptimizer.recommendIndexes(sql)
}
/** Explains a SQL statement
*
* @param sql
* sql statement
* @return
* the explained statement
*/
def explain(sql:String) : ExplainedSQLStatement = {
dbms.getOptimizer.explain(sql)
}
/** Explains a SQL statement
*
* @param sql
* sql statement
* @param conf
* configuration to be used
* @return
* the explained statement
*/
def explain(sql:String, conf:Set[Index]) : ExplainedSQLStatement = {
dbms.getOptimizer.explain(sql, conf)
}
/** Closes the connection to the DBMS
*/
def close() = {
dbms.getConnection.close
}
/** Returns the underlying JDBC connection
*
* @return
* the JDBC connection
*/
def connection() : Connection = {
dbms.getConnection
}
/** Returns the optimizer associated with the underlying DBMS
*
* @return
* the optimizer
*/
def optimizer() : Optimizer = {
dbms.getOptimizer
}
/** creates an index.
*
* @param orderingSpec
* specification of columns and their ordering
* @return
* new index
*/
def newIndex(orderingSpec: String) : Index = {
DBMS.newIndex(ColumnOrdering.newOrdering(dbms.getCatalog, orderingSpec))
}
/** creates a set of indexes by reading their definition from a file.
*
* @param fileName
* absolute or relative path to a file containing the definition of a set of indexes to load
* @return
* a set of indexes that are read from a file
*/
def loadIndexes(fileName: String) : Set[Index] = {
MetadataUtils.loadIndexes(DBMS, fileName)
}
}
object Database
{
/** connects to a database.
*
* @param url
* JDBC url
* @param usr
* username used to authenticate
* @param pwd
* password used to authenticate
* @return
* a database instance
*/
def connect(url:String, usr:String, pwd:String) : Database = {
var env = Environment.getInstance
env.setProperty(USERNAME, usr)
env.setProperty(PASSWORD, pwd)
env.setProperty(JDBC_URL, url)
//env.setProperty(OPTIMIZER, INUM + "," + IBG)
env.setProperty(OPTIMIZER, IBG)
System.out.println("Extracting metadata...")
new Database(newDatabaseSystem(env))
}
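// Illustrative usage sketch (the JDBC URL and credentials below are hypothetical):
//   val db = Database.connect("jdbc:mysql://localhost/tpch", "dbtune", "secret")
//   val indexes = db.recommend("SELECT * FROM lineitem WHERE l_quantity > 40")
//   db.close()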
}
| dbgroup-at-ucsc/dbtune | extensions/cli/src/edu/ucsc/dbtune/cli/Database.scala | Scala | bsd-3-clause | 3,576 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight, BuildSide}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.exchange.{EnsureRequirements, ShuffleExchangeExec}
import org.apache.spark.sql.execution.joins.BroadcastHashJoinExec
import org.apache.spark.sql.internal.SQLConf
/**
* A rule to optimize the shuffle reader to local reader iff no additional shuffles
* will be introduced:
* 1. if the input plan is a shuffle, add local reader directly as we can never introduce
* extra shuffles in this case.
* 2. otherwise, add local reader to the probe side of broadcast hash join and
* then run `EnsureRequirements` to check whether additional shuffle introduced.
* If introduced, we will revert all the local readers.
*/
case class OptimizeLocalShuffleReader(conf: SQLConf) extends Rule[SparkPlan] {
import OptimizeLocalShuffleReader._
private val ensureRequirements = EnsureRequirements(conf)
// The build side is a broadcast query stage which should have been optimized using local reader
// already. So we only need to deal with probe side here.
private def createProbeSideLocalReader(plan: SparkPlan): SparkPlan = {
val optimizedPlan = plan.transformDown {
case join @ BroadcastJoinWithShuffleLeft(shuffleStage, BuildRight) =>
val localReader = createLocalReader(shuffleStage)
join.asInstanceOf[BroadcastHashJoinExec].copy(left = localReader)
case join @ BroadcastJoinWithShuffleRight(shuffleStage, BuildLeft) =>
val localReader = createLocalReader(shuffleStage)
join.asInstanceOf[BroadcastHashJoinExec].copy(right = localReader)
}
val numShuffles = ensureRequirements.apply(optimizedPlan).collect {
case e: ShuffleExchangeExec => e
}.length
// Check whether an additional shuffle was introduced. If so, revert the local readers.
if (numShuffles > 0) {
logDebug("OptimizeLocalShuffleReader rule is not applied because" +
" additional shuffles would be introduced.")
plan
} else {
optimizedPlan
}
}
private def createLocalReader(plan: SparkPlan): CustomShuffleReaderExec = {
plan match {
case c @ CustomShuffleReaderExec(s: ShuffleQueryStageExec, _) =>
CustomShuffleReaderExec(s, getPartitionSpecs(s, Some(c.partitionSpecs.length)))
case s: ShuffleQueryStageExec =>
CustomShuffleReaderExec(s, getPartitionSpecs(s, None))
}
}
// TODO: this method assumes all shuffle blocks are the same data size. We should calculate the
// partition start indices based on block size to avoid data skew.
private def getPartitionSpecs(
shuffleStage: ShuffleQueryStageExec,
advisoryParallelism: Option[Int]): Seq[ShufflePartitionSpec] = {
val numMappers = shuffleStage.shuffle.numMappers
val numReducers = shuffleStage.shuffle.numPartitions
val expectedParallelism = advisoryParallelism.getOrElse(numReducers)
val splitPoints = if (numMappers == 0) {
Seq.empty
} else {
equallyDivide(numReducers, math.max(1, expectedParallelism / numMappers))
}
(0 until numMappers).flatMap { mapIndex =>
(splitPoints :+ numReducers).sliding(2).map {
case Seq(start, end) => PartialMapperPartitionSpec(mapIndex, start, end)
}
}
}
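// Example: with numMappers = 2, numReducers = 10 and advisoryParallelism = Some(6),
// each mapper's output is split into the reducer ranges [0, 4), [4, 7) and [7, 10),
// yielding 6 PartialMapperPartitionSpecs (local-read partitions) in total.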
/**
* To equally divide n elements into m buckets, basically each bucket should have n/m elements,
* for the remaining n%m elements, add one more element to the first n%m buckets each. Returns
* a sequence with length numBuckets and each value represents the start index of each bucket.
*/
private def equallyDivide(numElements: Int, numBuckets: Int): Seq[Int] = {
val elementsPerBucket = numElements / numBuckets
val remaining = numElements % numBuckets
val splitPoint = (elementsPerBucket + 1) * remaining
(0 until remaining).map(_ * (elementsPerBucket + 1)) ++
(remaining until numBuckets).map(i => splitPoint + (i - remaining) * elementsPerBucket)
}
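// Example: equallyDivide(10, 3) returns Seq(0, 4, 7), i.e. buckets [0, 4), [4, 7) and
// [7, 10) of sizes 4, 3 and 3; the first 10 % 3 = 1 bucket gets one extra element.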
override def apply(plan: SparkPlan): SparkPlan = {
if (!conf.getConf(SQLConf.LOCAL_SHUFFLE_READER_ENABLED)) {
return plan
}
plan match {
case s: SparkPlan if canUseLocalShuffleReader(s) =>
createLocalReader(s)
case s: SparkPlan =>
createProbeSideLocalReader(s)
}
}
}
object OptimizeLocalShuffleReader {
object BroadcastJoinWithShuffleLeft {
def unapply(plan: SparkPlan): Option[(SparkPlan, BuildSide)] = plan match {
case join: BroadcastHashJoinExec if canUseLocalShuffleReader(join.left) =>
Some((join.left, join.buildSide))
case _ => None
}
}
object BroadcastJoinWithShuffleRight {
def unapply(plan: SparkPlan): Option[(SparkPlan, BuildSide)] = plan match {
case join: BroadcastHashJoinExec if canUseLocalShuffleReader(join.right) =>
Some((join.right, join.buildSide))
case _ => None
}
}
def canUseLocalShuffleReader(plan: SparkPlan): Boolean = plan match {
case s: ShuffleQueryStageExec =>
s.shuffle.canChangeNumPartitions && s.mapStats.isDefined
case CustomShuffleReaderExec(s: ShuffleQueryStageExec, partitionSpecs) =>
s.shuffle.canChangeNumPartitions && s.mapStats.isDefined && partitionSpecs.nonEmpty
case _ => false
}
}
| rednaxelafx/apache-spark | sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/OptimizeLocalShuffleReader.scala | Scala | apache-2.0 | 6,182 |
package beam.calibration
import java.nio.file.Paths
import beam.calibration.BeamSigoptTuner.{createExperiment, fetchExperiment}
import beam.experiment.ExperimentDef
import com.sigopt.model.Experiment
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.scalalogging.LazyLogging
import scala.util.Try
case class SigoptExperimentData(
experimentDef: ExperimentDef,
benchmarkFileLoc: String,
experimentId: String,
development: Boolean = false
) extends LazyLogging {
val baseConfig: Config =
ConfigFactory
.parseFile(Paths.get(experimentDef.getHeader.getBeamTemplateConfPath).toFile)
.withFallback(ConfigFactory.parseString(s"config=${experimentDef.getHeader.getBeamTemplateConfPath}"))
// Always default to single JVM if incorrect entry
val numWorkers: Int = Try {
experimentDef.header.numWorkers.toInt
}.getOrElse(1)
val isParallel: Boolean = numWorkers > 1
val isMaster: Boolean = experimentId == "None"
val experiment: Experiment =
fetchExperiment(experimentId) match {
case Some(foundExperiment) =>
logger.info(s"Retrieved the existing experiment with experiment id $experimentId")
if (isParallel) {
Experiment.update(foundExperiment.getId).data(s"""{"parallel_bandwidth":$numWorkers}""").call()
}
foundExperiment
case None =>
val createdExperiment: Experiment = createExperiment(experimentDef)
logger.info("New Experiment created with experimentId [" + createdExperiment.getId + "]")
createdExperiment
}
}
| colinsheppard/beam | src/main/scala/beam/calibration/SigoptExperimentData.scala | Scala | gpl-3.0 | 1,565 |
package skadi.container.processing
import org.junit.{Assert, Test}
import skadi.beans.{Bean, Beans, Prop}
class PropertiesResolverTest {
// class under test
val resolver = new PropertiesResolver
@Test
def testProcess {
val beans = Beans(
new Bean named 'postDao
implementedWith classOf[com.sample.app.dao.PostDaoImpl],
new Bean named 'userDao
implementedWith classOf[com.sample.app.dao.UserDaoImpl],
new Bean named 'userService
implementedWith classOf[com.sample.app.service.UserServiceImpl]
constructorArgs('userDao, 'postDao, Prop("max.users"))
inject("${max.posts}" -> 'maxPosts),
new Bean named 'admin
implementedWith classOf[com.sample.app.model.User]
constructorArgs("${user.name}", "${admin.pass}"),
new Bean implementedWith classOf[skadi.util.PropertiesHandle]
inject("app.properties;props.xml" -> 'files)
)
val processedBeans = resolver.process(beans)
val userServiceBean = processedBeans(2)
Assert.assertEquals("100", userServiceBean.injectables.first._1)
Assert.assertEquals("5", userServiceBean.args(2))
val adminBean = processedBeans(3)
val username = System.getProperty("user.name")
Assert.assertEquals(username, adminBean.args(0))
Assert.assertEquals("adminadmin", adminBean.args(1))
}
}
| nmilinkovic/Skadi | src/test/scala/skadi/container/processing/PropertiesResolverTest.scala | Scala | bsd-3-clause | 1,342 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LeafNode
import org.apache.spark.sql.catalyst.plans.logical.Statistics
import org.apache.spark.sql.execution.LeafExecNode
import org.apache.spark.sql.execution.datasources.DataSource
object StreamingRelation {
def apply(dataSource: DataSource): StreamingRelation = {
StreamingRelation(
dataSource, dataSource.sourceInfo.name, dataSource.sourceInfo.schema.toAttributes)
}
}
/**
* Used to link a streaming [[DataSource]] into a
* [[org.apache.spark.sql.catalyst.plans.logical.LogicalPlan]]. This is only used for creating
* a streaming [[org.apache.spark.sql.DataFrame]] from [[org.apache.spark.sql.DataFrameReader]].
* It should be used to create [[Source]] and converted to [[StreamingExecutionRelation]] when
* passing to [[StreamExecution]] to run a query.
*/
case class StreamingRelation(dataSource: DataSource, sourceName: String, output: Seq[Attribute])
extends LeafNode {
override def isStreaming: Boolean = true
override def toString: String = sourceName
}
/**
* Used to link a streaming [[Source]] of data into a
* [[org.apache.spark.sql.catalyst.plans.logical.LogicalPlan]].
*/
case class StreamingExecutionRelation(
source: Source,
output: Seq[Attribute])(session: SparkSession)
extends LeafNode {
override def isStreaming: Boolean = true
override def toString: String = source.toString
// There's no sensible value here. On the execution path, this relation will be
// swapped out with microbatches. But some dataframe operations (in particular explain) do lead
// to this node surviving analysis. So we satisfy the LeafNode contract with the session default
// value.
override def computeStats(): Statistics = Statistics(
sizeInBytes = BigInt(session.sessionState.conf.defaultSizeInBytes)
)
}
/**
* A dummy physical plan for [[StreamingRelation]] to support
* [[org.apache.spark.sql.Dataset.explain]]
*/
case class StreamingRelationExec(sourceName: String, output: Seq[Attribute]) extends LeafExecNode {
override def toString: String = sourceName
override protected def doExecute(): RDD[InternalRow] = {
throw new UnsupportedOperationException("StreamingRelationExec cannot be executed")
}
}
object StreamingExecutionRelation {
def apply(source: Source, session: SparkSession): StreamingExecutionRelation = {
StreamingExecutionRelation(source, source.schema.toAttributes)(session)
}
}
| minixalpha/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamingRelation.scala | Scala | apache-2.0 | 3,468 |
package gh.test.gh2011.event
import gh2011.events.WatchEventParser
import net.liftweb.json._
import org.scalatest.{FlatSpec, Matchers}
class WatchEventTest extends FlatSpec with Matchers
{
"A valid WatchEvent" must "be correctly parsed" in {
val json = parse(
"""
| {
|
| "repo":{
| "id":760797,
| "url":"https://api.github.dev/repos/jruby/using_jruby",
| "name":"jruby/using_jruby"
| },
| "type":"WatchEvent",
| "org":{
| "gravatar_id":"4f169ebd30ce70024c64ff027828ae94",
| "id":55687,
| "url":"https://api.github.dev/orgs/jruby",
| "avatar_url":"https://secure.gravatar.com/avatar/4f169ebd30ce70024c64ff027828ae94?d=http://github.dev%2Fimages%2Fgravatars%2Fgravatar-org-420.png",
| "login":"jruby"
| },
| "public":true,
| "created_at":"2011-02-12T17:30:20Z",
| "payload":{
| "repo":"jruby/using_jruby",
| "actor":"fbehrens",
| "actor_gravatar":"07426bc321f9f519e7545e650c6cbe3b",
| "action":"started"
| },
| "actor":{
| "gravatar_id":"07426bc321f9f519e7545e650c6cbe3b",
| "id":13704,
| "url":"https://api.github.dev/users/fbehrens",
| "avatar_url":"https://secure.gravatar.com/avatar/07426bc321f9f519e7545e650c6cbe3b?d=http://github.dev%2Fimages%2Fgravatars%2Fgravatar-user-420.png",
| "login":"fbehrens"
| },
| "id":"1128376088"
|
|}
""".stripMargin)
gh2011.parser(WatchEventParser)(json).isDefined shouldBe true
}
}
| mgoeminne/github_etl | src/test/scala/gh/test/gh2011/event/WatchEventTest.scala | Scala | mit | 1,878 |
package models
import net.fwbrasil.activate.ActivateContext
import net.fwbrasil.activate.storage.relational.idiom.postgresqlDialect
import net.fwbrasil.activate.storage.relational.PooledJdbcRelationalStorage
import net.fwbrasil.activate.storage.relational.idiom.mySqlDialect
import play.api.Play
import net.fwbrasil.activate.OptimisticOfflineLocking
object persistenceContext extends ActivateContext {
require(OptimisticOfflineLocking.isEnabled)
require(OptimisticOfflineLocking.validateReads)
private def config = Play.current.configuration
val storage = new PooledJdbcRelationalStorage {
val jdbcDriver = config.getString("db.default.driver").get
val user = config.getString("db.default.user").get
val password = config.getString("db.default.password").get
val url = config.getString("db.default.url").get
val dialect = mySqlDialect
override val poolSize = 400
}
val indexWorldByLegacyId = memoryIndex[ActivateWorld].on(_.legacyId)
val indexFortuneAll = memoryIndex[ActivateFortune].on(_ => 1)
} | seem-sky/FrameworkBenchmarks | play-activate-mysql/app/models/PersistenceContext.scala | Scala | bsd-3-clause | 1,085 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.util.Locale
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag
import com.google.common.primitives.UnsignedLong
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.mapreduce.{JobContext, TaskAttemptContext}
import org.apache.parquet.column.{Encoding, ParquetProperties}
import org.apache.parquet.column.ParquetProperties.WriterVersion.PARQUET_1_0
import org.apache.parquet.example.data.Group
import org.apache.parquet.example.data.simple.{SimpleGroup, SimpleGroupFactory}
import org.apache.parquet.hadoop._
import org.apache.parquet.hadoop.example.ExampleParquetWriter
import org.apache.parquet.hadoop.metadata.CompressionCodecName
import org.apache.parquet.hadoop.metadata.CompressionCodecName.GZIP
import org.apache.parquet.schema.{MessageType, MessageTypeParser}
import org.apache.spark.{SPARK_VERSION_SHORT, SparkException}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.{InternalRow, ScalaReflection}
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeRow}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.datasources.SQLHadoopMapReduceCommitProtocol
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
* A test suite that tests basic Parquet I/O.
*/
class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession {
import testImplicits._
/**
* Writes `data` to a Parquet file, reads it back and check file contents.
*/
protected def checkParquetFile[T <: Product : ClassTag: TypeTag](data: Seq[T]): Unit = {
withParquetDataFrame(data)(r => checkAnswer(r, data.map(Row.fromTuple)))
}
test("basic data types (without binary)") {
val data = (1 to 4).map { i =>
(i % 2 == 0, i, i.toLong, i.toFloat, i.toDouble)
}
checkParquetFile(data)
}
test("raw binary") {
val data = (1 to 4).map(i => Tuple1(Array.fill(3)(i.toByte)))
withParquetDataFrame(data) { df =>
assertResult(data.map(_._1.mkString(",")).sorted) {
df.collect().map(_.getAs[Array[Byte]](0).mkString(",")).sorted
}
}
}
test("SPARK-11694 Parquet logical types are not being tested properly") {
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required int32 a(INT_8);
| required int32 b(INT_16);
| required int32 c(DATE);
| required int32 d(DECIMAL(1,0));
| required int64 e(DECIMAL(10,0));
| required binary f(UTF8);
| required binary g(ENUM);
| required binary h(DECIMAL(32,0));
| required fixed_len_byte_array(32) i(DECIMAL(32,0));
| required int64 j(TIMESTAMP_MILLIS);
| required int64 k(TIMESTAMP_MICROS);
|}
""".stripMargin)
val expectedSparkTypes = Seq(ByteType, ShortType, DateType, DecimalType(1, 0),
DecimalType(10, 0), StringType, StringType, DecimalType(32, 0), DecimalType(32, 0),
TimestampType, TimestampType)
withTempPath { location =>
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf)
readParquetFile(path.toString)(df => {
val sparkTypes = df.schema.map(_.dataType)
assert(sparkTypes === expectedSparkTypes)
})
}
}
test("string") {
val data = (1 to 4).map(i => Tuple1(i.toString))
// Property spark.sql.parquet.binaryAsString shouldn't affect Parquet files written by Spark SQL
// as we store Spark SQL schema in the extra metadata.
withSQLConf(SQLConf.PARQUET_BINARY_AS_STRING.key -> "false")(checkParquetFile(data))
withSQLConf(SQLConf.PARQUET_BINARY_AS_STRING.key -> "true")(checkParquetFile(data))
}
testStandardAndLegacyModes("fixed-length decimals") {
def makeDecimalRDD(decimal: DecimalType): DataFrame = {
spark
.range(1000)
// Parquet doesn't allow column names with spaces, have to add an alias here.
// Minus 500 here so that negative decimals are also tested.
.select((('id - 500) / 100.0) cast decimal as 'dec)
.coalesce(1)
}
val combinations = Seq((5, 2), (1, 0), (1, 1), (18, 10), (18, 17), (19, 0), (38, 37))
for ((precision, scale) <- combinations) {
withTempPath { dir =>
val data = makeDecimalRDD(DecimalType(precision, scale))
data.write.parquet(dir.getCanonicalPath)
readParquetFile(dir.getCanonicalPath) { df => {
checkAnswer(df, data.collect().toSeq)
}}
}
}
}
test("date type") {
def makeDateRDD(): DataFrame =
sparkContext
.parallelize(0 to 1000)
.map(i => Tuple1(DateTimeUtils.toJavaDate(i)))
.toDF()
.select($"_1")
withTempPath { dir =>
val data = makeDateRDD()
data.write.parquet(dir.getCanonicalPath)
readParquetFile(dir.getCanonicalPath) { df =>
checkAnswer(df, data.collect().toSeq)
}
}
}
testStandardAndLegacyModes("map") {
val data = (1 to 4).map(i => Tuple1(Map(i -> s"val_$i")))
checkParquetFile(data)
}
testStandardAndLegacyModes("array") {
val data = (1 to 4).map(i => Tuple1(Seq(i, i + 1)))
checkParquetFile(data)
}
testStandardAndLegacyModes("array and double") {
val data = (1 to 4).map(i => (i.toDouble, Seq(i.toDouble, (i + 1).toDouble)))
checkParquetFile(data)
}
testStandardAndLegacyModes("struct") {
val data = (1 to 4).map(i => Tuple1((i, s"val_$i")))
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(struct) =>
Row(Row(struct.productIterator.toSeq: _*))
})
}
}
testStandardAndLegacyModes("array of struct") {
val data = (1 to 4).map { i =>
Tuple1(
Seq(
Tuple1(s"1st_val_$i"),
Tuple1(s"2nd_val_$i")
)
)
}
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(array) =>
Row(array.map(struct => Row(struct.productIterator.toSeq: _*)))
})
}
}
testStandardAndLegacyModes("array of nested struct") {
val data = (1 to 4).map { i =>
Tuple1(
Seq(
Tuple1(
Tuple1(s"1st_val_$i")),
Tuple1(
Tuple1(s"2nd_val_$i"))
)
)
}
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(array) =>
Row(array.map { case Tuple1(Tuple1(str)) => Row(Row(str))})
})
}
}
testStandardAndLegacyModes("nested struct with array of array as field") {
val data = (1 to 4).map(i => Tuple1((i, Seq(Seq(s"val_$i")))))
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(struct) =>
Row(Row(struct.productIterator.toSeq: _*))
})
}
}
testStandardAndLegacyModes("nested map with struct as key type") {
val data = (1 to 4).map { i =>
Tuple1(
Map(
(i, s"kA_$i") -> s"vA_$i",
(i, s"kB_$i") -> s"vB_$i"
)
)
}
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(m) =>
Row(m.map { case (k, v) => Row(k.productIterator.toSeq: _*) -> v })
})
}
}
testStandardAndLegacyModes("nested map with struct as value type") {
val data = (1 to 4).map { i =>
Tuple1(
Map(
s"kA_$i" -> ((i, s"vA_$i")),
s"kB_$i" -> ((i, s"vB_$i"))
)
)
}
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(m) =>
Row(m.mapValues(struct => Row(struct.productIterator.toSeq: _*)))
})
}
}
test("nulls") {
val allNulls = (
null.asInstanceOf[java.lang.Boolean],
null.asInstanceOf[Integer],
null.asInstanceOf[java.lang.Long],
null.asInstanceOf[java.lang.Float],
null.asInstanceOf[java.lang.Double])
withParquetDataFrame(allNulls :: Nil) { df =>
val rows = df.collect()
assert(rows.length === 1)
assert(rows.head === Row(Seq.fill(5)(null): _*))
}
}
test("nones") {
val allNones = (
None.asInstanceOf[Option[Int]],
None.asInstanceOf[Option[Long]],
None.asInstanceOf[Option[String]])
withParquetDataFrame(allNones :: Nil) { df =>
val rows = df.collect()
assert(rows.length === 1)
assert(rows.head === Row(Seq.fill(3)(null): _*))
}
}
test("SPARK-34817: Support for unsigned Parquet logical types") {
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required INT32 a(UINT_8);
| required INT32 b(UINT_16);
| required INT32 c(UINT_32);
| required INT64 d(UINT_64);
|}
""".stripMargin)
val expectedSparkTypes = Seq(ShortType, IntegerType, LongType, DecimalType.LongDecimal)
withTempPath { location =>
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf)
val sparkTypes = spark.read.parquet(path.toString).schema.map(_.dataType)
assert(sparkTypes === expectedSparkTypes)
}
}
test("SPARK-11692 Support for Parquet logical types, JSON and BSON (embedded types)") {
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required binary a(JSON);
| required binary b(BSON);
|}
""".stripMargin)
val expectedSparkTypes = Seq(StringType, BinaryType)
withTempPath { location =>
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf)
val sparkTypes = spark.read.parquet(path.toString).schema.map(_.dataType)
assert(sparkTypes === expectedSparkTypes)
}
}
test("compression codec") {
val hadoopConf = spark.sessionState.newHadoopConf()
def compressionCodecFor(path: String, codecName: String): String = {
val codecs = for {
footer <- readAllFootersWithoutSummaryFiles(new Path(path), hadoopConf)
block <- footer.getParquetMetadata.getBlocks.asScala
column <- block.getColumns.asScala
} yield column.getCodec.name()
assert(codecs.distinct === Seq(codecName))
codecs.head
}
val data = (0 until 10).map(i => (i, i.toString))
def checkCompressionCodec(codec: CompressionCodecName): Unit = {
withSQLConf(SQLConf.PARQUET_COMPRESSION.key -> codec.name()) {
withParquetFile(data) { path =>
assertResult(spark.conf.get(SQLConf.PARQUET_COMPRESSION).toUpperCase(Locale.ROOT)) {
compressionCodecFor(path, codec.name())
}
}
}
}
// Checks default compression codec
checkCompressionCodec(
CompressionCodecName.fromConf(spark.conf.get(SQLConf.PARQUET_COMPRESSION)))
checkCompressionCodec(CompressionCodecName.UNCOMPRESSED)
checkCompressionCodec(CompressionCodecName.GZIP)
checkCompressionCodec(CompressionCodecName.SNAPPY)
checkCompressionCodec(CompressionCodecName.ZSTD)
}
private def createParquetWriter(
schema: MessageType,
path: Path,
dictionaryEnabled: Boolean = false): ParquetWriter[Group] = {
val hadoopConf = spark.sessionState.newHadoopConf()
ExampleParquetWriter
.builder(path)
.withDictionaryEncoding(dictionaryEnabled)
.withType(schema)
.withWriterVersion(PARQUET_1_0)
.withCompressionCodec(GZIP)
.withRowGroupSize(1024 * 1024)
.withPageSize(1024)
.withConf(hadoopConf)
.build()
}
test("read raw Parquet file") {
def makeRawParquetFile(path: Path): Unit = {
val schemaStr =
"""
|message root {
| required boolean _1;
| required int32 _2;
| required int64 _3;
| required float _4;
| required double _5;
|}
""".stripMargin
val schema = MessageTypeParser.parseMessageType(schemaStr)
val writer = createParquetWriter(schema, path)
(0 until 10).foreach { i =>
val record = new SimpleGroup(schema)
record.add(0, i % 2 == 0)
record.add(1, i)
record.add(2, i.toLong)
record.add(3, i.toFloat)
record.add(4, i.toDouble)
writer.write(record)
}
writer.close()
}
withTempDir { dir =>
val path = new Path(dir.toURI.toString, "part-r-0.parquet")
makeRawParquetFile(path)
readParquetFile(path.toString) { df =>
checkAnswer(df, (0 until 10).map { i =>
Row(i % 2 == 0, i, i.toLong, i.toFloat, i.toDouble) })
}
}
}
test("SPARK-34817: Read UINT_8/UINT_16/UINT_32 from parquet") {
Seq(true, false).foreach { dictionaryEnabled =>
def makeRawParquetFile(path: Path): Unit = {
val schemaStr =
"""message root {
| required INT32 a(UINT_8);
| required INT32 b(UINT_16);
| required INT32 c(UINT_32);
|}
""".stripMargin
val schema = MessageTypeParser.parseMessageType(schemaStr)
val writer = createParquetWriter(schema, path, dictionaryEnabled)
val factory = new SimpleGroupFactory(schema)
(0 until 1000).foreach { i =>
val group = factory.newGroup()
.append("a", i % 100 + Byte.MaxValue)
.append("b", i % 100 + Short.MaxValue)
.append("c", i % 100 + Int.MaxValue)
writer.write(group)
}
writer.close()
}
withTempDir { dir =>
val path = new Path(dir.toURI.toString, "part-r-0.parquet")
makeRawParquetFile(path)
readParquetFile(path.toString) { df =>
checkAnswer(df, (0 until 1000).map { i =>
Row(i % 100 + Byte.MaxValue,
i % 100 + Short.MaxValue,
i % 100 + Int.MaxValue.toLong)
})
}
}
}
}
test("SPARK-34817: Read UINT_64 as Decimal from parquet") {
Seq(true, false).foreach { dictionaryEnabled =>
def makeRawParquetFile(path: Path): Unit = {
val schemaStr =
"""message root {
| required INT64 a(UINT_64);
|}
""".stripMargin
val schema = MessageTypeParser.parseMessageType(schemaStr)
val writer = createParquetWriter(schema, path, dictionaryEnabled)
val factory = new SimpleGroupFactory(schema)
(-500 until 500).foreach { i =>
val group = factory.newGroup()
.append("a", i % 100L)
writer.write(group)
}
writer.close()
}
withTempDir { dir =>
val path = new Path(dir.toURI.toString, "part-r-0.parquet")
makeRawParquetFile(path)
readParquetFile(path.toString) { df =>
checkAnswer(df, (-500 until 500).map { i =>
val bi = UnsignedLong.fromLongBits(i % 100L).bigIntegerValue()
Row(new java.math.BigDecimal(bi))
})
}
}
}
}
test("write metadata") {
val hadoopConf = spark.sessionState.newHadoopConf()
withTempPath { file =>
val path = new Path(file.toURI.toString)
val fs = FileSystem.getLocal(hadoopConf)
val schema = StructType.fromAttributes(ScalaReflection.attributesFor[(Int, String)])
writeMetadata(schema, path, hadoopConf)
assert(fs.exists(new Path(path, ParquetFileWriter.PARQUET_COMMON_METADATA_FILE)))
assert(fs.exists(new Path(path, ParquetFileWriter.PARQUET_METADATA_FILE)))
val expectedSchema = new SparkToParquetSchemaConverter().convert(schema)
val actualSchema = readFooter(path, hadoopConf).getFileMetaData.getSchema
actualSchema.checkContains(expectedSchema)
expectedSchema.checkContains(actualSchema)
}
}
test("save - overwrite") {
withParquetFile((1 to 10).map(i => (i, i.toString))) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
newData.toDF().write.format("parquet").mode(SaveMode.Overwrite).save(file)
readParquetFile(file) { df =>
checkAnswer(df, newData.map(Row.fromTuple))
}
}
}
test("save - ignore") {
val data = (1 to 10).map(i => (i, i.toString))
withParquetFile(data) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
newData.toDF().write.format("parquet").mode(SaveMode.Ignore).save(file)
readParquetFile(file) { df =>
checkAnswer(df, data.map(Row.fromTuple))
}
}
}
test("save - throw") {
val data = (1 to 10).map(i => (i, i.toString))
withParquetFile(data) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
val errorMessage = intercept[Throwable] {
newData.toDF().write.format("parquet").mode(SaveMode.ErrorIfExists).save(file)
}.getMessage
assert(errorMessage.contains("already exists"))
}
}
test("save - append") {
val data = (1 to 10).map(i => (i, i.toString))
withParquetFile(data) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
newData.toDF().write.format("parquet").mode(SaveMode.Append).save(file)
readParquetFile(file) { df =>
checkAnswer(df, (data ++ newData).map(Row.fromTuple))
}
}
}
test("SPARK-6315 regression test") {
    // Spark 1.1 and prior versions write the Spark schema as a case class string into Parquet
    // metadata. Since 1.2 this has been replaced by the JSON format. Notice that 1.3 further
    // refactored the data types API and made StructType.fields an array. This makes the result of
    // StructType.toString different from prior versions: there's no "Seq" wrapping the fields part
    // in the string now.
val sparkSchema =
"StructType(Seq(StructField(a,BooleanType,false),StructField(b,IntegerType,false)))"
// The Parquet schema is intentionally made different from the Spark schema. Because the new
// Parquet data source simply falls back to the Parquet schema once it fails to parse the Spark
// schema. By making these two different, we are able to assert the old style case class string
// is parsed successfully.
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required int32 c;
|}
""".stripMargin)
withTempPath { location =>
val extraMetadata = Map(ParquetReadSupport.SPARK_METADATA_KEY -> sparkSchema.toString)
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf, extraMetadata)
readParquetFile(path.toString) { df =>
assertResult(df.schema) {
StructType(
StructField("a", BooleanType, nullable = true) ::
StructField("b", IntegerType, nullable = true) ::
Nil)
}
}
}
}
test("SPARK-8121: spark.sql.parquet.output.committer.class shouldn't be overridden") {
withSQLConf(SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
classOf[SQLHadoopMapReduceCommitProtocol].getCanonicalName) {
val extraOptions = Map(
SQLConf.OUTPUT_COMMITTER_CLASS.key -> classOf[ParquetOutputCommitter].getCanonicalName,
SQLConf.PARQUET_OUTPUT_COMMITTER_CLASS.key ->
classOf[JobCommitFailureParquetOutputCommitter].getCanonicalName
)
withTempPath { dir =>
val message = intercept[SparkException] {
spark.range(0, 1).write.options(extraOptions).parquet(dir.getCanonicalPath)
}.getCause.getMessage
assert(message === "Intentional exception for testing purposes")
}
}
}
test("SPARK-6330 regression test") {
    // In 1.3.0, saving to a filesystem other than file: without configuring core-site.xml would
    // fail with: IllegalArgumentException: Wrong FS: hdfs://..., expected: file:///
intercept[Throwable] {
spark.read.parquet("file:///nonexistent")
}
val errorMessage = intercept[Throwable] {
spark.read.parquet("hdfs://nonexistent")
}.toString
assert(errorMessage.contains("UnknownHostException"))
}
test("SPARK-7837 Do not close output writer twice when commitTask() fails") {
withSQLConf(SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
classOf[SQLHadoopMapReduceCommitProtocol].getCanonicalName) {
      // Using an output committer that always fails when committing a task, so that both
      // `commitTask()` and `abortTask()` are invoked.
val extraOptions = Map[String, String](
SQLConf.PARQUET_OUTPUT_COMMITTER_CLASS.key ->
classOf[TaskCommitFailureParquetOutputCommitter].getCanonicalName
)
// Before fixing SPARK-7837, the following code results in an NPE because both
// `commitTask()` and `abortTask()` try to close output writers.
withTempPath { dir =>
val m1 = intercept[SparkException] {
spark.range(1).coalesce(1).write.options(extraOptions).parquet(dir.getCanonicalPath)
}.getCause.getMessage
assert(m1.contains("Intentional exception for testing purposes"))
}
withTempPath { dir =>
val m2 = intercept[SparkException] {
val df = spark.range(1).select('id as 'a, 'id as 'b).coalesce(1)
df.write.partitionBy("a").options(extraOptions).parquet(dir.getCanonicalPath)
}.getCause.getMessage
assert(m2.contains("Intentional exception for testing purposes"))
}
}
}
test("SPARK-11044 Parquet writer version fixed as version1 ") {
withSQLConf(SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
classOf[SQLHadoopMapReduceCommitProtocol].getCanonicalName) {
// For dictionary encoding, Parquet changes the encoding types according to its writer
// version. So, this test checks one of the encoding types in order to ensure that
// the file is written with writer version2.
val extraOptions = Map[String, String](
// Write a Parquet file with writer version2.
ParquetOutputFormat.WRITER_VERSION -> ParquetProperties.WriterVersion.PARQUET_2_0.toString,
        // By default, dictionary encoding is enabled from Parquet 1.2.0, but
        // we enable it explicitly here just in case.
ParquetOutputFormat.ENABLE_DICTIONARY -> "true"
)
val hadoopConf = spark.sessionState.newHadoopConfWithOptions(extraOptions)
withSQLConf(ParquetOutputFormat.JOB_SUMMARY_LEVEL -> "ALL") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part-r-0.parquet"
spark.range(1 << 16).selectExpr("(id % 4) AS i")
.coalesce(1).write.options(extraOptions).mode("overwrite").parquet(path)
val blockMetadata = readFooter(new Path(path), hadoopConf).getBlocks.asScala.head
val columnChunkMetadata = blockMetadata.getColumns.asScala.head
// If the file is written with version2, this should include
// Encoding.RLE_DICTIONARY type. For version1, it is Encoding.PLAIN_DICTIONARY
assert(columnChunkMetadata.getEncodings.contains(Encoding.RLE_DICTIONARY))
}
}
}
}
test("null and non-null strings") {
// Create a dataset where the first values are NULL and then some non-null values. The
// number of non-nulls needs to be bigger than the ParquetReader batch size.
val data: Dataset[String] = spark.range(200).map (i =>
if (i < 150) null
else "a"
)
val df = data.toDF("col")
assert(df.agg("col" -> "count").collect().head.getLong(0) == 50)
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/data"
df.write.parquet(path)
readParquetFile(path) { df2 =>
assert(df2.agg("col" -> "count").collect().head.getLong(0) == 50)
}
}
}
test("read dictionary encoded decimals written as INT32") {
withAllParquetReaders {
checkAnswer(
// Decimal column in this file is encoded using plain dictionary
readResourceParquetFile("test-data/dec-in-i32.parquet"),
spark.range(1 << 4).select('id % 10 cast DecimalType(5, 2) as 'i32_dec))
}
}
test("read dictionary encoded decimals written as INT64") {
withAllParquetReaders {
checkAnswer(
// Decimal column in this file is encoded using plain dictionary
readResourceParquetFile("test-data/dec-in-i64.parquet"),
spark.range(1 << 4).select('id % 10 cast DecimalType(10, 2) as 'i64_dec))
}
}
test("read dictionary encoded decimals written as FIXED_LEN_BYTE_ARRAY") {
withAllParquetReaders {
checkAnswer(
// Decimal column in this file is encoded using plain dictionary
readResourceParquetFile("test-data/dec-in-fixed-len.parquet"),
spark.range(1 << 4).select('id % 10 cast DecimalType(10, 2) as 'fixed_len_dec))
}
}
test("read dictionary and plain encoded timestamp_millis written as INT64") {
withAllParquetReaders {
checkAnswer(
// timestamp column in this file is encoded using combination of plain
// and dictionary encodings.
readResourceParquetFile("test-data/timemillis-in-i64.parquet"),
(1 to 3).map(i => Row(new java.sql.Timestamp(10))))
}
}
test("SPARK-12589 copy() on rows returned from reader works for strings") {
withTempPath { dir =>
      val data = (1, "abc") :: (2, "helloabcde") :: Nil
data.toDF().write.parquet(dir.getCanonicalPath)
var hash1: Int = 0
var hash2: Int = 0
(false :: true :: Nil).foreach { v =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> v.toString) {
val df = spark.read.parquet(dir.getCanonicalPath)
val rows = df.queryExecution.toRdd.map(_.copy()).collect()
val unsafeRows = rows.map(_.asInstanceOf[UnsafeRow])
if (!v) {
hash1 = unsafeRows(0).hashCode()
hash2 = unsafeRows(1).hashCode()
} else {
assert(hash1 == unsafeRows(0).hashCode())
assert(hash2 == unsafeRows(1).hashCode())
}
}
}
}
}
test("VectorizedParquetRecordReader - direct path read") {
val data = (0 to 10).map(i => (i, (i + 'a').toChar.toString))
withTempPath { dir =>
spark.createDataFrame(data).repartition(1).write.parquet(dir.getCanonicalPath)
val file = SpecificParquetRecordReaderBase.listDirectory(dir).get(0);
{
val conf = sqlContext.conf
val reader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
try {
reader.initialize(file, null)
val result = mutable.ArrayBuffer.empty[(Int, String)]
while (reader.nextKeyValue()) {
val row = reader.getCurrentValue.asInstanceOf[InternalRow]
val v = (row.getInt(0), row.getString(1))
result += v
}
assert(data.toSet == result.toSet)
} finally {
reader.close()
}
}
// Project just one column
{
val conf = sqlContext.conf
val reader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
try {
reader.initialize(file, ("_2" :: Nil).asJava)
val result = mutable.ArrayBuffer.empty[(String)]
while (reader.nextKeyValue()) {
val row = reader.getCurrentValue.asInstanceOf[InternalRow]
result += row.getString(0)
}
assert(data.map(_._2).toSet == result.toSet)
} finally {
reader.close()
}
}
// Project columns in opposite order
{
val conf = sqlContext.conf
val reader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
try {
reader.initialize(file, ("_2" :: "_1" :: Nil).asJava)
val result = mutable.ArrayBuffer.empty[(String, Int)]
while (reader.nextKeyValue()) {
val row = reader.getCurrentValue.asInstanceOf[InternalRow]
val v = (row.getString(0), row.getInt(1))
result += v
}
assert(data.map { x => (x._2, x._1) }.toSet == result.toSet)
} finally {
reader.close()
}
}
// Empty projection
{
val conf = sqlContext.conf
val reader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
try {
reader.initialize(file, List[String]().asJava)
var result = 0
while (reader.nextKeyValue()) {
result += 1
}
assert(result == data.length)
} finally {
reader.close()
}
}
}
}
test("VectorizedParquetRecordReader - partition column types") {
withTempPath { dir =>
Seq(1).toDF().repartition(1).write.parquet(dir.getCanonicalPath)
val dataTypes =
Seq(StringType, BooleanType, ByteType, BinaryType, ShortType, IntegerType, LongType,
FloatType, DoubleType, DecimalType(25, 5), DateType, TimestampType)
val constantValues =
Seq(
UTF8String.fromString("a string"),
true,
1.toByte,
"Spark SQL".getBytes,
2.toShort,
3,
Long.MaxValue,
0.25.toFloat,
0.75D,
Decimal("1234.23456"),
DateTimeUtils.fromJavaDate(java.sql.Date.valueOf("2015-01-01")),
DateTimeUtils.fromJavaTimestamp(java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123")))
dataTypes.zip(constantValues).foreach { case (dt, v) =>
val schema = StructType(StructField("pcol", dt) :: Nil)
val conf = sqlContext.conf
val vectorizedReader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
val partitionValues = new GenericInternalRow(Array(v))
val file = SpecificParquetRecordReaderBase.listDirectory(dir).get(0)
try {
vectorizedReader.initialize(file, null)
vectorizedReader.initBatch(schema, partitionValues)
vectorizedReader.nextKeyValue()
val row = vectorizedReader.getCurrentValue.asInstanceOf[InternalRow]
          // Use `GenericMutableRow` by explicitly copying rather than `ColumnarBatch`
          // in order to use the get(...) method, which is not implemented in `ColumnarBatch`.
val actual = row.copy().get(1, dt)
val expected = v
if (dt.isInstanceOf[BinaryType]) {
assert(actual.asInstanceOf[Array[Byte]] sameElements expected.asInstanceOf[Array[Byte]])
} else {
assert(actual == expected)
}
} finally {
vectorizedReader.close()
}
}
}
}
test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
withSQLConf(SQLConf.PARQUET_COMPRESSION.key -> "snappy") {
val option = new ParquetOptions(Map("Compression" -> "uncompressed"), spark.sessionState.conf)
assert(option.compressionCodecClassName == "UNCOMPRESSED")
}
}
test("SPARK-23173 Writing a file with data converted from JSON with and incorrect user schema") {
withTempPath { file =>
val jsonData =
"""{
| "a": 1,
| "c": "foo"
|}
|""".stripMargin
val jsonSchema = new StructType()
.add("a", LongType, nullable = false)
.add("b", StringType, nullable = false)
.add("c", StringType, nullable = false)
spark.range(1).select(from_json(lit(jsonData), jsonSchema) as "input")
.write.parquet(file.getAbsolutePath)
checkAnswer(spark.read.parquet(file.getAbsolutePath), Seq(Row(Row(1, null, "foo"))))
}
}
test("Write Spark version into Parquet metadata") {
withTempPath { dir =>
spark.range(1).repartition(1).write.parquet(dir.getAbsolutePath)
assert(getMetaData(dir)(SPARK_VERSION_METADATA_KEY) === SPARK_VERSION_SHORT)
}
}
Seq(true, false).foreach { vec =>
test(s"SPARK-34167: read LongDecimals with precision < 10, VectorizedReader $vec") {
// decimal32-written-as-64-bit.snappy.parquet was generated using a 3rd-party library. It has
// 10 rows of Decimal(9, 1) written as LongDecimal instead of an IntDecimal
readParquetFile(testFile("test-data/decimal32-written-as-64-bit.snappy.parquet"), vec) {
df =>
assert(10 == df.collect().length)
val first10Df = df.head(10)
assert(
Seq(792059492, 986842987, 540247998, null, 357991078,
494131059, 92536396, 426847157, -999999999, 204486094)
.zip(first10Df).forall(d =>
d._2.isNullAt(0) && d._1 == null ||
d._1 == d._2.getDecimal(0).unscaledValue().intValue()
))
}
// decimal32-written-as-64-bit-dict.snappy.parquet was generated using a 3rd-party library. It
// has 2048 rows of Decimal(3, 1) written as LongDecimal instead of an IntDecimal
readParquetFile(
testFile("test-data/decimal32-written-as-64-bit-dict.snappy.parquet"), vec) {
df =>
assert(2048 == df.collect().length)
val first10Df = df.head(10)
assert(Seq(751, 937, 511, null, 337, 467, 84, 403, -999, 190)
.zip(first10Df).forall(d =>
d._2.isNullAt(0) && d._1 == null ||
d._1 == d._2.getDecimal(0).unscaledValue().intValue()))
val last10Df = df.tail(10)
assert(Seq(866, 20, 492, 76, 824, 604, 343, 820, 864, 243)
.zip(last10Df).forall(d =>
d._1 == d._2.getDecimal(0).unscaledValue().intValue()))
}
}
}
}
class JobCommitFailureParquetOutputCommitter(outputPath: Path, context: TaskAttemptContext)
extends ParquetOutputCommitter(outputPath, context) {
override def commitJob(jobContext: JobContext): Unit = {
sys.error("Intentional exception for testing purposes")
}
}
class TaskCommitFailureParquetOutputCommitter(outputPath: Path, context: TaskAttemptContext)
extends ParquetOutputCommitter(outputPath, context) {
override def commitTask(context: TaskAttemptContext): Unit = {
sys.error("Intentional exception for testing purposes")
}
}
| BryanCutler/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala | Scala | apache-2.0 | 35,616 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.utils
import org.apache.hadoop.mapreduce.{Job, JobStatus}
import org.locationtech.geomesa.tools.Command
/**
* Helper for running a job and reporting back status
*/
object JobRunner {
/**
* Run a job
*
* @param job job
* @param reporter status callback
* @param mapCounters map status counters
* @param reduceCounters reduce status counters (will be added to map phase if no reduce phase)
* @return true if job completes successfully
*/
def run(
job: Job,
reporter: StatusCallback,
mapCounters: => Seq[(String, Long)],
reduceCounters: => Seq[(String, Long)]): Boolean = {
Command.user.info("Submitting job - please wait...")
job.submit()
Command.user.info(s"Tracking available at ${job.getStatus.getTrackingUrl}")
val status: Boolean => Unit = if (job.getNumReduceTasks != 0) {
var mapping = true
done => {
if (mapping) {
val mapProgress = job.mapProgress()
if (mapProgress < 1f) {
reporter("Map: ", mapProgress, mapCounters, done = false)
} else {
reporter("Map: ", mapProgress, mapCounters, done = true)
reporter.reset()
mapping = false
}
} else {
reporter("Reduce: ", job.reduceProgress(), reduceCounters, done)
}
}
} else {
// we don't have any reducers, just track mapper progress
done => reporter("", job.mapProgress(), mapCounters ++ reduceCounters, done)
}
while (!job.isComplete) {
if (job.getStatus.getState != JobStatus.State.PREP) {
status(false)
}
Thread.sleep(500)
}
status(true)
if (job.isSuccessful) { true } else {
Command.user.error(s"Job failed with state ${job.getStatus.getState} due to: ${job.getStatus.getFailureInfo}")
false
}
}
}
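// -------------------------------------------------------------------------------------------
// Hypothetical usage sketch (not part of GeoMesa): wires a configured Hadoop job into
// JobRunner.run, pulling counter values off the live job for progress reporting. The job name,
// the counter group/name strings and the externally supplied `callback` are illustrative
// assumptions only, not GeoMesa conventions.
// -------------------------------------------------------------------------------------------
object JobRunnerUsageSketch {

  import org.apache.hadoop.conf.Configuration

  def runExample(callback: StatusCallback): Boolean = {
    val job = Job.getInstance(new Configuration(), "example-ingest")
    // counters are passed by-name, so they are re-read from the job each time they are reported
    def counter(name: String): Long = job.getCounters.findCounter("org.example", name).getValue
    JobRunner.run(
      job,
      callback,
      mapCounters    = Seq("mapped"  -> counter("mapped")),
      reduceCounters = Seq("reduced" -> counter("reduced"))
    )
  }
}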
| elahrvivaz/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/utils/JobRunner.scala | Scala | apache-2.0 | 2,370 |
/*
* @author Philip Stutz
*
* Copyright 2014 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.examples
import com.signalcollect._
import com.signalcollect.configuration.ExecutionMode
import com.signalcollect.factory.messagebus.IntIdDoubleSignalMessageBusFactory
/**
* Placeholder edge that gets discarded by memory efficient vertices that
* have their own internal edge representations.
*/
class PlaceholderEdge[Id](targetId: Id) extends DefaultEdge(targetId) {
def signal = throw new Exception("This is a placeholder edge, its signal function should never be called.")
}
class EfficientPageRankVertex(id: Int)
extends MemoryEfficientDataFlowVertex[Double, Double](id = id, state = 0.15) {
type OutgoingSignalType = Double
def computeSignal(edgeId: Int) =
throw new Exception("This vertex type computes signals inside of 'executeSignalOperation', " +
"'computeSignal' should never be called.")
override def executeSignalOperation(graphEditor: GraphEditor[Int, Double]) {
if (edgeCount != 0) {
val signal = (state - lastSignalState) / edgeCount
targetIds.foreach(graphEditor.sendSignal(signal, _))
}
lastSignalState = state
}
def collect(signal: Double): Double = {
state + 0.85 * signal
}
def scoreSignal = {
state - lastSignalState
}
def addTargetId(targetId: Int) {
_targetIds.insert(targetId)
}
}
/** Builds a PageRank compute graph and executes the computation */
object MemoryEfficientPageRank extends App {
val graph = new GraphBuilder[Int, Double]().
withMessageBusFactory(new IntIdDoubleSignalMessageBusFactory(10000)).
// withConsole(true).
build
graph.addVertex(new EfficientPageRankVertex(1))
graph.addVertex(new EfficientPageRankVertex(2))
graph.addVertex(new EfficientPageRankVertex(3))
graph.addEdge(1, new PlaceholderEdge(2))
graph.addEdge(2, new PlaceholderEdge(1))
graph.addEdge(2, new PlaceholderEdge(3))
graph.addEdge(3, new PlaceholderEdge(2))
graph.awaitIdle
val stats = graph.execute //(ExecutionConfiguration.withExecutionMode(ExecutionMode.Interactive))
println(stats)
graph.foreachVertex(println(_))
graph.shutdown
}
| uzh/signal-collect | src/main/scala/com/signalcollect/examples/EfficientPageRank.scala | Scala | apache-2.0 | 2,749 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.codegen.calls
import java.lang.reflect.Method
import java.lang.{Long => JLong}
import java.math.{BigDecimal => JBigDecimal}
import org.apache.calcite.linq4j.tree.Types
import org.apache.calcite.runtime.SqlFunctions
import org.apache.flink.table.runtime.functions.ScalarFunctions
/**
* Contains references to built-in functions.
*
 * NOTE: When adding functions here, check if Calcite provides it in
* [[org.apache.calcite.util.BuiltInMethod]]. The function generator supports Java's auto casting
* so we don't need the full matrix of data types for every function. Only [[JBigDecimal]] needs
* special handling.
*/
object BuiltInMethods {
val LOG = Types.lookupMethod(classOf[ScalarFunctions], "log", classOf[Double])
val LOG_WITH_BASE =
Types.lookupMethod(classOf[ScalarFunctions], "log", classOf[Double], classOf[Double])
val LOG10 = Types.lookupMethod(classOf[Math], "log10", classOf[Double])
val LOG2 = Types.lookupMethod(classOf[ScalarFunctions], "log2", classOf[Double])
val EXP = Types.lookupMethod(classOf[Math], "exp", classOf[Double])
val POWER = Types.lookupMethod(classOf[Math], "pow", classOf[Double], classOf[Double])
val POWER_DEC = Types.lookupMethod(
classOf[ScalarFunctions], "power", classOf[Double], classOf[JBigDecimal])
val POWER_DEC_DEC = Types.lookupMethod(
classOf[SqlFunctions], "power", classOf[JBigDecimal], classOf[JBigDecimal])
val LN = Types.lookupMethod(classOf[Math], "log", classOf[Double])
val ABS = Types.lookupMethod(classOf[SqlFunctions], "abs", classOf[Double])
val ABS_DEC = Types.lookupMethod(classOf[SqlFunctions], "abs", classOf[JBigDecimal])
val LIKE_WITH_ESCAPE = Types.lookupMethod(classOf[SqlFunctions], "like",
classOf[String], classOf[String], classOf[String])
val SIMILAR_WITH_ESCAPE = Types.lookupMethod(classOf[SqlFunctions], "similar",
classOf[String], classOf[String], classOf[String])
val SIN = Types.lookupMethod(classOf[Math], "sin", classOf[Double])
val SIN_DEC = Types.lookupMethod(classOf[SqlFunctions], "sin", classOf[JBigDecimal])
val COS = Types.lookupMethod(classOf[Math], "cos", classOf[Double])
val COS_DEC = Types.lookupMethod(classOf[SqlFunctions], "cos", classOf[JBigDecimal])
val TAN = Types.lookupMethod(classOf[Math], "tan", classOf[Double])
val TAN_DEC = Types.lookupMethod(classOf[SqlFunctions], "tan", classOf[JBigDecimal])
val TANH = Types.lookupMethod(classOf[Math], "tanh", classOf[Double])
val TANH_DEC = Types.lookupMethod(classOf[ScalarFunctions], "tanh", classOf[JBigDecimal])
val COT = Types.lookupMethod(classOf[SqlFunctions], "cot", classOf[Double])
val COT_DEC = Types.lookupMethod(classOf[SqlFunctions], "cot", classOf[JBigDecimal])
val ASIN = Types.lookupMethod(classOf[Math], "asin", classOf[Double])
val ASIN_DEC = Types.lookupMethod(classOf[SqlFunctions], "asin", classOf[JBigDecimal])
val ACOS = Types.lookupMethod(classOf[Math], "acos", classOf[Double])
val ACOS_DEC = Types.lookupMethod(classOf[SqlFunctions], "acos", classOf[JBigDecimal])
val SINH = Types.lookupMethod(classOf[Math], "sinh", classOf[Double])
val SINH_DEC = Types.lookupMethod(classOf[ScalarFunctions], "sinh", classOf[JBigDecimal])
val ATAN = Types.lookupMethod(classOf[Math], "atan", classOf[Double])
val ATAN_DEC = Types.lookupMethod(classOf[SqlFunctions], "atan", classOf[JBigDecimal])
val COSH = Types.lookupMethod(classOf[Math], "cosh", classOf[Double])
val COSH_DEC = Types.lookupMethod(classOf[ScalarFunctions], "cosh", classOf[JBigDecimal])
val ATAN2_DOUBLE_DOUBLE = Types.lookupMethod(
classOf[Math],
"atan2",
classOf[Double],
classOf[Double])
val ATAN2_DEC_DEC = Types.lookupMethod(
classOf[SqlFunctions],
"atan2",
classOf[JBigDecimal],
classOf[JBigDecimal])
val DEGREES = Types.lookupMethod(classOf[Math], "toDegrees", classOf[Double])
val DEGREES_DEC = Types.lookupMethod(classOf[SqlFunctions], "degrees", classOf[JBigDecimal])
val RADIANS = Types.lookupMethod(classOf[Math], "toRadians", classOf[Double])
val RADIANS_DEC = Types.lookupMethod(classOf[SqlFunctions], "radians", classOf[JBigDecimal])
val SIGN_DOUBLE = Types.lookupMethod(classOf[Math], "signum", classOf[Double])
val SIGN_INT = Types.lookupMethod(classOf[Integer], "signum", classOf[Int])
val SIGN_LONG = Types.lookupMethod(classOf[JLong], "signum", classOf[Long])
val SIGN_DEC = Types.lookupMethod(classOf[SqlFunctions], "sign", classOf[JBigDecimal])
val ROUND_DOUBLE = Types.lookupMethod(classOf[SqlFunctions], "sround", classOf[Double],
classOf[Int])
val ROUND_INT = Types.lookupMethod(classOf[SqlFunctions], "sround", classOf[Int], classOf[Int])
val ROUND_LONG = Types.lookupMethod(classOf[SqlFunctions], "sround", classOf[Long], classOf[Int])
val ROUND_DEC = Types.lookupMethod(classOf[SqlFunctions], "sround", classOf[JBigDecimal],
classOf[Int])
val CONCAT = Types.lookupMethod(classOf[ScalarFunctions], "concat", classOf[Array[String]])
val CONCAT_WS =
Types.lookupMethod(
classOf[ScalarFunctions], "concat_ws", classOf[String], classOf[Array[String]])
val LPAD = Types.lookupMethod(
classOf[ScalarFunctions],
"lpad",
classOf[String],
classOf[Integer],
classOf[String])
val RPAD = Types.lookupMethod(
classOf[ScalarFunctions],
"rpad",
classOf[String],
classOf[Integer],
classOf[String])
val BIN = Types.lookupMethod(classOf[JLong], "toBinaryString", classOf[Long])
val REGEXP_REPLACE = Types.lookupMethod(
classOf[ScalarFunctions],
"regexp_replace",
classOf[String],
classOf[String],
classOf[String])
val REGEXP_EXTRACT = Types.lookupMethod(
classOf[ScalarFunctions],
"regexp_extract",
classOf[String],
classOf[String],
classOf[Integer])
val REGEXP_EXTRACT_WITHOUT_INDEX = Types.lookupMethod(
classOf[ScalarFunctions],
"regexp_extract",
classOf[String],
classOf[String])
val FROMBASE64 = Types.lookupMethod(classOf[ScalarFunctions], "fromBase64", classOf[String])
val TOBASE64 = Types.lookupMethod(classOf[ScalarFunctions], "toBase64", classOf[String])
val HEX_LONG: Method = Types.lookupMethod(classOf[ScalarFunctions], "hex", classOf[Long])
val HEX_STRING: Method = Types.lookupMethod(classOf[ScalarFunctions], "hex", classOf[String])
val UUID: Method = Types.lookupMethod(classOf[ScalarFunctions], "uuid")
val REPEAT: Method = Types.lookupMethod(
classOf[ScalarFunctions],
"repeat",
classOf[String],
classOf[Int])
}
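// -------------------------------------------------------------------------------------------
// Illustrative sketch (not part of Flink): each value above is a plain java.lang.reflect.Method
// handle that the code generator emits calls against. Invoking one reflectively shows what a
// lookup resolves to: LOG10 points at the static Math.log10(double), hence the null receiver.
// The constant 1000.0 is an arbitrary example input.
// -------------------------------------------------------------------------------------------
object BuiltInMethodsSketch {
  def main(args: Array[String]): Unit = {
    val method = BuiltInMethods.LOG10
    val result = method.invoke(null, java.lang.Double.valueOf(1000.0))
    // prints "Math.log10(1000.0) = 3.0"
    println(s"${method.getDeclaringClass.getSimpleName}.${method.getName}(1000.0) = $result")
  }
}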
| mylog00/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala | Scala | apache-2.0 | 7,358 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.storage
import org.scalatest.{BeforeAndAfter, FunSuite}
import org.apache.spark.Success
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler._
import org.apache.spark.storage._
/**
* Test various functionality in the StorageListener that supports the StorageTab.
*/
class StorageTabSuite extends FunSuite with BeforeAndAfter {
private var bus: LiveListenerBus = _
private var storageStatusListener: StorageStatusListener = _
private var storageListener: StorageListener = _
private val memAndDisk = StorageLevel.MEMORY_AND_DISK
private val memOnly = StorageLevel.MEMORY_ONLY
private val none = StorageLevel.NONE
private val taskInfo = new TaskInfo(0, 0, 0, 0, "big", "dog", TaskLocality.ANY, false)
private val taskInfo1 = new TaskInfo(1, 1, 1, 1, "big", "cat", TaskLocality.ANY, false)
private def rddInfo0 = new RDDInfo(0, "freedom", 100, memOnly)
private def rddInfo1 = new RDDInfo(1, "hostage", 200, memOnly)
private def rddInfo2 = new RDDInfo(2, "sanity", 300, memAndDisk)
private def rddInfo3 = new RDDInfo(3, "grace", 400, memAndDisk)
private val bm1 = BlockManagerId("big", "dog", 1)
before {
bus = new LiveListenerBus
storageStatusListener = new StorageStatusListener
storageListener = new StorageListener(storageStatusListener)
bus.addListener(storageStatusListener)
bus.addListener(storageListener)
}
test("stage submitted / completed") {
assert(storageListener._rddInfoMap.isEmpty)
assert(storageListener.rddInfoList.isEmpty)
// 2 RDDs are known, but none are cached
val stageInfo0 = new StageInfo(0, 0, "0", 100, Seq(rddInfo0, rddInfo1), "details")
bus.postToAll(SparkListenerStageSubmitted(stageInfo0))
assert(storageListener._rddInfoMap.size === 2)
assert(storageListener.rddInfoList.isEmpty)
// 4 RDDs are known, but only 2 are cached
val rddInfo2Cached = rddInfo2
val rddInfo3Cached = rddInfo3
rddInfo2Cached.numCachedPartitions = 1
rddInfo3Cached.numCachedPartitions = 1
val stageInfo1 = new StageInfo(1, 0, "0", 100, Seq(rddInfo2Cached, rddInfo3Cached), "details")
bus.postToAll(SparkListenerStageSubmitted(stageInfo1))
assert(storageListener._rddInfoMap.size === 4)
assert(storageListener.rddInfoList.size === 2)
// Submitting RDDInfos with duplicate IDs does nothing
val rddInfo0Cached = new RDDInfo(0, "freedom", 100, StorageLevel.MEMORY_ONLY)
rddInfo0Cached.numCachedPartitions = 1
val stageInfo0Cached = new StageInfo(0, 0, "0", 100, Seq(rddInfo0), "details")
bus.postToAll(SparkListenerStageSubmitted(stageInfo0Cached))
assert(storageListener._rddInfoMap.size === 4)
assert(storageListener.rddInfoList.size === 2)
// We only keep around the RDDs that are cached
bus.postToAll(SparkListenerStageCompleted(stageInfo0))
assert(storageListener._rddInfoMap.size === 2)
assert(storageListener.rddInfoList.size === 2)
}
test("unpersist") {
val rddInfo0Cached = rddInfo0
val rddInfo1Cached = rddInfo1
rddInfo0Cached.numCachedPartitions = 1
rddInfo1Cached.numCachedPartitions = 1
val stageInfo0 = new StageInfo(0, 0, "0", 100, Seq(rddInfo0Cached, rddInfo1Cached), "details")
bus.postToAll(SparkListenerStageSubmitted(stageInfo0))
assert(storageListener._rddInfoMap.size === 2)
assert(storageListener.rddInfoList.size === 2)
bus.postToAll(SparkListenerUnpersistRDD(0))
assert(storageListener._rddInfoMap.size === 1)
assert(storageListener.rddInfoList.size === 1)
bus.postToAll(SparkListenerUnpersistRDD(4)) // doesn't exist
assert(storageListener._rddInfoMap.size === 1)
assert(storageListener.rddInfoList.size === 1)
bus.postToAll(SparkListenerUnpersistRDD(1))
assert(storageListener._rddInfoMap.size === 0)
assert(storageListener.rddInfoList.size === 0)
}
test("task end") {
val myRddInfo0 = rddInfo0
val myRddInfo1 = rddInfo1
val myRddInfo2 = rddInfo2
val stageInfo0 = new StageInfo(0, 0, "0", 100, Seq(myRddInfo0, myRddInfo1, myRddInfo2), "details")
bus.postToAll(SparkListenerBlockManagerAdded(1L, bm1, 1000L))
bus.postToAll(SparkListenerStageSubmitted(stageInfo0))
assert(storageListener._rddInfoMap.size === 3)
assert(storageListener.rddInfoList.size === 0) // not cached
assert(!storageListener._rddInfoMap(0).isCached)
assert(!storageListener._rddInfoMap(1).isCached)
assert(!storageListener._rddInfoMap(2).isCached)
// Task end with no updated blocks. This should not change anything.
bus.postToAll(SparkListenerTaskEnd(0, 0, "obliteration", Success, taskInfo, new TaskMetrics))
assert(storageListener._rddInfoMap.size === 3)
assert(storageListener.rddInfoList.size === 0)
// Task end with a few new persisted blocks, some from the same RDD
val metrics1 = new TaskMetrics
metrics1.updatedBlocks = Some(Seq(
(RDDBlockId(0, 100), BlockStatus(memAndDisk, 400L, 0L, 0L)),
(RDDBlockId(0, 101), BlockStatus(memAndDisk, 0L, 400L, 0L)),
(RDDBlockId(0, 102), BlockStatus(memAndDisk, 400L, 0L, 200L)),
(RDDBlockId(1, 20), BlockStatus(memAndDisk, 0L, 240L, 0L))
))
bus.postToAll(SparkListenerTaskEnd(1, 0, "obliteration", Success, taskInfo, metrics1))
assert(storageListener._rddInfoMap(0).memSize === 800L)
assert(storageListener._rddInfoMap(0).diskSize === 400L)
assert(storageListener._rddInfoMap(0).tachyonSize === 200L)
assert(storageListener._rddInfoMap(0).numCachedPartitions === 3)
assert(storageListener._rddInfoMap(0).isCached)
assert(storageListener._rddInfoMap(1).memSize === 0L)
assert(storageListener._rddInfoMap(1).diskSize === 240L)
assert(storageListener._rddInfoMap(1).tachyonSize === 0L)
assert(storageListener._rddInfoMap(1).numCachedPartitions === 1)
assert(storageListener._rddInfoMap(1).isCached)
assert(!storageListener._rddInfoMap(2).isCached)
assert(storageListener._rddInfoMap(2).numCachedPartitions === 0)
// Task end with a few dropped blocks
val metrics2 = new TaskMetrics
metrics2.updatedBlocks = Some(Seq(
(RDDBlockId(0, 100), BlockStatus(none, 0L, 0L, 0L)),
(RDDBlockId(1, 20), BlockStatus(none, 0L, 0L, 0L)),
(RDDBlockId(2, 40), BlockStatus(none, 0L, 0L, 0L)), // doesn't actually exist
(RDDBlockId(4, 80), BlockStatus(none, 0L, 0L, 0L)) // doesn't actually exist
))
bus.postToAll(SparkListenerTaskEnd(2, 0, "obliteration", Success, taskInfo, metrics2))
assert(storageListener._rddInfoMap(0).memSize === 400L)
assert(storageListener._rddInfoMap(0).diskSize === 400L)
assert(storageListener._rddInfoMap(0).tachyonSize === 200L)
assert(storageListener._rddInfoMap(0).numCachedPartitions === 2)
assert(storageListener._rddInfoMap(0).isCached)
assert(!storageListener._rddInfoMap(1).isCached)
assert(storageListener._rddInfoMap(2).numCachedPartitions === 0)
assert(!storageListener._rddInfoMap(2).isCached)
assert(storageListener._rddInfoMap(2).numCachedPartitions === 0)
}
test("verify StorageTab contains all cached rdds") {
val rddInfo0 = new RDDInfo(0, "rdd0", 1, memOnly)
    val rddInfo1 = new RDDInfo(1, "rdd1", 1, memOnly)
val stageInfo0 = new StageInfo(0, 0, "stage0", 1, Seq(rddInfo0), "details")
val stageInfo1 = new StageInfo(1, 0, "stage1", 1, Seq(rddInfo1), "details")
val taskMetrics0 = new TaskMetrics
val taskMetrics1 = new TaskMetrics
val block0 = (RDDBlockId(0, 1), BlockStatus(memOnly, 100L, 0L, 0L))
val block1 = (RDDBlockId(1, 1), BlockStatus(memOnly, 200L, 0L, 0L))
taskMetrics0.updatedBlocks = Some(Seq(block0))
taskMetrics1.updatedBlocks = Some(Seq(block1))
bus.postToAll(SparkListenerBlockManagerAdded(1L, bm1, 1000L))
bus.postToAll(SparkListenerStageSubmitted(stageInfo0))
assert(storageListener.rddInfoList.size === 0)
bus.postToAll(SparkListenerTaskEnd(0, 0, "big", Success, taskInfo, taskMetrics0))
assert(storageListener.rddInfoList.size === 1)
bus.postToAll(SparkListenerStageSubmitted(stageInfo1))
assert(storageListener.rddInfoList.size === 1)
bus.postToAll(SparkListenerStageCompleted(stageInfo0))
assert(storageListener.rddInfoList.size === 1)
bus.postToAll(SparkListenerTaskEnd(1, 0, "small", Success, taskInfo1, taskMetrics1))
assert(storageListener.rddInfoList.size === 2)
bus.postToAll(SparkListenerStageCompleted(stageInfo1))
assert(storageListener.rddInfoList.size === 2)
}
}
| Dax1n/spark-core | core/src/test/scala/org/apache/spark/ui/storage/StorageTabSuite.scala | Scala | apache-2.0 | 9,297 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Sun Sep 23 21:14:14 EDT 2012
* @see LICENSE (MIT style license file).
*/
package scalation.analytics.classifier
import scala.math.round
import scalation.linalgebra.{MatriD, MatrixD, MatriI, MatrixI, VectoD, VectoI, VectorI}
import scalation.stat.vectorD2StatVector
import scalation.util.{Error, getFromURL_File}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `ClassifierInt` abstract class provides a common foundation for several
* classifiers that operate on integer-valued data.
* @param x the integer-valued training/test data vectors stored as rows of a matrix
* @param y the training/test classification vector, where y_i = class for row i of the matrix x
* @param fn the names for all features/variables
* @param k the number of classes
* @param cn the names for all classes
*/
abstract class ClassifierInt (x: MatriI, y: VectoI, fn: Array [String], k: Int, cn: Array [String])
extends Classifier with Error
{
/** the number of data vectors in training/test-set (# rows)
*/
protected val m = x.dim1
/** the number of features/variables (# columns)
*/
protected val n = x.dim2
/** the training-set size as a Double
*/
protected val md = m.toDouble
/** the feature-set size as a Double
*/
protected val nd = n.toDouble
if (y.dim != m) flaw ("constructor", "y.dim must equal training-set size (m)")
if (fn.length != n) flaw ("constructor", "fn.length must equal feature-set size (n)")
if (k >= m) flaw ("constructor", "k must be less than training-set size (m)")
if (cn.length != k) flaw ("constructor", "cn.length must equal number of classes (k)")
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the number of data vectors in training/test-set (# rows).
*/
def size: Int = m
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return default values for binary input data (value count 'vc' set to 2).
*/
def vc_default: VectorI = { val vc = new VectorI (n); vc.set (2); vc }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return value counts calculated from the input data.
* May wish to call 'shiftToZero' before calling this method.
*/
def vc_fromData: VectorI = VectorI (for (j <- x.range2) yield x.col(j).max() + 1)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return value counts calculated from the input data.
* May wish to call 'shiftToZero' before calling this method.
* @param rg the range of columns to be considered
*/
def vc_fromData2 (rg: Range): VectorI = VectorI (for (j <- rg) yield x.col(j).max() + 1)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Shift the 'x' Matrix so that the minimum value for each column equals zero.
*/
def shiftToZero () { x -= VectorI (for (j <- x.range2) yield x.col(j).min()) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Given a new continuous data vector 'z', determine which class it belongs
* to, by first rounding it to an integer-valued vector.
     *  Return the best class, its name and its relative probability.
* @param z the vector to classify
*/
def classify (z: VectoD): (Int, String, Double) =
{
val zi = new VectorI (z.dim)
for (j <- 0 until z.dim) zi(j) = (round (z(j))).toInt
classify (zi)
} // classify
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Test the quality of the training with a test-set and return the fraction
* of correct classifications.
* @param testStart beginning of test region (inclusive)
* @param testEnd end of test region (exclusive)
*/
def test (testStart: Int, testEnd: Int): Double =
{
var correct = 0
for (i <- testStart until testEnd if classify (x(i))._1 == y(i)) correct += 1
correct / (testEnd - testStart).toDouble
} // test
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Test the quality of the training with a test-set and return the fraction
* of correct classifications.
* @param xx the integer-valued test vectors stored as rows of a matrix
* @param yy the test classification vector, where 'yy_i = class' for row 'i' of 'xx'
*/
def test (xx: MatrixI, yy: VectorI): Double =
{
val mm = xx.dim1
if (yy.dim != mm) flaw ("test", "yy.dim must equal test-set size (mm)")
var correct = 0
for (i <- 0 until mm if classify (xx(i))._1 == yy(i)) correct += 1
correct / mm.toDouble
} // test
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Test the quality of the training with a test-set and return the fraction
* of correct classifications.
* @param itest indices of the instances considered test data
*/
override def test (itest: VectorI): Double =
{
var correct = 0
for (i <- itest if classify (x(i))._1 == y(i)) correct += 1
correct / itest.dim.toDouble
} // test
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Calculate the correlation matrix for the feature vectors 'fea'.
* If the correlations are too high, the independence assumption may be dubious.
*/
def calcCorrelation: MatriD =
{
val fea = for (j <- 0 until n) yield x.col(j).toDouble.toDense
val cor = new MatrixD (n, n)
for (j1 <- 0 until n; j2 <- 0 until j1) {
// println ("fea (j1) = " + fea(j1))
// println ("fea (j2) = " + fea(j2))
cor(j1, j2) = fea(j1) corr fea(j2)
} // for
cor
} // calcCorrelation
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Calculate the correlation matrix for the feature vectors of Z (Level 3)
* and those of X (level 2).
* If the correlations are too high, the independence assumption may be dubious.
* @param zrg the range of Z-columns
* @param xrg the range of X-columns
*/
def calcCorrelation2 (zrg: Range, xrg: Range): MatriD =
{
val zfea = for (j <- zrg) yield x.col(j).toDouble.toDense
val xfea = for (j <- xrg) yield x.col(j).toDouble.toDense
val cor = new MatrixD (zfea.size, xfea.size)
for (j1 <- 0 until cor.dim1; j2 <- 0 until cor.dim2) {
//println ("fea (j1) = " + fea(j1))
//println ("fea (j2) = " + fea(j2))
cor(j1, j2) = zfea(j1) corr xfea(j2)
} // for
cor
    } // calcCorrelation2
} // ClassifierInt abstract class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `ClassifierInt` companion object provides methods to read in data
* matrices in a combined 'xy' format that can be later decomposed into
* 'x' the feature data matrix and 'y' the classification vector.
*/
object ClassifierInt
{
private val SP = ',' // the token separation character
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Read the data set (e.g., a CSV file) and return the 'xy' data matrix.
* It will make sure the classification column 'cc' is last.
* @param fname the file-name (file should contain lines of data)
* @param m the number of data rows
* @param n the number of data columns/features (including the classification)
* @param skip the number of columns at the beginning the line to skip (e.g., id column)
* @param cc the classification column (the default (-1) => no position checking)
*/
def apply (fname: String, m: Int, n: Int, skip: Int = 1, cc: Int = -1): MatrixI =
{
val lines = getFromURL_File (fname)
val xy = new MatrixI (m, n)
var i = 0
for (ln <- lines) { xy(i) = VectorI (ln.split (SP), skip); i += 1; }
if (cc >= 0 && cc != n-1) { // want the classification column (cc) to be the last column
val c1 = xy.col (cc) // swap column cc with last (n-1), if necessary
val c2 = xy.col (n-1)
xy.setCol (cc, c2)
xy.setCol (n-1, c1)
} // if
xy
} // apply
} // ClassifierInt object
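//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Hypothetical usage sketch (not part of ScalaTion): load a combined 'xy' data matrix whose
 *  last column holds the class labels and peel that column off. The file name "seeds.csv" and
 *  the row/column counts are illustrative assumptions, not a bundled data set.
 */
object ClassifierIntLoadSketch extends App
{
    val (m, n) = (210, 8)                          // 7 feature columns plus 1 class column
    val xy = ClassifierInt ("seeds.csv", m, n)     // read the combined data matrix (class column last)
    val y  = xy.col (n - 1)                        // the classification vector
    println ("class labels = " + y)
} // ClassifierIntLoadSketch object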
| NBKlepp/fda | scalation_1.2/src/main/scala/scalation/analytics/classifier/ClassifierInt.scala | Scala | mit | 8,678 |
package at.logic.gapt.provers.sat
import at.logic.gapt.examples.{ BussTautology, PigeonHolePrinciple }
import at.logic.gapt.expr._
import org.specs2.mutable._
class GlucoseTest extends Specification {
if ( !Glucose.isInstalled ) skipAll
"find a model for an atom" in { Glucose.solve( SATProblems.getProblem1() ) must beSome }
"see that Pc and -Pc is unsat" in { Glucose.solve( SATProblems.getProblem2() ) must beNone }
"see that Pc or -Pc is valid" in {
Glucose.isValid( SATProblems.getProblem3a() ) must beTrue
Glucose.isValid( SATProblems.getProblem3b() ) must beTrue
}
"see that Pc is not valid" in {
Glucose.isValid( SATProblems.getProblem4() ) must beFalse
}
"return a correct model" in {
Glucose.solve( SATProblems.getProblem5() ) must beLike {
case Some( model ) => SATProblems.checkSolution5( model ) must beTrue
}
}
"deal correctly with the pigeonhole problem" in {
SATProblems.getProblem6a() foreach { f => Glucose.isValid( f ) must beFalse }
SATProblems.getProblem6b() foreach { f => Glucose.isValid( f ) must beTrue }
ok
}
"say bottom is unsatisfiable" in { Glucose.solve( Bottom() ) must beNone }
"say top is satisfiable" in { Glucose.solve( Top() ) must beSome }
"empty CNF is sat" in { Glucose.solve( Seq() ) must beSome }
"empty clause is unsat" in { Glucose.solve( Seq( Seq() ) ) must beNone }
"proof import" in {
"pigeonhole 3 2" in { Glucose getResolutionProof PigeonHolePrinciple( 3, 2 ) must beSome }
"buss 5" in { Glucose getResolutionProof BussTautology( 5 ) must beSome }
"to be or not to be" in { Glucose getResolutionProof hof"be ∨ ¬be" must beSome }
}
}
| gebner/gapt | tests/src/test/scala/at/logic/gapt/provers/sat/GlucoseTest.scala | Scala | gpl-3.0 | 1,679 |
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.infinity.server.unfiltered.request
import java.io.{StringReader, InputStream, Reader}
import org.scalatest.mock.MockitoSugar
import unfiltered.Cookie
import unfiltered.request.HttpRequest
case class MockHttpRequest[R](
override val underlying: R = null,
inputStream: InputStream = MockHttpRequest.mockStream,
reader: Reader = new StringReader(""),
isSecure: Boolean = false,
uri: String = "",
remoteAddr: String = "",
method: String = "GET",
cookies: Seq[Cookie] = Seq.empty,
protocol: String = "",
params: Map[String, Seq[String]] = Map.empty,
headerz: Map[String, Seq[String]] = Map.empty) extends HttpRequest[R](underlying) {
def headers(name: String): Iterator[String] = headerz.getOrElse(name, Seq.empty[String]).iterator
val headerNames: Iterator[String] = headerz.keysIterator
def parameterValues(param: String): Seq[String] = params(param)
val parameterNames: Iterator[String] = params.keysIterator
}
object MockHttpRequest extends MockitoSugar {
def mockStream = mock[InputStream]
}
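/** Illustrative sketch (not part of the original file): how a unit test might build a fake GET
  * request with query parameters and headers. The URI, parameter and header values are made-up
  * assumptions for the example only.
  */
object MockHttpRequestSketch {
  def sampleRequest: MockHttpRequest[Null] = MockHttpRequest[Null](
    uri = "/infinityfs/v1/metadata/tmp",
    method = "GET",
    params = Map("op" -> Seq("GETFILESTATUS")),
    headerz = Map("Authorization" -> Seq("Bearer some-token"))
  )
}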
| telefonicaid/fiware-cosmos-platform | infinity/server/src/test/scala/es/tid/cosmos/infinity/server/unfiltered/request/MockHttpRequest.scala | Scala | apache-2.0 | 1,716 |
package org.geoscript.geocss
import org.geotools.{ styling => gt }
import gt.Style
import collection.JavaConversions._
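/**
 * Micro-benchmark of the CSS-to-SLD pipeline: prints CSV rows of parse time, translation
 * time, rule count and SLD encoding time, first while growing the number of properties in
 * the template and then while growing the number of literal values per property.
 */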
object Benchmark {
val Translator = new Translator()
val template = """
[%1$s < 10] {
stroke: black;
label: "%1$s - 1";
}
[%1$s > 20] {
stroke: red;
label: "%1$s - 2";
}
[%1$s < 20] {
stroke: blue;
label: "%1$s - 3";
}
[%1$s = 15] {
stroke: green;
label: "%1$s - 4";
}
"""
val tx = new org.geotools.styling.SLDTransformer()
tx.setIndentation(4)
def encodeSLD(sld: gt.Style): String = {
val out = new java.io.StringWriter()
tx.transform(sld, out)
out.toString()
}
def time[A](op: => A): (A, Long) = {
val startTime = System.currentTimeMillis()
(op, System.currentTimeMillis() - startTime)
}
def ruleCount(sld: Style): Int = {
sld.featureTypeStyles.foldLeft(0) { (i, fts) => i + fts.rules.length }
}
def main(args: Array[String]) {
for (end <- 'A' to 'B') {
// dry run; warm up the JIT statistics
encodeSLD(
Translator.css2sld(
CssParser.parse(
('A' to end).map { template.format(_) }.mkString
).get
)
)
}
println("properties, parse_time, transform_time, rule_count, encode_time")
for (end <- 'A' to 'D') {
val range = 1 + (end - 'A')
val css = ('A' to end).map { template.format(_) }.mkString
val (cssRules, parseTime) = time { CssParser.parse(css).get }
val (sldRules: Style, transformTime) = time { Translator.css2sld(cssRules) }
val (sld, encodeTime) = time { encodeSLD(sldRules) }
println(Seq(range, parseTime, transformTime, ruleCount(sldRules), encodeTime).mkString(", "))
}
println("values, parse_time, transform_time, rule_count, encode_time")
for (range <- 4 to 16 by 4) {
val css = (1 to range).map {
"""
[A=%1$s] { label: "%1$s"; }
""".format(_)
}.mkString
val (cssRules, parseTime) = time { CssParser.parse(css).get }
val (sldRules, transformTime) = time { Translator.css2sld(cssRules) }
val (sld, encodeTime) = time { encodeSLD(sldRules) }
println(Seq(range, parseTime, transformTime, ruleCount(sldRules), encodeTime).mkString(", "))
}
}
}
| dwins/geoscript.scala | geocss/src/test/scala/org/geoscript/geocss/Benchmark.scala | Scala | mit | 2,244 |
package io.gatling.elassandra
object Predef {
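  /**
   * Entry point in the style of Gatling's Predef objects: builds an [[ElassandraProtocol]]
   * with overridable cluster, contact point, keyspace and table defaults.
   */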
  def elassandra(
      clusterName: String = "Localhost",
      clusterContactPoint: String = "172.28.198.16",
      keyspaceName: String = "customer",
      tableName: String = "external"
  ): ElassandraProtocol =
    new ElassandraProtocol(clusterName, clusterContactPoint, keyspaceName, tableName)
} | diegopacheco/scala-playground | gatling-elassandra/src/main/scala/io/gatling/elassandra/Predef.scala | Scala | unlicense | 349 |
/*
* Copyright 2016 Guy Van den Broeck and Wannes Meert (UCLA and KU Leuven)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.ucla.cs.starai.forclift.rcr
import scala.collection._
import scala.language.postfixOps
import edu.ucla.cs.starai.forclift.compiler._
import edu.ucla.cs.starai.forclift._
import edu.ucla.cs.starai.forclift.examples.models._
import edu.ucla.cs.starai.forclift.examples.models.mln._
import edu.ucla.cs.starai.forclift.inference._
import scala.util.Random._
import java.io._
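/**
 * Runs Relax, Compensate and Recover (RCR) with ground-truth logging over a list of example
 * models, writing results under experiments/rcr/test/<model name> and regenerating the plots
 * (via makeallplots.sh) after every compensation round.
 */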
object RelaxCompensateExperimentExact {
def main(args: Array[String]): Unit = {
val rootDir = new File("experiments/rcr/test/")
rootDir.mkdir()
def replot() {
import scala.sys.process._
println(Process("sh makeallplots.sh", rootDir)!!)
}
//TODO check OriginalSymmetricFriendsSmokerModel correct rewrite
//TODO increase precision!!!!! to avoid convergence at step 0 do minimal number of steps???? do reset of params?
val experiments = List(
// ("braz2-10", (new Braz2Model(10)).theory), //too long
// ("braz2-30", (new Braz2Model(30)).theory), //too long
// ("sickdeath-5", (new SickDeathModel(5)).theory),
// ("sickdeath-20", (new SickDeathModel(20)).theory),
// ("competingworkshops-10-3", (new CompetingWorkshopsModel(10, 3)).theory),
// ("competingworkshops-200-10", (new CompetingWorkshopsModel(200, 10)).theory),
// ("workshopattributes-5", (new Workshop6AttributesModel(5)).theory),
// ("workshopattributes-50", (new Workshop6AttributesModel(50)).theory),
// ("smokers-10", (new OriginalFriendsSmokerModel(10)).theory),
// ("smokers-100", (new OriginalFriendsSmokerModel(100)).theory),
// ("smokersdrinkers-10", (new OriginalFriendsSmokerDrinkerModel(10)).theory), //too long
// ("smokersdrinkers-50", (new OriginalFriendsSmokerDrinkerModel(50)).theory), //too long
("smokerssymmetric-10", (new OriginalSymmetricFriendsSmokerModel(10)).theory), //too long
("smokerssymmetric-50", (new OriginalSymmetricFriendsSmokerModel(50)).theory), //too long
("webkb-5", (new WebKBModel(5)).theory),
("webkb-20", (new WebKBModel(20)).theory))
for ((name, wmc) <- experiments) {
val dir = new File(rootDir, name)
dir.mkdir()
// val groundingsDir = new File("experiments/rcr/groundings/")
// groundingsDir.mkdir()
// MLNGrounder.ground(wmc,groundingsDir,name)
val rcr = new LoggingGroundTruthRCR(wmc, dir,
Compiler.Builder.default,
Compiler.Builder.default, true) {
override def onEndCompensation(
weights: PredicateWeights,
compensations: List[Compensation],
marginalCircuitsSet: MarginalCircuitsSet) {
super.onEndCompensation(
weights: PredicateWeights,
compensations: List[Compensation],
marginalCircuitsSet: MarginalCircuitsSet)
replot()
}
}
val weights = rcr.compensateFullRelaxationAndRecover(keepParams = true, damping = 0.5)
rcr.closeAll()
replot()
}
}
}
object RelaxCompensateExperimentApprox {
def main(args: Array[String]): Unit = {
val rootDir = new File("experiments/rcr/test/")
rootDir.mkdir()
def replot() {
import scala.sys.process._
println(Process("sh makeallplots.sh", rootDir)!!)
}
def model(n: Int) = new WeightedCNFModel {
def theoryString = "domain D " + n + """ {a,b,c}
predicate r(D,D) 1.0 1.0
predicate f_{1}(D,D,D) 1 0
¬f_{1}(X,Y,Z) v ¬r(X,Y) v ¬r(Y,Z) v r(X,Z)
f_{1}(X,Y,Z) v r(X,Y)
f_{1}(X,Y,Z) v r(Y,Z)
f_{1}(X,Y,Z) v ¬r(X,Z)
"""
}
//TODO check OriginalSymmetricFriendsSmokerModel correct rewrite
//TODO increase precision!!!!! to avoid convergence at step 0 do minimal number of steps???? do reset of params?
val experiments = List(
("tfriends-3", (model(3)).theory),
("tfriends-10", (model(10)).theory),
("tsmokers-3", (new TransitiveFriendsSmokerModel(3)).theory),
("tsmokersdrinkers-3", (new TransitiveFriendsSmokerDrinkerModel(3)).theory),
("tsmokerssymmetric-3", (new TransitiveSymmetricFriendsSmokerModel(3)).theory),
("tsmokers-10", (new TransitiveFriendsSmokerModel(10)).theory),
("tsmokersdrinkers-10", (new TransitiveFriendsSmokerDrinkerModel(10)).theory),
("tsmokerssymmetric-10", (new TransitiveSymmetricFriendsSmokerModel(10)).theory))
val exactCompiler = GroundCompiler.builder
for ((name, wmc) <- experiments) {
val dir = new File(rootDir, name)
dir.mkdir()
// val groundingsDir = new File("experiments/rcr/groundings/")
// groundingsDir.mkdir()
// MLNGrounder.ground(wmc,groundingsDir,name)
val rcr = new LoggingGroundTruthRCR(wmc, dir,
Compiler.Builder.default,
exactCompiler, true) {
override def onEndCompensation(
weights: PredicateWeights,
compensations: List[Compensation],
marginalCircuitsSet: MarginalCircuitsSet) {
super.onEndCompensation(
weights: PredicateWeights,
compensations: List[Compensation],
marginalCircuitsSet: MarginalCircuitsSet)
replot()
}
}
val weights = rcr.compensateFullRelaxationAndRecover(keepParams = true, damping = 0.5)
rcr.closeAll()
replot()
}
}
}
| UCLA-StarAI/Forclift | src/main/scala/edu/ucla/cs/starai/forclift/rcr/RelaxCompensateExperiment.scala | Scala | apache-2.0 | 6,001 |
package akkaviz.frontend.components
import org.scalajs.dom.html._
import org.scalajs.dom.{Element => domElement, console}
import rx.{Rx, Var}
import scala.scalajs.js.ThisFunction0
import scala.util.Try
import scalatags.JsDom.all._
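/**
 * "Message filter" tab: renders one checkbox row per message class seen so far and keeps
 * `selectedMessages` in sync when a row is clicked, when the regex box is used, or when the
 * all/none links are pressed. The Rx observer below rebuilds the table body whenever the
 * seen or selected sets change.
 */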
class MessageFilter(
seenMessages: Var[Set[String]],
selectedMessages: Var[Set[String]],
selectedActors: Var[Set[String]]
) extends Tab {
val messagesObs = Rx.unsafe {
(seenMessages(), selectedMessages())
}.triggerLater {
val seen = seenMessages.now.toList.sorted
val selected = selectedMessages.now
val content = seen.map {
clazz =>
val contains = selected(clazz)
tr(
td(input(`type` := "checkbox", if (contains) checked else ())),
td(if (contains) b(clazz) else clazz),
onclick := {
() =>
console.log(s"Toggling ${clazz} now it will be ${!contains}")
selectedMessages() = if (contains) selected - clazz else selected + clazz
}
)
}
messagesTbody.innerHTML = ""
messagesTbody.appendChild(content.render)
}
lazy val messagesTbody = tbody().render
val elem = div(cls := "panel-body", id := "messagefilter",
table(
cls := "table table-striped table-hover",
thead(
tr(th(), th("Class", p(
float.right,
input(id := "messagefilter-regex", size := 12, tpe := "text", placeholder := "Filter...", marginRight := 1.em, onkeyup := regexMessageFilter),
a(href := "#", id := "messagefilter-select-all", "all", onclick := selectAllMessageFilters),
" | ",
a(href := "#", id := "messagefilter-select-none", "none", onclick := clearMessageFilters)
)))
),
messagesTbody
)).render
def clearMessageFilters: ThisFunction0[domElement, Unit] = { _: domElement =>
selectedMessages() = Set.empty
}
def selectAllMessageFilters: ThisFunction0[domElement, Unit] = { _: domElement =>
selectedMessages() = seenMessages.now
}
def regexMessageFilter(): ThisFunction0[Input, Unit] = { self: Input =>
val input = self.value
Try(input.r).foreach { r =>
selectedMessages() = seenMessages.now.filter(_.matches(r.regex))
}
}
override def name: String = "Message filter"
override def tabId: String = "message-filter"
override def onCreate(): Unit = {
tabBody.appendChild(elem)
}
}
| blstream/akka-viz | frontend/src/main/scala/akkaviz/frontend/components/MessageFilter.scala | Scala | mit | 2,395 |
package at.logic.gapt.examples.tip.isaplanner
import at.logic.gapt.expr._
import at.logic.gapt.formats.ClasspathInputFile
import at.logic.gapt.formats.tip.TipSmtParser
import at.logic.gapt.proofs.Ant
import at.logic.gapt.proofs.gaptic._
object prop_30 extends TacticsProof {
val bench = TipSmtParser.fixupAndParse( ClasspathInputFile( "tip/isaplanner/prop_30.smt2", getClass ) )
ctx = bench.ctx
val sequent = bench.toSequent.zipWithIndex.map {
case ( f, Ant( i ) ) => s"h$i" -> f
case ( f, _ ) => "goal" -> f
}
val proof = Lemma( sequent ) {
allR
allR
induction( hov"xs:list" )
// base case
allL( "h6", le"x:Nat" )
eql( "h6_0", "goal" )
allL( "h14", le"x:Nat", le"x:Nat", le"nil:list" )
andL
impL( "h14_0_1" )
orR
induction( hov"x:Nat", "h14_0_1_0" )
axiomLog
allL( "h12", le"x_0:Nat", le"x_0:Nat" )
andL
impL( "h12_0_1" )
axiomLog
axiomLog
axiomLog
// inductive case
allL( "h7", le"x:Nat", le"x_0:Nat", le"xs_0:list" )
allL( "h8", le"x:Nat", le"x_0:Nat", le"xs_0:list" )
impL( "h7_0" )
negR
impL( "h8_0" )
axiomLog
eql( "h8_0", "goal" )
allL( "h14", le"x:Nat", le"x:Nat", le"cons(x_0:Nat, xs_0:list):list" )
andL
impL( "h14_0_1" )
orR
// proof of equal(x,x)
induction( hov"x:Nat", "h14_0_1_0" )
axiomLog
allL( "h12", le"x_1:Nat", le"x_1:Nat" )
andL
impL( "h12_0_1" )
axiomLog
axiomLog
axiomLog
eql( "h7_0", "goal" )
allL( "h14", le"x:Nat", le"x_0:Nat", le"ins(x:Nat, xs_0:list):list" )
andL
impL( "h14_0_1" )
orR
axiomLog
axiomLog
}
}
| gebner/gapt | examples/tip/isaplanner/prop_30.scala | Scala | gpl-3.0 | 1,652 |
package aecor.example
import io.circe.{ Decoder, Encoder }
import shapeless.Unwrapped
trait AnyValCirceEncoding {
implicit def anyValEncoder[V, U](implicit ev: V <:< AnyVal,
V: Unwrapped.Aux[V, U],
encoder: Encoder[U]): Encoder[V] = {
val _ = ev
encoder.contramap(V.unwrap)
}
implicit def anyValDecoder[V, U](implicit ev: V <:< AnyVal,
V: Unwrapped.Aux[V, U],
decoder: Decoder[U]): Decoder[V] = {
val _ = ev
decoder.map(V.wrap)
}
}
object AnyValCirceEncoding extends AnyValCirceEncoding
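/*
 * Hedged usage sketch (added for illustration): `AccountId` is a hypothetical value class,
 * not part of this module. With the implicits above in scope, circe treats the wrapper as
 * its underlying value rather than as a one-field JSON object.
 */
object AnyValCirceEncodingExample extends AnyValCirceEncoding {
  final case class AccountId(value: String) extends AnyVal
  // Encodes as the JSON string "a-1" and decodes back symmetrically.
  val json: io.circe.Json = Encoder[AccountId].apply(AccountId("a-1"))
  val roundTrip: Decoder.Result[AccountId] = Decoder[AccountId].decodeJson(json)
}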
| notxcain/aecor | modules/example/src/main/scala/aecor/example/AnyValCirceEncoding.scala | Scala | mit | 655 |
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jawa.flow.dda
import org.argus.jawa.flow.cfg._
import org.argus.jawa.flow.dfa._
import org.argus.jawa.flow.interprocedural.CallResolver
import org.argus.jawa.flow.rda._
import org.argus.jawa.flow.{Context, JawaAlirInfoProvider}
import org.argus.jawa.core.ast.{AssignmentStatement, CallStatement, Statement}
import org.argus.jawa.core.Global
import org.argus.jawa.core.util._
/**
* @author <a href="mailto:[email protected]">Fengguo Wei</a>
* @author <a href="mailto:[email protected]">Sankardas Roy</a>
*/
object InterProceduralReachingDefinitionAnalysis {
type RDFact = ReachingDefinitionAnalysis.RDFact
type LOC = Context
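  /**
   * An inter-procedural fact pairs an intra-procedural reaching-definition fact with the
   * calling context of the definition site.
   */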
type IRDFact = (RDFact, LOC)
type Node = ICFGNode
def apply(
global: Global,
cg: InterProceduralControlFlowGraph[Node]): MIdMap[Node, ISet[IRDFact]] = build(global, cg)
def build(
global: Global,
cg: InterProceduralControlFlowGraph[Node]): MIdMap[Node, ISet[IRDFact]] = {
new InterProceduralReachingDefinitionAnalysis().build(global, cg)
}
}
/**
* @author <a href="mailto:[email protected]">Fengguo Wei</a>
* @author <a href="mailto:[email protected]">Sankardas Roy</a>
*/
class InterProceduralReachingDefinitionAnalysis {
import InterProceduralReachingDefinitionAnalysis._
var icfg: InterProceduralControlFlowGraph[Node] = _
var factSet: MIdMap[Node, ISet[IRDFact]] = idmapEmpty[Node, ISet[IRDFact]]
def build(
global: Global,
icfg: InterProceduralControlFlowGraph[Node]): MIdMap[Node, ISet[IRDFact]] = {
val gen = new Gen
val kill = new Kill
val callr = Some(new Callr)
val ip = new InterIngredientProvider[IRDFact](global, icfg)
this.icfg = icfg
icfg.nodes.foreach{ node =>
global.getMethod(node.getOwner) match {
case Some(owner) =>
if(!owner.isUnknown){
val cfg = JawaAlirInfoProvider.getCfg(owner)
val rda = JawaAlirInfoProvider.getRdaWithCall(owner, cfg)
node match{
case cvn: ICFGVirtualNode =>
val rdafact = rda.entrySet(cfg.getVirtualNode(cvn.getVirtualLabel))
factSet.update(cvn, rdafact.map{fact => (fact, getContext(fact, cvn.getContext))})
case cln: ICFGLocNode =>
val rdafact = rda.entrySet(cfg.getNode(owner.getBody.resolvedBody.locations(cln.locIndex)))
factSet.update(cln, rdafact.map{fact => (fact, getContext(fact, cln.getContext))})
}
}
case None =>
}
}
val initialContext: Context = new Context(global.projectName)
val iota: ISet[IRDFact] = isetEmpty + (((VarSlot("@@IRDA"), InitDefDesc), initialContext))
val initial: ISet[IRDFact] = isetEmpty
MonotoneDataFlowAnalysisFramework[Node, IRDFact, LOC](icfg,
forward = true, lub = true, ip, gen, kill, callr, iota, initial)
factSet
}
private def getContext(fact: RDFact, srcContext: Context): Context = {
val procSig = srcContext.getMethodSig
val tarContext = srcContext.copy.removeTopContext()
fact._2 match {
case pdd: ParamDefDesc =>
tarContext.setContext(procSig, pdd.locUri)
case ldd: LocDefDesc =>
tarContext.setContext(procSig, ldd.locUri)
case dd: DefDesc =>
if(dd.isDefinedInitially){
tarContext.setContext(procSig, "Entry")
} else if(dd.isUndefined) {
tarContext.setContext(procSig, "Entry")
} else throw new RuntimeException("Unexpected DefDesc: " + dd)
}
}
private def isGlobal(slot: Slot): Boolean =
slot match{
case vs: VarSlot => vs.varName.startsWith("@@")
case _ => false
}
private def isDef(defDesc: DefDesc): Boolean =
defDesc match{
case _: LocDefDesc => true
case _ => false
}
/**
* @author <a href="mailto:[email protected]">Fengguo Wei</a>
*/
class Gen extends MonotonicFunction[Node, IRDFact] {
def apply(s: ISet[IRDFact], e: Statement, currentNode: Node): ISet[IRDFact] = {
e match {
case _: AssignmentStatement =>
val succs = icfg.successors(currentNode)
val globFacts =
if(succs.isEmpty) isetEmpty[IRDFact]
else succs.map(node => factSet(node).filter(fact => isGlobal(fact._1._1) && isDef(fact._1._2))).reduce(iunion[IRDFact])
val globDefFacts = globFacts.filter(fact => isDef(fact._1._2))
val flowingGlobFacts = s.filter(fact => isGlobal(fact._1._1) && isDef(fact._1._2))
factSet += (currentNode -> (factSet.getOrElse(currentNode, isetEmpty) -- globFacts ++ flowingGlobFacts ++ globDefFacts))
globDefFacts
case _ =>
isetEmpty
}
}
}
/**
* @author Fengguo Wei & Sankardas Roy
*/
class Kill extends MonotonicFunction[Node, IRDFact] {
def apply(s: ISet[IRDFact], e: Statement, currentNode: Node): ISet[IRDFact] = {
e match {
case _: AssignmentStatement =>
val node = currentNode
val succs = icfg.successors(node)
val globDefFacts =
if(succs.isEmpty) isetEmpty[IRDFact]
else succs.map(node => factSet(node).filter(fact => isGlobal(fact._1._1) && isDef(fact._1._2))).reduce(iunion[IRDFact])
val redefGlobSlots = globDefFacts.filter(fact => s.map(_._1._1).contains(fact._1._1)).map(_._1._1)
s.filter(f => !redefGlobSlots.contains(f._1._1))
case _ => s
}
}
}
/**
* @author <a href="mailto:[email protected]">Fengguo Wei</a>
* @author <a href="mailto:[email protected]">Sankardas Roy</a>
*/
class Callr extends CallResolver[Node, IRDFact] {
/**
* It returns the facts for each callee entry node and caller return node
*/
def resolveCall(s: ISet[IRDFact], cj: CallStatement, callerNode: Node): (IMap[Node, ISet[IRDFact]], ISet[IRDFact]) = {
var calleeFactsMap: IMap[ICFGNode, ISet[IRDFact]] = imapEmpty
var returnFacts: ISet[IRDFact] = isetEmpty
val callNode = icfg.getICFGCallNode(callerNode.getContext)
icfg.successors(callNode).foreach {
case suc@(_: ICFGEntryNode) =>
calleeFactsMap += (suc -> s)
case _: ICFGReturnNode =>
returnFacts ++= s
case _ =>
}
(calleeFactsMap, returnFacts)
}
def getAndMapFactsForCaller(calleeS: ISet[IRDFact], callerNode: Node, calleeExitNode: Node): ISet[IRDFact] = {
calleeS
}
def needReturnNode(): Boolean = true
}
}
| arguslab/Argus-SAF | jawa/src/main/scala/org/argus/jawa/flow/dda/InterProceduralReachingDefinitionAnalysis.scala | Scala | apache-2.0 | 6,838 |
/*
* Copyright 2014 Renaud Bruneliere
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.bruneli.scalaopt.core.gradient
import com.github.bruneli.scalaopt.core._
import com.github.bruneli.scalaopt.core.variable.UnconstrainedVariables
import org.scalatest.{FlatSpec, Matchers}
/**
* @author bruneli
*/
class SteihaugCGSpec extends FlatSpec with Matchers {
import SteihaugCG._
val x0 = UnconstrainedVariables(0.5, 2.0)
val fQuad = (x: UnconstrainedVariablesType) => (x - x0) dot (x - x0)
val dfQuad = (x: UnconstrainedVariablesType) => (x - x0) * 2.0
val config = new SteihaugCGConfig(tol = 1.0e-6)
"SteihaugCG method" should "converge with fQuad and exact derivatives" in {
val xOpt = minimize((fQuad, dfQuad), UnconstrainedVariables(0.0, 0.0))
xOpt shouldBe 'success
val d = xOpt.get - x0
(d dot d) should be < (config.tol * config.tol)
}
it should "converge with fQuad and approximate derivatives" in {
val xOpt = minimize(fQuad, UnconstrainedVariables(0.0, 0.0))
xOpt shouldBe 'success
val d = xOpt.get - x0
(d dot d) should be < (config.tol * config.tol)
}
it should "throw an exception when reaching max number of iterations" in {
a [MaxIterException] should be thrownBy {
minimize((x: UnconstrainedVariablesType) => x(0) + x(1), UnconstrainedVariables(0.0, 0.0))
}
}
}
| bruneli/scalaopt | core/src/test/scala/com/github/bruneli/scalaopt/core/gradient/SteihaugCGSpec.scala | Scala | apache-2.0 | 1,883 |
/*
* Copyright 2012 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.money.core.formatters
import com.comcast.money.api.SpanId
import com.typesafe.config.Config
final case class FormatterChain(formatters: Seq[Formatter]) extends Formatter {
override def toHttpHeaders(spanId: SpanId, addHeader: (String, String) => Unit): Unit = formatters.foreach {
formatter => formatter.toHttpHeaders(spanId, addHeader)
}
override def fromHttpHeaders(headers: Iterable[String], getHeader: String => String, log: String => Unit): Option[SpanId] = formatters.toStream.flatMap {
formatter => formatter.fromHttpHeaders(headers, getHeader, log)
  }.headOption
override def fields: Seq[String] = formatters.flatMap {
formatter => formatter.fields
}
override def setResponseHeaders(getHeader: String => String, addHeader: (String, String) => Unit): Unit = formatters.foreach {
formatter => formatter.setResponseHeaders(getHeader, addHeader)
}
}
object FormatterChain {
import FormatterFactory.create
def apply(config: Config): FormatterChain = {
val formatters = create(config.getConfigList("formatters")).get
FormatterChain(formatters)
}
def default: FormatterChain = FormatterChain(Seq(MoneyTraceFormatter, TraceContextFormatter))
}
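/*
 * Hedged usage sketch (added for illustration): `incomingHeaders` stands in for the headers
 * of a real request. Each formatter in the default chain is tried lazily in order, and the
 * first one that recognises the headers provides the SpanId.
 */
object FormatterChainExample {
  private val incomingHeaders: Map[String, String] = Map.empty
  // getHeader returns null for absent headers, mirroring servlet-style lookup (an assumption).
  val maybeSpanId: Option[SpanId] =
    FormatterChain.default.fromHttpHeaders(
      incomingHeaders.keys,
      name => incomingHeaders.getOrElse(name, null),
      _ => ())
}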
| Comcast/money | money-core/src/main/scala/com/comcast/money/core/formatters/FormatterChain.scala | Scala | apache-2.0 | 1,848 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.audit
import java.io.Closeable
import java.time.ZonedDateTime
import com.google.gson.{Gson, GsonBuilder}
import com.typesafe.scalalogging.LazyLogging
import scala.reflect.ClassTag
/**
* Basic trait for any 'event' that we may want to audit. Ties it to a particular data store, schema type name
* and date
*/
trait AuditedEvent {
/**
* Underlying data store type that triggered the event - e.g. 'accumulo', 'hbase', 'kafka'
*
* @return
*/
def storeType: String
/**
* Simple feature type name that triggered the event
*
* @return
*/
def typeName: String
/**
* Date of event, in millis since the Java epoch
*
* @return
*/
def date: Long
}
/**
* An event that can be soft-deleted
*/
trait DeletableEvent extends AuditedEvent {
/**
* Has the event been marked as deleted?
*
* @return
*/
def deleted: Boolean
}
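/**
 * Hedged example implementation (added for illustration; not part of the original API): the
 * smallest event an [[AuditWriter]] could record for a query against some schema.
 */
case class ExampleQueryEvent(storeType: String, typeName: String, date: Long) extends AuditedEvent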
/**
* Writes an audited event
*/
trait AuditWriter extends Closeable {
/**
* Writes an event asynchronously
*
* @param event event to write
* @tparam T event type
*/
def writeEvent[T <: AuditedEvent](event: T)(implicit ct: ClassTag[T]): Unit
}
/**
* Reads an audited event
*/
trait AuditReader extends Closeable {
/**
* Retrieves stored events
*
* @param typeName simple feature type name
* @param dates dates to retrieve stats for
* @tparam T event type
* @return iterator of events
*/
def getEvents[T <: AuditedEvent](typeName: String,
dates: (ZonedDateTime, ZonedDateTime))
(implicit ct: ClassTag[T]): Iterator[T]
}
/**
 * Implements [[AuditWriter]] by logging each audited event as JSON at debug level
*/
trait AuditLogger extends AuditWriter with LazyLogging {
private val gson: Gson = new GsonBuilder().serializeNulls().create()
override def writeEvent[T <: AuditedEvent](event: T)(implicit ct: ClassTag[T]): Unit =
logger.debug(gson.toJson(event))
override def close(): Unit = {}
}
object AuditLogger extends AuditLogger | elahrvivaz/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/audit/AuditedEvent.scala | Scala | apache-2.0 | 2,560 |
/*
* Licensed to the Massive Data Science and Derrick R. Burns under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Massive Data Science and Derrick R. Burns licenses this file to You under the
* Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.massivedatascience.clusterer
import com.massivedatascience.clusterer.KMeansSelector.InitialCondition
import com.massivedatascience.linalg.BLAS._
import com.massivedatascience.util.{ SparkHelper, XORShiftRandom }
import org.apache.spark.Logging
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.linalg.{ Vector, Vectors }
import org.apache.spark.rdd.RDD
import scala.annotation.tailrec
import scala.collection.Map
import scala.collection.mutable.ArrayBuffer
/**
*
* Initialize `runs` sets of cluster centers using the
* <a href="http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf">k-means|| algorithm</a>.
* This is a variant of k-means++ that tries to find dissimilar cluster centers by starting with a random center and then doing
* passes where more centers are chosen with probability proportional to their squared distance
* to the current cluster set. It results in a provable approximation to an optimal clustering.
*
* In this implementation, we allow the client to provide an initial set of cluster centers
* and closest distance for each point to those cluster centers. This allows us to
* use this code to find additional cluster centers at any time.
*/
case class KMeansParallel(numSteps: Int, sampleRate: Double = 1.0) extends KMeansSelector
with SparkHelper with Logging {
/**
*
* @param pointOps distance function
* @param data data
* @param targetNumberClusters number of new clusters desired
* @param initialState initial clusters and distance data
* @param runs number of runs to perform
* @param seedx random number seed
* @return updated set of cluster centers
*/
def init(
pointOps: BregmanPointOps,
data: RDD[BregmanPoint],
targetNumberClusters: Int,
initialState: Option[InitialCondition] = None,
runs: Int,
seedx: Long): Seq[Centers] = {
implicit val sc = data.sparkContext
require(data.getStorageLevel.useMemory)
val rand = new XORShiftRandom(seedx)
val seed = rand.nextLong()
val preselectedCenters = initialState.map(_.centers)
val initialCosts = initialState.map(_.distances)
val centers = preselectedCenters.getOrElse(randomCenters(pointOps, data, runs, seed))
val requested = numbersRequested(targetNumberClusters, preselectedCenters, runs)
val expandedCenters = moreCenters(pointOps, data, runs, initialCosts, numSteps, requested, seed,
centers)
val numbersRetainedCenters = preselectedCenters.map(_.map(_.size))
finalClusterCenters(pointOps, data, targetNumberClusters, rand.nextLong(), expandedCenters,
numbersRetainedCenters)
}
/**
* Use K-Means++ to whittle the candidate centers to the requested number of centers
*
* @param numClusters number of new clusters desired
* @param seed random number seed
* @param centers candidate centers
* @param numberRetained number of pre-selected candidate centers to keep
* @return arrays of selected centers
*/
private[this] def finalClusterCenters(
pointOps: BregmanPointOps,
data: RDD[BregmanPoint],
numClusters: Int,
seed: Long,
centers: Seq[Centers],
numberRetained: Option[Seq[Int]]): Seq[Centers] = {
val centerArrays = centers.map(_.toIndexedSeq)
val weightMap = weights(pointOps, data, centerArrays, sampleRate, seed)
val kMeansPlusPlus = new KMeansPlusPlus(pointOps)
Seq.tabulate(centerArrays.length) { r =>
val myCenters = centerArrays(r)
val weights = IndexedSeq.tabulate(myCenters.length)(i => weightMap.getOrElse((r, i), 0.0))
val kx = if (numClusters > myCenters.length) myCenters.length else numClusters
kMeansPlusPlus.goodCenters(seed, myCenters, weights, kx, 1,
numberRetained.map(_(r)).getOrElse(0))
}
}
/**
* Set the costs, given a set of centers.
*
* @param centers new centers to consider
* @return
*/
private[this] def costsFromCenters(
pointOps: BregmanPointOps,
data: RDD[BregmanPoint],
runs: Int,
centers: Seq[Centers]): RDD[Vector] = {
implicit val sc = data.sparkContext
withBroadcast(centers) { bcNewCenters =>
data.map { point =>
Vectors.dense(Array.tabulate(runs) { r =>
pointOps.pointCost(bcNewCenters.value(r), point)
})
}
}
}
/**
* Update the costs, given a previous set of costs and a new set of centers per run.
*
* @param centers new centers to consider
* @param oldCosts best distance to previously considered centers
* @return
*/
private[this] def updatedCosts(
pointOps: BregmanPointOps,
data: RDD[BregmanPoint],
runs: Int,
centers: Seq[Centers],
oldCosts: RDD[Vector]): RDD[Vector] = {
implicit val sc = data.sparkContext
withBroadcast(centers) { bcNewCenters =>
data.zip(oldCosts).map {
case (point, oldCost) =>
Vectors.dense(Array.tabulate(runs) { r =>
math.min(pointOps.pointCost(bcNewCenters.value(r), point), oldCost(r))
})
}
}
}
/**
* Select one point per run
*
* @param seed seed for random numbers
* @return random center per run stored in an array buffer
*/
private[this] def randomCenters(
pointOps: BregmanPointOps,
data: RDD[BregmanPoint],
runs: Int,
seed: Long): Seq[Centers] = {
data
.takeSample(withReplacement = true, runs, seed)
.map(pointOps.toCenter)
.map(IndexedSeq(_))
}
/**
* Compute for each cluster the sum of the weights of the points in the cluster
*
* @param centerArrays sequence of arrays of centers
* @param fraction fraction of points to sample
* @return A map from (run, cluster index) to the sum of the weights of its points
*/
private[this] def weights(
pointOps: BregmanPointOps,
data: RDD[BregmanPoint],
centerArrays: Seq[Centers],
fraction: Double,
seed: Long): Map[(Int, Int), Double] = {
implicit val sc = data.sparkContext
withBroadcast(centerArrays) { bcCenters =>
// for each (run, cluster) compute the sum of the weights of the points in the cluster
data.sample(withReplacement = false, fraction, seed).flatMap { point =>
val centers = bcCenters.value
Seq.tabulate(centers.length)(r =>
((r, pointOps.findClosestCluster(centers(r), point)), point.weight))
}.reduceByKeyLocally(_ + _)
}
}
/**
* Convert an sequence of RDDs of Doubles into RDD of vectors of Doubles
*
* @param rdds sequence of RDDs of Doubles
* @return RDD of vectors
*/
private[this] def asVectors(rdds: Seq[RDD[Double]]): RDD[Vector] = {
rdds match {
      case Seq(head, _*) =>
rdds.zipWithIndex.foldLeft(head.map { _ => new Array[Double](rdds.length) }) {
case (arrayRdd, (doubleRdd, i)) =>
arrayRdd.zip(doubleRdd).map { case (array, double) => array(i) = double; array }
}.map(Vectors.dense)
case _ => throw new IllegalArgumentException("sequence of RDDs must be non-empty")
}
}
/**
* Select approximately k points at random with probability proportional to the weight vectors
* given.
*
* @param k number of points desired
* @param seed random number seed
* @param costs costs
* @return k * runs new points, in an array where each entry is the tuple (run, point)
*/
private[this] def select(
pointOps: BregmanPointOps,
data: RDD[BregmanPoint],
runs: Int,
k: Seq[Int],
seed: Long,
costs: RDD[Vector]): Array[(Int, BregmanCenter)] = {
val sumCosts = costs
.aggregate(Vectors.zeros(runs))(
(s, v) => axpy(1.0, v, s),
(s0, s1) => axpy(1.0, s1, s0)
)
require(costs.getStorageLevel.useMemory)
data.zip(costs).mapPartitionsWithIndex[(Int, BregmanCenter)] { (index, pointsWithCosts) =>
val rand = new XORShiftRandom(seed ^ index)
val range = 0 until runs
pointsWithCosts.flatMap {
case (p, c) =>
val selectedRuns = range.filter { r =>
val v = rand.nextDouble()
v < c(r) * k(r) / sumCosts(r)
}
if (selectedRuns.nonEmpty) {
val cp = pointOps.toCenter(p)
selectedRuns.map((_, cp)).toIterator
} else {
Seq[(Int, BregmanCenter)]()
}
}
}.collect()
}
/**
* Identify the number of additional cluster centers needed per run.
*
* @param desired total number of clusters desired (for each run)
* @param centers initial clusters and distance per point to cluster
* @param runs number of runs
* @return number of clusters needed to fulfill gap
*/
private[this] def numbersRequested(desired: Int, centers: Option[Seq[Centers]], runs: Int): Seq[Int] =
centers.map(_.map(desired - _.length))
.getOrElse(Seq.fill(runs)(desired))
/**
   * On each step, on average 2 * requested(run) points are selected for each run, with
   * probability proportional to their distance (cost) from the current centers. Note that
   * only distances between points and new centers are computed in each iteration.
*
* @param initialCosts initial costs
* @param requested minimum number of points add
* @param seed random seed
* @param centers initial centers
* @return expanded set of centers, including initial centers
*/
private[this] def moreCenters(
pointOps: BregmanPointOps,
data: RDD[BregmanPoint],
runs: Int,
initialCosts: Option[Seq[RDD[Double]]],
numberSteps: Int,
requested: Seq[Int],
seed: Long,
centers: Seq[Centers]): Seq[Centers] = {
val addedCenters = centers.map(new ArrayBuffer[BregmanCenter] ++= _)
val newCenters = Seq.fill(runs)(new ArrayBuffer[BregmanCenter]())
@tailrec
def addCenters(step: Int, costs: RDD[Vector]): RDD[Vector] = {
val stepSeed = seed ^ (step << 16)
for ((index, center) <- select(pointOps, data, runs, requested.map(_ * 2), stepSeed, costs)) {
newCenters(index) += center
}
val newCosts = exchange(s"costs at step $step", costs) { oldCosts =>
updatedCosts(pointOps, data, runs, newCenters, oldCosts)
}
for ((c, n) <- addedCenters.zip(newCenters)) {
c ++= n
n.clear()
}
if (step < numberSteps) addCenters(step + 1, newCosts) else newCosts
}
val startingCosts = initialCosts.map(asVectors)
.getOrElse(costsFromCenters(pointOps, data, runs, centers))
val costs = sync[Vector]("initial costs", startingCosts)
val finalCosts = addCenters(0, costs)
finalCosts.unpersist(blocking = false)
addedCenters.map(_.toIndexedSeq)
}
}
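/*
 * Hedged illustration (added; not part of the original implementation): the acceptance test
 * in `select` keeps a point of cost c in run r with probability min(1, c * k / sumCosts), so
 * a single pass picks roughly k points, biased toward points far from the current centers.
 */
private[clusterer] object KMeansParallelSelectionSketch {
  /** Expected number of points accepted in one pass over `costs` with oversampling factor `k`. */
  def expectedPicks(costs: Seq[Double], k: Int): Double = {
    val total = costs.sum
    costs.map(c => math.min(1.0, c * k / total)).sum // close to k when no single term saturates
  }
}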
| derrickburns/generalized-kmeans-clustering | src/main/scala/com/massivedatascience/clusterer/KMeansParallel.scala | Scala | apache-2.0 | 11,457 |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package com.jspha.maia
import scala.language.higherKinds
import com.jspha.maia
import scala.collection.immutable.HashMap
/**
* A [[Fields]] is a type signature providing meaning for the various types of
* field-lookups available in an Api.
*/
trait Fields {
/**
* [[AtomE]] describes a field which returns a value atomically---no
* further query is run against the returned value, it's returned wholesale.
*
* @tparam E Possible error type
* @tparam A Return type of this field lookup
*/
type AtomE[E, A]
type Atom[A] = AtomE[Nothing, A]
/**
* [[IAtomE]] describes an atomic field that is indexed by a single
* argument. This argument *must* be provided in order to qualify which
* value is returned
*
* @tparam I Index or "argument" to the field
* @tparam A Return type of this field lookup
*/
type IAtomE[I, E, A]
type IAtom[I, A] = IAtomE[I, Nothing, A]
/**
* [[ObjE1]] describes a field returning a single "object" which supports
* sub-querying.
*
* @tparam A Api-schema of the returned "object" values
*/
type ObjE1[E, A[_ <: Fields]] = ObjE[Cardinality.One, E, A]
type Obj1[A[_ <: Fields]] = ObjE[Cardinality.One, Nothing, A]
/**
* [[ObjE]] describes a generic "object" field much like [[ObjE1]] but it
* also supports a choice of [[Cardinality]] of the set of object
* responses returned.
*
* @tparam M [[Cardinality]] of the set of objects returned by this field
* lookup
* @tparam A Api-schema of the returned "object" values
*/
type ObjE[M <: Cardinality, E, A[_ <: Fields]]
type Obj[M <: Cardinality, A[_ <: Fields]] = ObjE[M, Nothing, A]
/**
* [[IObjE1]] is similar to [[ObjE1]] but allows for indexing or
* parameterization of the field lookup
*
* @tparam I The index or "argument" to this field lookup
* @tparam A Api-schema of the returned "object" values
*/
type IObjE1[I, E, A[_ <: Fields]] = IObjE[I, Cardinality.One, E, A]
type IObj1[I, A[_ <: Fields]] = IObjE[I, Cardinality.One, Nothing, A]
/**
* [[IObjE]] is similar to [[ObjE]] but allows for indexing or
* parameterization of the field lookup
*
* @tparam I The index or "argument" to this field lookup
* @tparam M [[Cardinality]] of the set of objects returned by this field
* lookup
* @tparam A Api-schema of the returned "object" values
*/
type IObjE[I, M <: Cardinality, E, A[_ <: Fields]]
type IObj[I, M <: Cardinality, A[_ <: Fields]] = IObjE[I, M, Nothing, A]
}
object Fields {
final class Fetcher[F[_]] extends Fields {
type AtomE[E, A] = F[Either[E, A]]
type IAtomE[I, E, A] = I => F[Either[E, A]]
type ObjE[M <: Cardinality, E, Api[_ <: Fields]] =
F[Either[E, M#Coll[maia.Fetcher[F, Api]]]]
type IObjE[I, M <: Cardinality, E, Api[_ <: Fields]] =
I => F[Either[E, M#Coll[maia.Fetcher[F, Api]]]]
}
final class Query[Super[_ <: Fields]] extends Fields {
type AtomE[E, A] = Lookup[Super, E, A]
type IAtomE[I, E, A] = I => Lookup[Super, E, A]
trait ObjE[M <: Cardinality, E, Sub[_ <: Fields]] {
def apply[R](cont: maia.Query[Sub] => Lookup[Sub, E, R])
: Lookup[Super, E, M#Coll[R]]
}
trait IObjE[I, M <: Cardinality, E, Sub[_ <: Fields]] {
def apply[R](ix: I)(cont: maia.Query[Sub] => Lookup[Sub, E, R])
: Lookup[Super, E, M#Coll[R]]
}
}
sealed trait Response extends maia.Fields {
type AtomE[E, A] = Option[Either[E, A]]
type IAtomE[I, E, A] = HashMap[I, Either[E, A]]
type ObjE[M <: maia.Cardinality, E, A[_ <: maia.Fields]] =
Option[Either[E, M#Coll[maia.Response[A]]]]
type IObjE[I, M <: maia.Cardinality, E, A[_ <: maia.Fields]] =
HashMap[I, Either[E, M#Coll[maia.Response[A]]]]
}
object Response extends Response
sealed trait Request extends maia.Fields {
type AtomE[E, A] = Boolean
type IAtomE[I, E, A] = Set[I]
type ObjE[M <: maia.Cardinality, E, A[_ <: maia.Fields]] =
Option[maia.Request[A]]
type IObjE[I, M <: maia.Cardinality, E, A[_ <: maia.Fields]] =
HashMap[I, maia.Request[A]]
}
object Request extends Request
}
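/*
 * Hedged schema sketch (added for illustration; `PersonApi` is not part of maia, and the
 * library's own Api encoding may differ): one way to write a schema against these field
 * types is as a type constructor over Fields, so the same shape can be interpreted as a
 * request, response, query or fetcher by choosing the Fields instance.
 */
final case class PersonApi[F <: Fields](
  name: F#Atom[String],
  nickname: F#IAtom[String, String]
)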
| tel/scala-maia | maia/src/main/scala-2.12/com/jspha/maia/Fields.scala | Scala | mpl-2.0 | 4,414 |
package class_property_extractor
import scala.collection.JavaConversions._
import scala.collection.mutable.Map
import scala.collection.mutable.Set
import org.scalaquery.ql.basic.BasicDriver.Implicit.queryToQueryInvoker
import org.scalaquery.ql.basic.BasicDriver.Implicit.tableToQuery
import org.scalaquery.ql.basic.BasicTable
import org.scalaquery.ql.basic.BasicDriver.Implicit._
import org.scalaquery.session.Database.threadLocalSession
import org.scalaquery.session.Database
import com.hp.hpl.jena.rdf.model.ResourceFactory
import com.hp.hpl.jena.util.FileManager
import class_instance_extractor.ClassInstanceList
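/**
 * ScalaQuery table storing, for every (class, property) pair, how many instance statements
 * of that class use the property.
 */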
object ClassPropertyList extends BasicTable[(String, String, Int)]("ClassPropertyListTable") {
def jwoClass = column[String]("jwoClass")
def jwoProperty = column[String]("jwoProperty")
def jwoPropertyCount = column[Int]("jwoPropertyCount")
def * = jwoClass ~ jwoProperty ~ jwoPropertyCount
}
/**
* 6. Defining the domains of properties based on a consideration of property inheritance
* 6-1. class_property_extractor.ClassPropertyExtractor.scala
* - Inputs
* -- inputs_and_outputs/refined_class_instance_list_removing_redundant_type.db
* -- ontologies/wikipediaontology_instance_20101114ja.rdf
* - Output
* -- inputs_and_outputs/class_property_list_with_count.db
*/
object ClassPropertyExtractor {
def main(args: Array[String]) {
val classInstanceSetMap = Map[String, Set[String]]()
val typeDB = Database.forURL(url = "jdbc:sqlite:inputs_and_outputs/refined_class_instance_list_removing_redundant_type.db", driver = "org.sqlite.JDBC")
typeDB withSession {
val q = for { result <- ClassInstanceList }
yield result.jwoClass ~ result.jwoInstance
for ((jwoClass, jwoInstance) <- q.list) {
classInstanceSetMap.get(jwoClass) match {
case Some(instanceSet) => instanceSet.add(jwoInstance)
case None =>
val instanceSet = Set(jwoInstance)
classInstanceSetMap.put(jwoClass, instanceSet)
}
}
}
println("model loading");
val ns = "http://www.yamaguti.comp.ae.keio.ac.jp/wikipedia_ontology/instance/"
val inputOntology = "ontologies/wikipediaontology_instance_20101114ja.rdf"
val model = FileManager.get().loadModel(inputOntology)
println("model loaded")
val classPropertyListDB = Database.forURL(url = "jdbc:sqlite:inputs_and_outputs/class_property_list_with_count.db", driver = "org.sqlite.JDBC")
classPropertyListDB withSession {
(ClassPropertyList.ddl).create
}
for ((cls, instanceSet) <- classInstanceSetMap) {
println(cls + "->" + instanceSet.size)
val propertyCountMap = Map[String, Int]()
for (instance <- instanceSet) {
for (stmt <- model.listStatements(ResourceFactory.createResource(ns + instance), null, null).toList()) {
var property = stmt.getPredicate().getURI()
if (property.split("property/").size == 2) {
            property = property.split("property/")(1) // For JWO properties, keep only the property name; for anything else, keep the namespace as well.
}
// println(instance + "-->" + property)
if (property != "http://www.w3.org/1999/02/22-rdf-syntax-ns#type") {
propertyCountMap.get(property) match {
case Some(count) => propertyCountMap.put(property, count + 1)
case None =>
propertyCountMap.put(property, 1)
}
}
}
}
classPropertyListDB withSession {
for ((property, count) <- propertyCountMap) {
          // println(cls + "\t" + property + "\t" + count)
ClassPropertyList.insert(cls, property, count)
}
}
}
}
}
| t-morita/JWO_Refinement_Tools | src/main/scala/class_property_extractor/ClassPropertyExtractor.scala | Scala | apache-2.0 | 3,750 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.nisp.views.formatting
import uk.gov.hmrc.nisp.utils.UnitSpec
class NispMoneySpec extends UnitSpec {
"pounds" should {
"return HTML with £100 for value 100" in {
NispMoney.pounds(100).toString().endsWith("£100") shouldBe true
}
"return HTML with £100.12 for value 100.12" in {
NispMoney.pounds(100.12).toString().endsWith("£100.12") shouldBe true
}
"return HTML with £100.10 for value 100.1" in {
NispMoney.pounds(100.1).toString().endsWith("£100.10") shouldBe true
}
}
}
| hmrc/nisp-frontend | test/uk/gov/hmrc/nisp/views/formatting/NispMoneySpec.scala | Scala | apache-2.0 | 1,165 |
/*
Copyright 2015 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.serialization
import java.io.{ InputStream, OutputStream }
import scala.util.{ Failure, Success, Try }
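/**
 * Serialization for pairs built from serializations of the two components: the hash mixes
 * the component hashes, reads and writes are performed sequentially, and the static size is
 * defined only when both components have one.
 */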
class Serialization2[A, B](val serA: Serialization[A], val serB: Serialization[B]) extends Serialization[(A, B)] {
override def hash(x: (A, B)) = {
import MurmurHashUtils._
val h1 = mixH1(seed, serA.hash(x._1))
val h2 = mixH1(h1, serB.hash(x._2))
fmix(h2, 2)
}
override def equiv(x: (A, B), y: (A, B)): Boolean =
serA.equiv(x._1, y._1) && serB.equiv(x._2, y._2)
override def read(in: InputStream): Try[(A, B)] = {
val a = serA.read(in)
val b = serB.read(in)
(a, b) match {
case (Success(a), Success(b)) => Success((a, b))
case (Failure(e), _) => Failure(e)
case (_, Failure(e)) => Failure(e)
}
}
override def write(out: OutputStream, a: (A, B)): Try[Unit] = {
val resA = serA.write(out, a._1)
if (resA.isSuccess) serB.write(out, a._2)
else resA
}
override val staticSize = for {
a <- serA.staticSize
b <- serB.staticSize
} yield a + b
override def dynamicSize(t: (A, B)) = if (staticSize.isDefined) staticSize
else for {
a <- serA.dynamicSize(t._1)
b <- serB.dynamicSize(t._2)
} yield a + b
}
object OrderedSerialization2 {
def maybeOrderedSerialization2[A, B](implicit ordA: Ordering[A], ordB: Ordering[B]): Ordering[(A, B)] = {
(ordA, ordB) match {
case (ordA: OrderedSerialization[_], ordB: OrderedSerialization[_]) =>
new OrderedSerialization2(ordA.asInstanceOf[OrderedSerialization[A]], ordB.asInstanceOf[OrderedSerialization[B]])
case _ => Ordering.Tuple2(ordA, ordB)
}
}
}
class OrderedSerialization2[A, B](val ordA: OrderedSerialization[A],
val ordB: OrderedSerialization[B]) extends Serialization2[A, B](ordA, ordB) with OrderedSerialization[(A, B)] {
override def compare(x: (A, B), y: (A, B)) = {
val ca = ordA.compare(x._1, y._1)
if (ca != 0) ca
else ordB.compare(x._2, y._2)
}
override def compareBinary(a: InputStream, b: InputStream) = {
// This mutates the buffers and advances them. Only keep reading if they are different
val cA = ordA.compareBinary(a, b)
// we have to read the second ones to skip
val cB = ordB.compareBinary(a, b)
cA match {
case OrderedSerialization.Equal => cB
case f @ OrderedSerialization.CompareFailure(_) => f
case _ => cA // the first is not equal
}
}
}
| tglstory/scalding | scalding-serialization/src/main/scala/com/twitter/scalding/serialization/Serialization2.scala | Scala | apache-2.0 | 2,998 |
package com.lot.exchange
import scala.collection.mutable.ListBuffer
import com.lot.order.model.Order
import com.lot.order.model.OrderType
import com.lot.order.dao.OrderDao
import com.typesafe.scalalogging.LazyLogging
import scala.concurrent.duration.Duration
import scala.concurrent.Await
/**
* Manages all the unfilled orders for a given security
*/
class UnfilledOrderManager(val security_id: Long,
val buys: ListBuffer[Order],
val sells: ListBuffer[Order]) extends LazyLogging {
/**
   * Ensures sells are sorted by price and time, with market orders on top of limit orders.
* Note id is used instead of time for performance
*/
private def sortSells(left: Order, right: Order) = { left.order_type > right.order_type && left.price <= right.price && left.id.get < right.id.get }
/**
* Ensures buys are sorted by price and time. Note id is used instead of time for performance
*/
private def sortBuys(left: Order, right: Order) = { left.order_type > right.order_type && left.price >= right.price && left.id.get < right.id.get }
/*
* Ensure the orders from the DB are sorted properly
*/
buys.sortWith(sortBuys)
sells.sortWith(sortSells)
logger.debug(s"buys = $buys sells = $sells")
/**
* Finds an order that matches the given order
*/
def findMatch(order: Order): Option[Order] = {
checkOrder(order)
/*
* Sometimes the order gets loaded from the DB when the OM starts
* In which case the order should be dequeued before we proceed
*/
dequeueOrder(order, true)
logger.debug(s"findMatch: $order buys: $buys sells: $sells")
if (order.unfilled_qty > 0) {
order match {
case Order(id, _, OrderType.BUY, OrderType.MARKET, user_id, _, _, _, _, _, _, _, _, _, _) =>
sells.headOption match {
case Some(head) => sells.headOption
case _ => None
}
case Order(id, _, OrderType.SELL, OrderType.MARKET, user_id, _, _, _, _, _, _, _, _, _, _) =>
buys.headOption match {
case Some(head) => buys.headOption
case _ => None
}
case Order(id, _, OrderType.BUY, OrderType.LIMIT, user_id, _, _, _, _, _, _, _, _, _, _) =>
sells.headOption match {
case Some(head) if head.order_type == OrderType.LIMIT && head.price <= order.price => sells.headOption
case Some(head) if head.order_type == OrderType.MARKET => sells.headOption
case _ => None
}
case Order(id, _, OrderType.SELL, OrderType.LIMIT, user_id, _, _, _, _, _, _, _, _, _, _) =>
buys.headOption match {
case Some(head) if head.order_type == OrderType.LIMIT && head.price >= order.price => buys.headOption
case Some(head) if head.order_type == OrderType.MARKET => buys.headOption
case _ => None
}
}
} else {
logger.debug(s"findMatch: Order with unfilled_qty = 0 not matched")
None
}
}
/**
* Modifies the unfilled quantity for the matched trades
*/
def adjustOrders(order: Order, matchedOrder: Order) = {
checkOrder(order)
checkOrder(matchedOrder)
if (order.unfilled_qty >= matchedOrder.unfilled_qty) {
/*
* Fill the entire matchedOrder. Mark the order as partially filled
*/
logger.info(s"order id ${order.id} filled with ${matchedOrder.unfilled_qty} from matchedOrder ${matchedOrder.id}")
order.setUnfilledQty(order.unfilled_qty - matchedOrder.unfilled_qty)
matchedOrder.setUnfilledQty(0)
/*
* Remove the matchedOrder from the appropriate queue, so it does not match up with new incoming orders
*/
dequeueOrder(matchedOrder)
if (order.unfilled_qty == 0) {
dequeueOrder(order)
}
} else {
/*
* Fill the entire order. Mark the matched order as partially filled
*/
logger.info(s"order id ${order.id} filled with ${order.unfilled_qty} from matchedOrder ${matchedOrder.id}")
matchedOrder.setUnfilledQty(matchedOrder.unfilled_qty - order.unfilled_qty)
order.setUnfilledQty(0)
/*
* Remove the order from the appropriate queue, so it does not match up with new incoming orders
*/
dequeueOrder(order)
if (matchedOrder.unfilled_qty == 0) {
dequeueOrder(matchedOrder)
}
}
/*
* Save the state
*/
Await.result(OrderDao.updateMatchStatus(order, matchedOrder), Duration.Inf)
logger.debug(s"buys = $buys \\nsells=$sells")
}
def dequeueOrder(order: Order, force: Boolean = false) = {
logger.info(s"De-queuing order $order")
checkOrder(order)
if (order.unfilled_qty == 0 || force) {
/*
* Dequeue order which matches the id and ensure they are sorted
*/
order match {
case Order(id, _, OrderType.BUY, _, user_id, _, _, _, _, _, _, _, _, _, _) => {
val filtered = buys.filter(_.id == order.id)
buys --= filtered
buys.sortWith(sortBuys)
logger.debug(s"filtered out $filtered")
}
case Order(id, _, OrderType.SELL, _, user_id, _, _, _, _, _, _, _, _, _, _) => {
val filtered = sells.filter(_.id == order.id)
sells --= filtered
sells.sortWith(sortSells)
logger.debug(s"filtered out $filtered")
}
}
true
} else {
logger.warn(s"De-queuing order $order failed. This order cannot be dequeued")
false
}
}
/**
* Enqueues the order into the buys or sells and ensures they are sorted
*/
def enqueOrder(order: Order) = {
logger.info(s"En-queuing order $order")
checkOrder(order)
/*
* Enqueue order and ensure they are sorted
*/
order match {
case Order(id, _, OrderType.BUY, _, user_id, _, _, _, _, _, _, _, _, _, _) => {
buys += order
buys.sortWith(sortBuys)
}
case Order(id, _, OrderType.SELL, _, user_id, _, _, _, _, _, _, _, _, _, _) => {
sells += order
sells.sortWith(sortSells)
}
}
}
/**
* Defensive programming - change later.
* Check if this order can be handled by this UOM
*/
def checkOrder(order: Order) = {
if (order.security_id != security_id) {
throw new IllegalArgumentException(s"Cannot handle $order with UnfilledOrderManager of security_id = $security_id")
}
}
}
import scala.concurrent.Await
import scala.concurrent.duration._
object UnfilledOrderManager {
/**
* Creates an instance of the UnfilledOrderManager with buys and sells lists
*/
def apply(security_id: Long) = {
val buys = new ListBuffer[Order]()
val sells = new ListBuffer[Order]()
new UnfilledOrderManager(security_id, buys, sells)
}
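  /*
   * Hedged usage sketch (illustrative only, not part of the original code):
   *
   *   val uom = UnfilledOrderManager(securityId)
   *   uom.findMatch(incoming) match {
   *     case Some(matched) => uom.adjustOrders(incoming, matched)
   *     case None          => uom.enqueOrder(incoming)
   *   }
   */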
} | thimmaiah/life_of_a_trade_scala | src/main/scala/com/lot/exchange/UnfilledOrderManager.scala | Scala | apache-2.0 | 6,826 |