code
stringlengths 5
1M
| repo_name
stringlengths 5
109
| path
stringlengths 6
208
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 5
1M
|
---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.metric
import java.io.File
import scala.collection.mutable.HashMap
import org.apache.spark.TestUtils
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.execution.SparkPlanInfo
import org.apache.spark.sql.execution.ui.{SparkPlanGraph, SQLAppStatusStore}
import org.apache.spark.sql.internal.SQLConf.WHOLESTAGE_CODEGEN_ENABLED
import org.apache.spark.sql.test.SQLTestUtils
trait SQLMetricsTestUtils extends SQLTestUtils {
import testImplicits._
protected def currentExecutionIds(): Set[Long] = {
spark.sparkContext.listenerBus.waitUntilEmpty(10000)
statusStore.executionsList.map(_.executionId).toSet
}
protected def statusStore: SQLAppStatusStore = spark.sharedState.statusStore
// Pattern of size SQLMetric value, e.g. "\\n96.2 MiB (32.1 MiB, 32.1 MiB, 32.1 MiB (stage 0.0:
// task 4))" OR "\\n96.2 MiB (32.1 MiB, 32.1 MiB, 32.1 MiB)"
protected val sizeMetricPattern = {
val bytes = "([0-9]+(\\\\.[0-9]+)?) (EiB|PiB|TiB|GiB|MiB|KiB|B)"
val maxMetrics = "\\\\(stage ([0-9])+\\\\.([0-9])+\\\\: task ([0-9])+\\\\)"
s"(.*\\\\n$bytes \\\\($bytes, $bytes, $bytes( $maxMetrics)?\\\\))|($bytes)"
}
// Pattern of timing SQLMetric value, e.g. "\\n2.0 ms (1.0 ms, 1.0 ms, 1.0 ms (stage 3.0):
// task 217))" OR "\\n2.0 ms (1.0 ms, 1.0 ms, 1.0 ms)" OR "1.0 ms"
protected val timingMetricPattern = {
val duration = "([0-9]+(\\\\.[0-9]+)?) (ms|s|m|h)"
val maxMetrics = "\\\\(stage ([0-9])+\\\\.([0-9])+\\\\: task ([0-9])+\\\\)"
s"(.*\\\\n$duration \\\\($duration, $duration, $duration( $maxMetrics)?\\\\))|($duration)"
}
// Pattern of size SQLMetric value for Aggregate tests.
// e.g "\\n(1, 1, 0.9 (stage 1.0: task 8)) OR "\\n(1, 1, 0.9 )" OR "1"
protected val aggregateMetricsPattern = {
val iters = "([0-9]+(\\\\.[0-9]+)?)"
val maxMetrics = "\\\\(stage ([0-9])+\\\\.([0-9])+\\\\: task ([0-9])+\\\\)"
s"(.*\\\\n\\\\($iters, $iters, $iters( $maxMetrics)?\\\\))|($iters)"
}
/**
* Get execution metrics for the SQL execution and verify metrics values.
*
* @param metricsValues the expected metric values (numFiles, numPartitions, numOutputRows).
* @param func the function can produce execution id after running.
*/
private def verifyWriteDataMetrics(metricsValues: Seq[Int])(func: => Unit): Unit = {
val previousExecutionIds = currentExecutionIds()
// Run the given function to trigger query execution.
func
spark.sparkContext.listenerBus.waitUntilEmpty(10000)
val executionIds = currentExecutionIds().diff(previousExecutionIds)
assert(executionIds.size == 1)
val executionId = executionIds.head
val executedNode = statusStore.planGraph(executionId).nodes.head
val metricsNames = Seq(
"number of written files",
"number of dynamic part",
"number of output rows")
val metrics = statusStore.executionMetrics(executionId)
metricsNames.zip(metricsValues).foreach { case (metricsName, expected) =>
val sqlMetric = executedNode.metrics.find(_.name == metricsName)
assert(sqlMetric.isDefined)
val accumulatorId = sqlMetric.get.accumulatorId
val metricValue = metrics(accumulatorId).replaceAll(",", "").toInt
assert(metricValue == expected)
}
val totalNumBytesMetric = executedNode.metrics.find(
_.name == "written output").get
val totalNumBytes = metrics(totalNumBytesMetric.accumulatorId).replaceAll(",", "")
.split(" ").head.trim.toDouble
assert(totalNumBytes > 0)
}
protected def testMetricsNonDynamicPartition(
dataFormat: String,
tableName: String): Unit = {
withTable(tableName) {
Seq((1, 2)).toDF("i", "j")
.write.format(dataFormat).mode("overwrite").saveAsTable(tableName)
val tableLocation =
new File(spark.sessionState.catalog.getTableMetadata(TableIdentifier(tableName)).location)
// 2 files, 100 rows, 0 dynamic partition.
verifyWriteDataMetrics(Seq(2, 0, 100)) {
(0 until 100).map(i => (i, i + 1)).toDF("i", "j").repartition(2)
.write.format(dataFormat).mode("overwrite").insertInto(tableName)
}
assert(TestUtils.recursiveList(tableLocation).count(_.getName.startsWith("part-")) == 2)
}
}
protected def testMetricsDynamicPartition(
provider: String,
dataFormat: String,
tableName: String): Unit = {
withTable(tableName) {
withTempPath { dir =>
spark.sql(
s"""
|CREATE TABLE $tableName(a int, b int)
|USING $provider
|PARTITIONED BY(a)
|LOCATION '${dir.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tableName))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
val df = spark.range(start = 0, end = 40, step = 1, numPartitions = 1)
.selectExpr("id a", "id b")
// 40 files, 80 rows, 40 dynamic partitions.
verifyWriteDataMetrics(Seq(40, 40, 80)) {
df.union(df).repartition(2, $"a")
.write
.format(dataFormat)
.mode("overwrite")
.insertInto(tableName)
}
assert(TestUtils.recursiveList(dir).count(_.getName.startsWith("part-")) == 40)
}
}
}
/**
* Call `df.collect()` and collect necessary metrics from execution data.
*
* @param df `DataFrame` to run
* @param expectedNumOfJobs number of jobs that will run
* @param expectedNodeIds the node ids of the metrics to collect from execution data.
* @param enableWholeStage enable whole-stage code generation or not.
*/
protected def getSparkPlanMetrics(
df: DataFrame,
expectedNumOfJobs: Int,
expectedNodeIds: Set[Long],
enableWholeStage: Boolean = false): Option[Map[Long, (String, Map[String, Any])]] = {
val previousExecutionIds = currentExecutionIds()
withSQLConf(WHOLESTAGE_CODEGEN_ENABLED.key -> enableWholeStage.toString) {
df.collect()
}
sparkContext.listenerBus.waitUntilEmpty(10000)
val executionIds = currentExecutionIds().diff(previousExecutionIds)
assert(executionIds.size === 1)
val executionId = executionIds.head
val jobs = statusStore.execution(executionId).get.jobs
// Use "<=" because there is a race condition that we may miss some jobs
// TODO Change it to "=" once we fix the race condition that missing the JobStarted event.
assert(jobs.size <= expectedNumOfJobs)
if (jobs.size == expectedNumOfJobs) {
// If we can track all jobs, check the metric values
val metricValues = statusStore.executionMetrics(executionId)
val metrics = SparkPlanGraph(SparkPlanInfo.fromSparkPlan(
df.queryExecution.executedPlan)).allNodes.filter { node =>
expectedNodeIds.contains(node.id)
}.map { node =>
val nodeMetrics = node.metrics.map { metric =>
val metricValue = metricValues(metric.accumulatorId)
(metric.name, metricValue)
}.toMap
(node.id, node.name -> nodeMetrics)
}.toMap
Some(metrics)
} else {
// TODO Remove this "else" once we fix the race condition that missing the JobStarted event.
// Since we cannot track all jobs, the metric values could be wrong and we should not check
// them.
logWarning("Due to a race condition, we miss some jobs and cannot verify the metric values")
None
}
}
/**
* Call `df.collect()` and verify if the collected metrics are same as "expectedMetrics".
*
* @param df `DataFrame` to run
* @param expectedNumOfJobs number of jobs that will run
* @param expectedMetrics the expected metrics. The format is
* `nodeId -> (operatorName, metric name -> metric value)`.
*/
protected def testSparkPlanMetrics(
df: DataFrame,
expectedNumOfJobs: Int,
expectedMetrics: Map[Long, (String, Map[String, Any])],
enableWholeStage: Boolean = false): Unit = {
val expectedMetricsPredicates = expectedMetrics.mapValues { case (nodeName, nodeMetrics) =>
(nodeName, nodeMetrics.mapValues(expectedMetricValue =>
(actualMetricValue: Any) => {
actualMetricValue.toString.matches(expectedMetricValue.toString)
}).toMap)
}
testSparkPlanMetricsWithPredicates(df, expectedNumOfJobs, expectedMetricsPredicates.toMap,
enableWholeStage)
}
/**
* Call `df.collect()` and verify if the collected metrics satisfy the specified predicates.
* @param df `DataFrame` to run
* @param expectedNumOfJobs number of jobs that will run
* @param expectedMetricsPredicates the expected metrics predicates. The format is
* `nodeId -> (operatorName, metric name -> metric predicate)`.
* @param enableWholeStage enable whole-stage code generation or not.
*/
protected def testSparkPlanMetricsWithPredicates(
df: DataFrame,
expectedNumOfJobs: Int,
expectedMetricsPredicates: Map[Long, (String, Map[String, Any => Boolean])],
enableWholeStage: Boolean = false): Unit = {
val optActualMetrics =
getSparkPlanMetrics(df, expectedNumOfJobs, expectedMetricsPredicates.keySet, enableWholeStage)
optActualMetrics.foreach { actualMetrics =>
assert(expectedMetricsPredicates.keySet === actualMetrics.keySet)
for ((nodeId, (expectedNodeName, expectedMetricsPredicatesMap))
<- expectedMetricsPredicates) {
val (actualNodeName, actualMetricsMap) = actualMetrics(nodeId)
assert(expectedNodeName === actualNodeName)
for ((metricName, metricPredicate) <- expectedMetricsPredicatesMap) {
assert(metricPredicate(actualMetricsMap(metricName)),
s"$nodeId / '$metricName' (= ${actualMetricsMap(metricName)}) did not match predicate.")
}
}
}
}
}
object InputOutputMetricsHelper {
private class InputOutputMetricsListener extends SparkListener {
private case class MetricsResult(
var recordsRead: Long = 0L,
var shuffleRecordsRead: Long = 0L,
var sumMaxOutputRows: Long = 0L)
private[this] val stageIdToMetricsResult = HashMap.empty[Int, MetricsResult]
def reset(): Unit = {
stageIdToMetricsResult.clear()
}
/**
* Return a list of recorded metrics aggregated per stage.
*
* The list is sorted in the ascending order on the stageId.
* For each recorded stage, the following tuple is returned:
* - sum of inputMetrics.recordsRead for all the tasks in the stage
* - sum of shuffleReadMetrics.recordsRead for all the tasks in the stage
* - sum of the highest values of "number of output rows" metric for all the tasks in the stage
*/
def getResults(): List[(Long, Long, Long)] = {
stageIdToMetricsResult.keySet.toList.sorted.map { stageId =>
val res = stageIdToMetricsResult(stageId)
(res.recordsRead, res.shuffleRecordsRead, res.sumMaxOutputRows)
}
}
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = synchronized {
val res = stageIdToMetricsResult.getOrElseUpdate(taskEnd.stageId, MetricsResult())
res.recordsRead += taskEnd.taskMetrics.inputMetrics.recordsRead
res.shuffleRecordsRead += taskEnd.taskMetrics.shuffleReadMetrics.recordsRead
var maxOutputRows = 0L
for (accum <- taskEnd.taskMetrics.externalAccums) {
val info = accum.toInfo(Some(accum.value), None)
if (info.name.toString.contains("number of output rows")) {
info.update match {
case Some(n: Number) =>
if (n.longValue() > maxOutputRows) {
maxOutputRows = n.longValue()
}
case _ => // Ignore.
}
}
}
res.sumMaxOutputRows += maxOutputRows
}
}
// Run df.collect() and return aggregated metrics for each stage.
def run(df: DataFrame): List[(Long, Long, Long)] = {
val spark = df.sparkSession
val sparkContext = spark.sparkContext
val listener = new InputOutputMetricsListener()
sparkContext.addSparkListener(listener)
try {
sparkContext.listenerBus.waitUntilEmpty(5000)
listener.reset()
df.collect()
sparkContext.listenerBus.waitUntilEmpty(5000)
} finally {
sparkContext.removeSparkListener(listener)
}
listener.getResults()
}
}
| dbtsai/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsTestUtils.scala | Scala | apache-2.0 | 13,294 |
package com.scalaAsm.x86
package Instructions
package x87
// Description: IEEE Partial Remainder
// Category: general/arith
trait FPREM1 extends InstructionDefinition {
val mnemonic = "FPREM1"
}
object FPREM1 extends ZeroOperands[FPREM1] with FPREM1Impl
trait FPREM1Impl extends FPREM1 {
implicit object _0 extends NoOp{
val opcode: OneOpcode = 0xD9 /+ 6
override def hasImplicitOperand = true
}
}
| bdwashbu/scala-x86-inst | src/main/scala/com/scalaAsm/x86/Instructions/x87/FPREM1.scala | Scala | apache-2.0 | 420 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.web.components
import com.normation.cfclerk.domain.Technique
import com.normation.cfclerk.services.TechniqueRepository
import com.normation.rudder.domain.policies._
import com.normation.rudder.domain.nodes.NodeGroupId
import com.normation.rudder.domain.policies._
import com.normation.rudder.domain.eventlog.RudderEventActor
import com.normation.rudder.domain.reports._
import com.normation.rudder.domain.nodes.NodeInfo
import com.normation.rudder.repository._
import com.normation.rudder.services.reports.ReportingService
import com.normation.rudder.services.nodes.NodeInfoService
import net.liftweb.http.js._
import JsCmds._
import com.normation.inventory.domain.NodeId
import JE._
import net.liftweb.common._
import net.liftweb.http._
import scala.xml._
import net.liftweb.util._
import net.liftweb.util.Helpers._
import com.normation.rudder.web.model._
import com.normation.utils.StringUuidGenerator
import com.normation.exceptions.TechnicalException
import com.normation.utils.Control.sequence
import com.normation.utils.HashcodeCaching
import com.normation.eventlog.ModificationId
import bootstrap.liftweb.RudderConfig
import net.liftweb.json.JArray
import net.liftweb.json.JsonParser
import net.liftweb.json.JString
import net.liftweb.json.JObject
import net.liftweb.json.JField
import net.liftweb.http.js.JE.JsArray
import com.normation.rudder.web.services.JsTableLine
import com.normation.rudder.web.services.JsTableData
import net.liftweb.http.js.JE.AnonFunc
import com.normation.rudder.web.services.NodeComplianceLine
import org.joda.time.DateTime
import org.joda.time.Interval
import com.normation.rudder.services.reports.NodeChanges
object RuleGrid {
def staticInit =
<head>
<style type="text/css">
#actions_zone , .dataTables_length , .dataTables_filter {{ display: inline-block; }}
.greenCompliance {{ background-color: #CCFFCC }}
.orangeCompliance {{ background-color: #FFBB66 }}
.redCompliance {{ background-color: #FF6655 }}
.noCompliance {{ background-color:#BBAAAA; }}
.applyingCompliance {{ background-color:#CCCCCC; }}
.compliance {{ text-align: center; }}
.statusCell {{font-weight:bold}}
</style>
</head>
}
class RuleGrid(
htmlId_rulesGridZone : String
, rules : Seq[Rule]
//JS callback to call when clicking on a line
, detailsCallbackLink : Option[(Rule,String) => JsCmd]
, showCheckboxColumn:Boolean = true
, directiveApplication : Option[DirectiveApplicationManagement] = None
) extends DispatchSnippet with Loggable {
private[this] sealed trait Line { val rule:Rule }
private[this] case class OKLine(
rule : Rule
, compliance : Option[ComplianceLevel]
, applicationStatus: ApplicationStatus
, trackerVariables : Seq[(Directive,ActiveTechnique,Technique)]
, targets : Set[RuleTargetInfo]
) extends Line with HashcodeCaching
private[this] case class ErrorLine(
rule:Rule
, trackerVariables: Box[Seq[(Directive,ActiveTechnique,Technique)]]
, targets:Box[Set[RuleTargetInfo]]
) extends Line with HashcodeCaching
private[this] val getFullNodeGroupLib = RudderConfig.roNodeGroupRepository.getFullGroupLibrary _
private[this] val getFullDirectiveLib = RudderConfig.roDirectiveRepository.getFullDirectiveLibrary _
private[this] val getRuleApplicationStatus = RudderConfig.ruleApplicationStatus.isApplied _
private[this] val recentChanges = RudderConfig.recentChangesService
private[this] val reportingService = RudderConfig.reportingService
private[this] val getAllNodeInfos = RudderConfig.nodeInfoService.getAll _
private[this] val techniqueRepository = RudderConfig.techniqueRepository
private[this] val categoryRepository = RudderConfig.roRuleCategoryRepository
private[this] val categoryService = RudderConfig.ruleCategoryService
//used to error tempering
private[this] val roRuleRepository = RudderConfig.roRuleRepository
private[this] val woRuleRepository = RudderConfig.woRuleRepository
private[this] val uuidGen = RudderConfig.stringUuidGenerator
///// local variables /////
private[this] val htmlId_rulesGridId = "grid_" + htmlId_rulesGridZone
private[this] val htmlId_reportsPopup = "popup_" + htmlId_rulesGridZone
private[this] val htmlId_modalReportsPopup = "modal_" + htmlId_rulesGridZone
private[this] val htmlId_rulesGridWrapper = htmlId_rulesGridId + "_wrapper"
private[this] val tableId_reportsPopup = "popupReportsGrid"
def templatePath = List("templates-hidden", "reports_grid")
def template() = Templates(templatePath) match {
case Empty | Failure(_,_,_) =>
throw new TechnicalException("Template for report grid not found. I was looking for %s.html".format(templatePath.mkString("/")))
case Full(n) => n
}
def reportTemplate = chooseTemplate("reports", "report", template)
def dispatch = {
case "rulesGrid" => { _:NodeSeq => rulesGrid()}
}
def jsVarNameForId(tableId:String) = "oTable" + tableId
def rulesGridWithUpdatedInfo(popup: Boolean = false, linkCompliancePopup:Boolean = true) = {
rulesGrid(popup, linkCompliancePopup)
}
def selectAllVisibleRules(status : Boolean) : JsCmd= {
directiveApplication match {
case Some(directiveApp) =>
def moveCategory(arg: String) : JsCmd = {
//parse arg, which have to be json object with sourceGroupId, destCatId
try {
(for {
JObject(JField("rules",JArray(childs)) :: Nil) <- JsonParser.parse(arg)
JString(ruleid) <- childs
} yield {
RuleId(ruleid)
}) match {
case ruleIds =>
directiveApp.checkRules(ruleIds,status) match {
case DirectiveApplicationResult(rules,completeCategories,indeterminate) =>
After(TimeSpan(50),JsRaw(s"""
${rules.map(c => s"""$$('#${c.value}Checkbox').prop("checked",${status}); """).mkString("\\n")}
${completeCategories.map(c => s"""$$('#${c.value}Checkbox').prop("indeterminate",false); """).mkString("\\n")}
${completeCategories.map(c => s"""$$('#${c.value}Checkbox').prop("checked",${status}); """).mkString("\\n")}
${indeterminate.map(c => s"""$$('#${c.value}Checkbox').prop("indeterminate",true); """).mkString("\\n")}
"""))
}
}
} catch {
case e:Exception => Alert("Error while trying to move group :"+e)
}
}
JsRaw(s"""
var rules = $$.map($$('#grid_rules_grid_zone tr.tooltipabletr'), function(v,i) {return $$(v).prop("id")});
var rulesIds = JSON.stringify({ "rules" : rules });
${SHtml.ajaxCall(JsVar("rulesIds"), moveCategory _)};
""")
case None => Noop
}
}
// Build refresh function for Rule grid
def refresh(popup:Boolean = false, linkCompliancePopup:Boolean = true) = {
AnonFunc(SHtml.ajaxCall(JsNull, (s) => {
( for {
rules <- roRuleRepository.getAll(false)
nodeInfo = getAllNodeInfos()
groupLib = getFullNodeGroupLib()
directiveLib = getFullDirectiveLib()
changes = recentChanges.getChangesByInterval()
newData <- getRulesTableData(popup,rules,linkCompliancePopup, nodeInfo, groupLib, directiveLib, changes)
} yield {
JsRaw(s"""refreshTable("${htmlId_rulesGridId}", ${newData.json.toJsCmd});""")
}
) match {
case Full(cmd) => cmd
case eb:EmptyBox =>
val fail = eb ?~! ("an error occured during data update")
logger.error(s"Could not refresh Rule table data cause is: ${fail.msg}")
JsRaw(s"""$$("#ruleTableError").text("Could not refresh Rule table data cause is: ${fail.msg}");""")
}
} ) )
}
def rulesGrid(
popup : Boolean = false
, linkCompliancePopup:Boolean = true
) : NodeSeq = {
val allcheckboxCallback = AnonFunc("checked",SHtml.ajaxCall(JsVar("checked"), (in : String) => selectAllVisibleRules(in.toBoolean)))
val onLoad =
s"""createRuleTable (
"${htmlId_rulesGridId}"
, []
, ${showCheckboxColumn}
, ${popup}
, ${allcheckboxCallback.toJsCmd}
, "${S.contextPath}"
, ${refresh(popup, linkCompliancePopup).toJsCmd}
);
createTooltip();
createTooltiptr();
$$('#${htmlId_rulesGridWrapper}').css("margin","10px 0px 0px 0px");
"""
<div id={htmlId_rulesGridZone}>
<span class="error" id="ruleTableError"></span>
<div id={htmlId_modalReportsPopup} class="nodisplay">
<div id={htmlId_reportsPopup} ></div>
</div>
<table id={htmlId_rulesGridId} class="display" cellspacing="0"> </table>
<div class={htmlId_rulesGridId +"_pagination, paginatescala"} >
<div id={htmlId_rulesGridId +"_paginate_area"}></div>
</div>
</div> ++
Script(OnLoad(JsRaw(onLoad)))
}
/*
* Get Data to use in the datatable for all Rules
* First: transform all Rules to Lines
* Second: Transform all of those lines into javascript datas to send in the function
*/
private[this] def getRulesTableData(
popup:Boolean
, rules:Seq[Rule]
, linkCompliancePopup:Boolean
, allNodeInfos : Box[Map[NodeId, NodeInfo]]
, groupLib : Box[FullNodeGroupCategory]
, directiveLib : Box[FullActiveTechniqueCategory]
, recentChanges: Box[Map[Interval,Seq[ResultRepairedReport]]]
) : Box[JsTableData[RuleLine]] = {
for {
directivesLib <- directiveLib
groupsLib <- groupLib
nodes <- allNodeInfos
changes <- recentChanges
} yield {
val lines = for {
line <- convertRulesToLines(directivesLib, groupsLib, nodes, rules.toList)
} yield {
getRuleData(line, groupsLib, nodes, changes)
}
JsTableData(lines)
}
}
/*
* Convert Rules to Data used in Datatables Lines
*/
private[this] def convertRulesToLines (
directivesLib: FullActiveTechniqueCategory
, groupsLib : FullNodeGroupCategory
, nodes : Map[NodeId, NodeInfo]
, rules : List[Rule]
) : List[Line] = {
// we compute beforehand the compliance, so that we have a single big query
// to the database
val complianceMap = computeCompliances(nodes.keySet, rules.map( _.id).toSet)
rules.map { rule =>
val trackerVariables: Box[Seq[(Directive, ActiveTechnique, Technique)]] = {
sequence(rule.directiveIds.toSeq) { id =>
directivesLib.allDirectives.get(id) match {
case Some((activeTechnique, directive)) =>
techniqueRepository.getLastTechniqueByName(activeTechnique.techniqueName) match {
case None =>
Failure(s"Can not find Technique for activeTechnique with name ${activeTechnique.techniqueName} referenced in Rule with ID ${rule.id.value}")
case Some(technique) =>
Full((directive, activeTechnique.toActiveTechnique, technique))
}
case None => //it's an error if the directive ID is defined and found but it is not attached to an activeTechnique
val error = Failure(s"Can not find Directive with ID '${id.value}' referenced in Rule with ID '${rule.id.value}'")
logger.debug(error.messageChain, error)
error
}
}
}
val targetsInfo = sequence(rule.targets.toSeq) {
case json:CompositeRuleTarget =>
val ruleTargetInfo = RuleTargetInfo(json,"","",true,false)
Full(ruleTargetInfo)
case target =>
groupsLib.allTargets.get(target) match {
case Some(t) =>
Full(t.toTargetInfo)
case None =>
Failure(s"Can not find full information for target '${target}' referenced in Rule with ID '${rule.id.value}'")
}
}.map(x => x.toSet)
(trackerVariables, targetsInfo) match {
case (Full(seq), Full(targets)) =>
val applicationStatus = getRuleApplicationStatus(rule, groupsLib, directivesLib, nodes)
val compliance = applicationStatus match {
case _:NotAppliedStatus =>
Full(None)
case _ =>
complianceMap match {
case eb: EmptyBox => eb ?~! "Error when getting the compliance of rules"
case Full(cm) => cm.get(rule.id) match {
case None =>
logger.debug(s"Error when getting compliance for Rule ${rule.name}")
// if we can't find the compliance, it is most likely that the promises are not generated yet,
// so we say it is pending
Full(Some(ComplianceLevel(pending = 1)))
case s@Some(_) => Full(s)
}
}
}
compliance match {
case e:EmptyBox =>
logger.error(e)
ErrorLine(rule, trackerVariables, targetsInfo)
case Full(value) =>
OKLine(rule, value, applicationStatus, seq, targets)
}
case (x,y) =>
if(rule.isEnabledStatus) {
//the Rule has some error, try to disable it
//and be sure to not get a Rules from a modification pop-up, because we don't want to commit changes along
//with the disable.
//it's only a try, so it may fails, we won't try again
( for {
r <- roRuleRepository.get(rule.id)
_ <- woRuleRepository.update(
r.copy(isEnabledStatus=false)
, ModificationId(uuidGen.newUuid)
, RudderEventActor
, Some("Rule automatically disabled because it contains error (bad target or bad directives)")
)
} yield {
logger.warn(s"Disabling rule '${rule.name}' (ID: '${rule.id.value}') because it refers missing objects. Go to rule's details and save, then enable it back to correct the problem.")
x match {
case f: Failure =>
logger.warn(s"Rule '${rule.name}' (ID: '${rule.id.value}' directive problem: " + f.messageChain)
case _ => // Directive Ok!
}
y match {
case f: Failure =>
logger.warn(s"Rule '${rule.name}' (ID: '${rule.id.value}' target problem: " + f.messageChain)
case _ => // Group Ok!
}
} ) match {
case eb: EmptyBox =>
val e = eb ?~! s"Error when to trying to disable the rule '${rule.name}' (ID: '${rule.id.value}') because it's data are unconsistant."
logger.warn(e.messageChain)
e.rootExceptionCause.foreach { ex =>
logger.warn("Exception was: ", ex)
}
case _ => //ok
}
}
ErrorLine(rule, x, y)
}
}
}
/*
* Generates Data for a line of the table
*/
private[this] def getRuleData (
line:Line
, groupsLib: FullNodeGroupCategory
, nodes: Map[NodeId, NodeInfo]
, changes: Map[Interval,Seq[ResultRepairedReport]]
) : RuleLine = {
// Status is the state of the Rule, defined as a string
// reasons are the the reasons why a Rule is disabled
val (status,reasons) : (String,Option[String]) =
line match {
case line : OKLine =>
line.applicationStatus match {
case FullyApplied => ("In application",None)
case PartiallyApplied(seq) =>
val why = seq.map { case (at, d) => "Directive " + d.name + " disabled" }.mkString(", ")
("Partially applied", Some(why))
case x:NotAppliedStatus =>
val isAllTargetsEnabled = line.targets.filter(t => !t.isEnabled).isEmpty
val nodeSize = groupsLib.getNodeIds(line.rule.targets, nodes).size
val conditions = {
Seq( ( line.rule.isEnabled , "Rule disabled" )
, ( line.trackerVariables.size > 0 , "No policy defined")
, ( isAllTargetsEnabled , "Group disabled")
, ( nodeSize!=0 , "Empty groups")
) ++
line.trackerVariables.flatMap {
case (directive, activeTechnique,_) =>
Seq( ( directive.isEnabled , "Directive " + directive.name + " disabled")
, ( activeTechnique.isEnabled, "Technique for '" + directive.name + "' disabled")
)
}
}
val why = conditions.collect { case (ok, label) if(!ok) => label }.mkString(", ")
("Not applied", Some(why))
}
case _ : ErrorLine => ("N/A",None)
}
// Compliance percent and class to apply to the td
val compliancePercent = {
line match {
case line : OKLine => line.compliance match {
case None => ComplianceLevel()
case Some(c) => c
}
case _ => ComplianceLevel()
}
}
// Is the ruple applying a Directive and callback associated to the checkbox
val (applying,checkboxCallback) = {
directiveApplication match {
case Some(directiveApplication) =>
def check(value : Boolean) : JsCmd= {
directiveApplication.checkRule(line.rule.id, value) match {
case DirectiveApplicationResult(rules,completeCategories,indeterminate) =>
JsRaw(s"""
${completeCategories.map(c => s"""$$('#${c.value}Checkbox').prop("indeterminate",false); """).mkString("\\n")}
${completeCategories.map(c => s"""$$('#${c.value}Checkbox').prop("checked",${value}); """).mkString("\\n")}
${indeterminate.map(c => s"""$$('#${c.value}Checkbox').prop("indeterminate",true); """).mkString("\\n")}
""")
}
}
val isApplying = line.rule.directiveIds.contains(directiveApplication.directive.id)
val ajax = SHtml.ajaxCall(JsVar("checked"), bool => check (bool.toBoolean))
val callback = AnonFunc("checked",ajax)
(isApplying,Some(callback))
case None => (false,None)
}
}
// Css to add to the whole line
val cssClass = {
val disabled = if (line.rule.isEnabled) {
""
} else {
"disabledRule"
}
val error = line match {
case _:ErrorLine => " error"
case _ => ""
}
s"tooltipabletr ${disabled} ${error}"
}
val category = categoryService.shortFqdn(line.rule.categoryId).getOrElse("Error")
// Callback to use on links, parameter define the tab to open "showForm" for compliance, "showEditForm" to edit form
val callback = for {
callback <- detailsCallbackLink
ajax = SHtml.ajaxCall(JsVar("action"), (s: String) => callback(line.rule,s))
} yield {
AnonFunc("action",ajax)
}
RuleLine (
line.rule.name
, line.rule.id
, line.rule.shortDescription
, applying
, category
, status
, compliancePercent
, changes
, cssClass
, callback
, checkboxCallback
, reasons
)
}
private[this] def computeCompliances(nodeIds: Set[NodeId], ruleIds: Set[RuleId]) : Box[Map[RuleId, ComplianceLevel]] = {
for {
reports <- reportingService.findRuleNodeStatusReports(nodeIds, ruleIds)
} yield {
reports.groupBy( _.ruleId ).map { case (ruleId, nodeReports) =>
(
ruleId
//BE CAREFUL: nodeReports is a SET - and it's likelly that
//some compliance will be equals
, ComplianceLevel.sum(nodeReports.toSeq.map(_.compliance))
)
}
}
}
}
/*
* Javascript object containing all data to create a line in the DataTable
* { "name" : Rule name [String]
* , "id" : Rule id [String]
* , "description" : Rule (short) description [String]
* , "applying": Is the rule applying the Directive, used in Directive page [Boolean]
* , "category" : Rule category [String]
* , "status" : Status of the Rule, "enabled", "disabled" or "N/A" [String]
* , "compliance" : Percent of compliance of the Rule [String]
* , "recentChanges" : Array of changes to build the sparkline [Array[String]]
* , "trClass" : Class to apply on the whole line (disabled ?) [String]
* , "callback" : Function to use when clicking on one of the line link, takes a parameter to define which tab to open, not always present[ Function ]
* , "checkboxCallback": Function used when clicking on the checkbox to apply/not apply the Rule to the directive, not always present [ Function ]
* , "reasons": Reasons why a Rule is a not applied, empty if there is no reason [ String ]
* }
*/
case class RuleLine (
name : String
, id : RuleId
, description : String
, applying : Boolean
, category : String
, status : String
, compliance : ComplianceLevel
, recentChanges : Map[Interval,Seq[ResultRepairedReport]]
, trClass : String
, callback : Option[AnonFunc]
, checkboxCallback : Option[AnonFunc]
, reasons : Option[String]
) extends JsTableLine {
/* Would love to have a reflexive way to generate that map ... */
override val json = {
val reasonField = reasons.map(r => ( "reasons" -> Str(r)))
val cbCallbackField = checkboxCallback.map(cb => ( "checkboxCallback" -> cb))
val callbackField = callback.map(cb => ( "callback" -> cb))
val optFields : Seq[(String,JsExp)]= reasonField.toSeq ++ cbCallbackField ++ callbackField
val changes = NodeChanges.changesOnRule(id)(recentChanges)
val base = JsObj(
( "name", name )
, ( "id", id.value )
, ( "description", description )
, ( "applying", applying )
, ( "category", category )
, ( "status", status )
, ( "compliance", jsCompliance(compliance) )
, ( "recentChanges", NodeChanges.json(changes) )
, ( "trClass", trClass )
)
base +* JsObj(optFields:_*)
}
}
| Kegeruneku/rudder | rudder-web/src/main/scala/com/normation/rudder/web/components/RuleGrid.scala | Scala | agpl-3.0 | 24,485 |
package org.scalameta.paradise
package typechecker
trait Namers { self: AnalyzerPlugins =>
import global._
import analyzer._
import definitions._
import scala.reflect.internal.Flags._
import analyzer.{Namer => NscNamer}
def mkNamer(namer0: NscNamer) = new { val namer: NscNamer = namer0 } with Namer with Expander
trait Namer { self: Namer with Expander =>
val namer: NscNamer
import namer._
val namerErrorGen = new ErrorGen(namer.typer)
import namerErrorGen._
import NamerErrorGen._
def enterSym(tree: Tree): Context = {
def dispatch() = {
var returnContext = namer.context
tree match {
case DocDef(_, mdef) =>
enterSym(mdef)
case tree @ Import(_, _) =>
createAssignAndEnterSymbol(tree)
finishSymbol(tree)
returnContext = context.make(tree)
case tree: MemberDef =>
createAssignAndEnterSymbol(tree)
finishSymbol(tree)
case _ =>
}
returnContext
}
tree.symbol match {
case NoSymbol =>
try dispatch()
catch typeErrorHandler(tree, namer.context)
case sym => enterExistingSym(sym, tree)
}
}
def createAssignAndEnterSymbol(tree: Tree, mask: Long = -1L): Symbol = {
def coreCreateAssignAndEnterSymbol = {
val sym = tree match {
case PackageDef(pid, _) =>
createPackageSymbol(tree.pos, pid) // package symbols are entered elsewhere
case imp: Import =>
createImportSymbol(imp) // import symbols are dummies, no need to enter them anywhere
case mdef: MemberDef =>
enterInScope(setPrivateWithin(mdef, createMemberSymbol(mdef, mdef.name, mask)))
case _ => abort("Unexpected tree: " + tree)
}
if (isPastTyper) sym.name.toTermName match {
case nme.IMPORT | nme.OUTER | nme.ANON_CLASS_NAME | nme.ANON_FUN_NAME |
nme.CONSTRUCTOR =>
()
case _ =>
tree match {
case md: DefDef => log("[+symbol] " + sym.debugLocationString)
case _ =>
}
}
tree.symbol = sym
sym
}
def deriveSymbolFromSource(tree: Tree)(pf: PartialFunction[Tree, Symbol]): Symbol = {
val sym = pf(tree)
// can't do this in coreCreateAssignAndEnterSymbol
// because then we won't get to update sources for redefinitions
// this might be crucial when we have classfiles of the definition we're currently compiling
attachSource(sym, tree)
sym
}
deriveSymbolFromSource(tree) {
case tree @ ClassDef(mods, name, _, _) =>
val existing = context.scope.lookup(name)
val isRedefinition = (
existing.isType
&& existing.isTopLevel
&& context.scope == existing.owner.info.decls
&& (
currentRun.canRedefine(existing) ||
isExpanded(existing)
)
)
val clazz: Symbol = {
if (isRedefinition) {
updatePosFlags(existing, tree.pos, mods.flags)
setPrivateWithin(tree, existing)
clearRenamedCaseAccessors(existing)
tree.symbol = existing
existing
} else coreCreateAssignAndEnterSymbol setFlag inConstructorFlag
}
if (clazz.isClass && clazz.isTopLevel) {
if (clazz.sourceFile != null && clazz.sourceFile != contextFile)
devWarning(s"Source file mismatch in $clazz: ${clazz.sourceFile} vs. $contextFile")
clazz.associatedFile = contextFile
if (clazz.sourceFile != null) {
assert(
currentRun.canRedefine(clazz) || clazz.sourceFile == currentRun.symSource(clazz),
clazz.sourceFile)
currentRun.symSource(clazz) = clazz.sourceFile
}
registerTopLevelSym(clazz)
assert(clazz.name.toString.indexOf('(') < 0, clazz.name) // )
}
clazz
case tree @ ModuleDef(mods, name, _) =>
var m: Symbol = context.scope lookupModule name
val moduleFlags = mods.flags | MODULE
// TODO: inCurrentScope(m) check that's present in vanilla Namer is omitted here
// this fixes SI-3772, but may break something else - I didn't have time to look into that
if (m.isModule && !m.hasPackageFlag && (currentRun.canRedefine(m) || m.isSynthetic || isExpanded(
m))) {
// This code accounts for the way the package objects found in the classpath are opened up
// early by the completer of the package itself. If the `packageobjects` phase then finds
// the same package object in sources, we have to clean the slate and remove package object
// members from the package class.
//
// TODO SI-4695 Pursue the approach in https://github.com/scala/scala/pull/2789 that avoids
// opening up the package object on the classpath at all if one exists in source.
if (m.isPackageObject) {
val packageScope = m.enclosingPackageClass.rawInfo.decls
packageScope
.filter(_.owner != m.enclosingPackageClass)
.toList
.foreach(packageScope unlink _)
}
updatePosFlags(m, tree.pos, moduleFlags)
setPrivateWithin(tree, m)
m.moduleClass andAlso (setPrivateWithin(tree, _))
context.unit.synthetics -= m
tree.symbol = m
} else {
m = coreCreateAssignAndEnterSymbol
m.moduleClass setFlag moduleClassFlags(moduleFlags)
setPrivateWithin(tree, m.moduleClass)
}
m.moduleClass setInfo namerOf(m).moduleClassTypeCompleter(tree)
if (m.isTopLevel && !m.hasPackageFlag) {
m.moduleClass.associatedFile = contextFile
currentRun.symSource(m) = m.moduleClass.sourceFile
registerTopLevelSym(m)
}
m
case _ =>
coreCreateAssignAndEnterSymbol
}
}
// reimplemented to integrate with weakEnsureCompanionObject
def ensureCompanionObject(cdef: ClassDef,
creator: ClassDef => Tree = companionModuleDef(_)): Symbol = {
val m = patchedCompanionSymbolOf(cdef.symbol, context)
def synthesizeTree = atPos(cdef.pos.focus)(creator(cdef))
if (m != NoSymbol && currentRun.compiles(m) && !isWeak(m)) m
else unmarkWeak(enterSyntheticSym(synthesizeTree))
}
/** Does the same as `ensureCompanionObject`, but also makes sure that the returned symbol destroys itself
* if noone ends up using it (either by calling `ensureCompanionObject` or by `finishSymbol`).
*/
// TODO: deduplicate
def weakEnsureCompanionObject(cdef: ClassDef,
creator: ClassDef => Tree = companionModuleDef(_)): Symbol = {
val m = patchedCompanionSymbolOf(cdef.symbol, context)
if (m != NoSymbol && currentRun.compiles(m)) m
else {
val mdef = atPos(cdef.pos.focus)(creator(cdef)); enterSym(mdef); markWeak(mdef.symbol)
}
}
def finishSymbol(tree: Tree) {
// annotations on parameters expand together with their owners
// therefore when we actually get to enter the parameters, we shouldn't even bother checking
// TODO: we don't handle primary ctors that might get spuriously marked as maybe expandees because of primary paramss
val aprioriNotExpandable = (context.tree, tree) match {
case (ClassDef(_, _, _, _), TypeDef(_, _, _, _)) => true
case (Template(_, _, _), ValDef(mods, _, _, _)) if mods.isParamAccessor => true
// vparamss of primary ctors are entered in `enterValueParams`, which doesn't call us
case (DefDef(_, _, _, _, _, _), TypeDef(_, _, _, _)) => true
// vparamss of normal methods are also entered in `enterValueParams`, which doesn't call us
case (TypeDef(_, _, _, _), TypeDef(_, _, _, _)) => true
case _ => false
}
if (aprioriNotExpandable) finishSymbolNotExpandee(tree)
else {
treeInfo.getAnnotationZippers(tree) match {
case Nil => finishSymbolNotExpandee(tree)
case zippers => finishSymbolMaybeExpandee(tree, zippers)
}
// this will only show companions defined above ourselves
// so when finishing `class C` in `{ class C; object C }`
// we won't see `object C` in `companion` - we will see NoSymbol
// that's the limitation of how namer works, but nevertheless it's not a problem for us
// because if finishing `class C` doesn't set up the things, finishing `object C` will
val sym = tree.symbol
val companion = patchedCompanionSymbolOf(sym, context)
tree match {
// TODO: should we also support annotations on modules expanding companion classes?
case tree @ ClassDef(_, _, _, _) if isMaybeExpandee(sym) =>
val wasExpanded = isExpanded(companion)
val m = weakEnsureCompanionObject(tree)
finishSymbolMaybeExpandeeCompanion(attachedSource(m), m, sym)
if (wasExpanded) markExpanded(m) // why is this necessary? see files/run/macro-annotation-recursive-class
// TODO: in general, this first call to FSMEC usually only brings grief
// can we get rid of it completely without having to sweep its results under the carpet?
case tree @ ModuleDef(_, _, _) if isMaybeExpandee(companion) =>
finishSymbolMaybeExpandeeCompanion(tree, sym, companion)
case _ =>
}
}
}
def finishSymbolNotExpandee(tree: Tree) {
val sym = tree.symbol
def savingLock[T](op: => T): T = {
val wasLocked = sym.hasFlag(LOCKED)
val result = op
if (wasLocked) sym.setFlag(LOCKED)
result
}
savingLock(tree match {
case tree @ PackageDef(_, _) =>
newNamer(context.make(tree, sym.moduleClass, sym.info.decls)) enterSyms tree.stats
case tree @ ClassDef(mods, name, tparams, impl) =>
sym setInfo completerOf(tree)
if (mods.isCase) {
val m = ensureCompanionObject(tree, caseModuleDef)
m.moduleClass.updateAttachment(new ClassForCaseCompanionAttachment(tree))
}
val hasDefault = impl.body exists treeInfo.isConstructorWithDefault
if (hasDefault) {
val m = ensureCompanionObject(tree)
m.updateAttachment(new ConstructorDefaultsAttachment(tree, null))
}
val owner = tree.symbol.owner
if (settings.warnPackageObjectClasses && owner.isPackageObjectClass && !mods.isImplicit) {
reporter.warning(
tree.pos,
"it is not recommended to define classes/objects inside of package objects.\\n" +
"If possible, define " + tree.symbol + " in " + owner.skipPackageObject + " instead."
)
}
// Suggested location only.
if (mods.isImplicit) {
if (treeInfo.primaryConstructorArity(tree) == 1) {
log("enter implicit wrapper " + tree + ", owner = " + owner)
enterImplicitWrapper(tree)
} else MultipleParametersImplicitClassError(tree)
}
validateCompanionDefs(tree)
case tree @ ModuleDef(_, _, _) =>
unmarkWeak(sym)
sym setInfo completerOf(tree)
validateCompanionDefs(tree)
case tree @ ValDef(_, _, _, _) =>
val isScala = !context.unit.isJava
if (isScala) {
if (nme.isSetterName(tree.name)) ValOrVarWithSetterSuffixError(tree)
if (tree.mods.isPrivateLocal && tree.mods.isCaseAccessor)
PrivateThisCaseClassParameterError(tree)
}
if (isScala && deriveAccessors(tree)) {
// when refactoring enterSym, I needed to decouple symbol creation and various syntheses
// so that annotation expansion mechanism could be installed in-between of those
// it went well except for one thing - ValDef symbol creation is very closely tied to syntheses
// because depending on whether the ValDef is a val, var or a lazy val, different symbols need to be generated
// since I didn't have much time (and, back then, much understanding), I just decided to create dummies
// that live only to stand in as potential annottees and get destroyed if any sort of synthesis is necessary
// TODO: this is obviously ugly and needs to be fixed
context.scope.unlink(tree.symbol)
tree.symbol setInfo NoType
enterGetterSetter(tree)
} else {
tree.symbol setInfo completerOf(tree)
}
if (isEnumConstant(tree))
tree.symbol setInfo ConstantType(Constant(tree.symbol))
case tree @ DefDef(_, nme.CONSTRUCTOR, _, _, _, _) =>
sym setInfo completerOf(tree)
case tree @ DefDef(mods, name, tparams, _, _, _) =>
val bridgeFlag = if (mods hasAnnotationNamed tpnme.bridgeAnnot) BRIDGE | ARTIFACT else 0
sym setFlag bridgeFlag
val completer =
if (sym hasFlag SYNTHETIC) {
if (name == nme.copy) copyMethodCompleter(tree)
else if (sym hasFlag CASE) applyUnapplyMethodCompleter(tree, context)
else completerOf(tree)
} else completerOf(tree)
sym setInfo completer
case tree @ TypeDef(_, _, _, _) =>
sym setInfo completerOf(tree)
case tree @ Import(_, _) =>
namerOf(tree.symbol) importTypeCompleter tree
})
}
// we have several occasions when so called "maybe expandees" need special care
// ("maybe expandees" = annotated members, which might or might not be annotated with a macro expansion)
// 1) (when called by Symbol.info) trigger the MaybeExpandeeCompleter and then immediately recur into a fresh completer
// if we don't recur, we're doomed to fail, because there are only so many retries that Symbol.info can tolerate
// and this retry threshold is already fine-tuned to the current chain of completers, which makes MaybeExpandeeCompleter one too many
// 2) (when called by expandMacroAnnotations from templateSig or typedBlock) in this situation noone needs us to fully complete
// the underlying symbol. just making sure that we don't have any annotations to expand is the least and the most we should do.
// if we're overeager like in mode #1, we might easily induce cyclic reference errors (like in tests/run/macro-annotations-packageobject)
// 3) (when called by Symbol.typeParams) this one is different from Symbol.info, because it calls load, not complete
// from what I understand, this separation exists because it takes much less effort to figure out tparams rather than the full signature
// for example, vanilla completers assigned in namer are created with typeParams already known
// you can see for yourself in the distinction between monoTypeCompleter and PolyTypeCompleter
// therefore, just as with Symbol.info we need to trigger the MaybeExpandeeCompleter
// and then not forget to recur into the fresh completer's load, again because of the retry limit baked into Symbol.typeParams
// 4) TODO: (when called by Symbol.unsafeTypeParams) figure out what's the deal with them
// existence of this method profoundly scares me, even though I never had a problem with it
abstract class MaybeExpandeeCompleter(val tree: Tree)
extends LockingTypeCompleter
with FlagAssigningCompleter {
def destroy(syms: Symbol*) = {
for (sym <- syms) {
context.unit.synthetics -= sym
context.scope.unlink(sym)
sym setInfo NoType
sym.moduleClass setInfo NoType
sym.removeAttachment[SymbolCompleterAttachment]
}
}
def complete(sym: Symbol, onlyExpansions: Boolean) = {
lockedCount += 1
try completeImpl(sym, onlyExpansions)
finally lockedCount -= 1
}
override def completeImpl(sym: Symbol): Unit = {
completeImpl(sym, onlyExpansions = false)
}
def completeImpl(sym: Symbol, onlyExpansions: Boolean): Unit = {
val thisCompleter = sym.rawInfo
maybeExpand()
assert(sym.rawInfo != thisCompleter,
s"${sym.accurateKindString} ${sym.rawname}#${sym.id} with $kind")
if (onlyExpansions) sym.rawInfo.completeOnlyExpansions(sym)
else sym.rawInfo.complete(sym)
}
override def load(sym: Symbol): Unit = {
this.completeOnlyExpansions(sym)
sym.rawInfo.load(sym)
}
def maybeExpand(): Unit // TODO: should I also pass `sym` here?
}
abstract class MaybeExpandeeCompanionCompleter(tree: Tree) extends MaybeExpandeeCompleter(tree)
implicit class RichType(tpe: Type) {
def completeOnlyExpansions(sym: Symbol) = tpe match {
case mec: Namer#MaybeExpandeeCompleter => mec.complete(sym, onlyExpansions = true)
case c => ()
}
}
def finishSymbolMaybeExpandee(tree: Tree, annZippers: List[AnnotationZipper]) {
val sym = tree.symbol
unmarkWeak(sym)
markMaybeExpandee(sym)
sym.setInfo(new MaybeExpandeeCompleter(tree) {
override def kind =
s"maybeExpandeeCompleter for ${sym.accurateKindString} ${sym.rawname}#${sym.id}"
override def maybeExpand(): Unit = {
val companion =
if (tree.isInstanceOf[ClassDef] || tree.isInstanceOf[TypeDef]) {
patchedCompanionSymbolOf(sym, context)
} else {
NoSymbol
}
def maybeExpand(annotation: Tree,
annottee: Tree,
maybeExpandee: Tree): Option[List[Tree]] = {
val treeInfo.Applied(Select(New(tpt), nme.CONSTRUCTOR), _, _) = annotation
val mann = probeMacroAnnotation(context, tpt)
if (mann.isMacroAnnotation && context.macrosEnabled) {
// if we encounter an error, we just return None, so that other macro annotations can proceed
// this is unlike macroExpand1 when any error in an expandee blocks expansions
// there it's necessary in order not to exacerbate typer errors
// but when manning we aren't in typer, so we don't have to do as macroExpand1 does
// and also there's a good reason not to ban other macro annotations
// if we do ban them, we might get spurious compilation errors from non-existent members that could've been generated
assert(!currentRun.compiles(mann), mann)
val companion =
if (maybeExpandee.isInstanceOf[ClassDef] || tree.isInstanceOf[TypeDef]) {
patchedCompanionSymbolOf(sym, context)
} else {
NoSymbol
}
val companionSource =
if (!isWeak(companion)) attachedSource(companion) else EmptyTree
val unsafeExpandees =
List(annottee, maybeExpandee, companionSource).distinct.filterNot(_.isEmpty)
val expandees =
unsafeExpandees.map(duplicateAndKeepPositions).map(_.setSymbol(NoSymbol))
if (mann.isOldMacroAnnotation)
expandOldAnnotationMacro(tree, mann, annotation, expandees)
else if (mann.isNewMacroAnnotation)
expandNewAnnotationMacro(tree, mann, annotation, expandees)
else None
} else {
None
}
}
annZippers.toStream
.flatMap(annz => maybeExpand(annz.annotation, annz.annottee, annz.owner))
.headOption match {
case Some(expanded) =>
tellReplAboutExpansion(sym, companion, expanded)
markExpanded(sym)
markExpanded(companion)
// expansion brings new trees, probably wildly different from current ones. what do we do?
// the most robust thing would be to destroy ourselves (us and our companion), but we can't do that at top level
// therefore at top level we don't destroy, but rather rely on enterSyms to redefine ourselves
// however when nested we go all out
// TODO: unlinking distorts the order of symbols in scope
// note however that trees (calculated by expandMacroAnnotations) will be generated in correct order
if (!sym.isTopLevel) destroy(sym, companion)
enterSyms(expanded) // TODO: we can't reliably expand into imports, because they won't be accounted by definitions below us
case None =>
markNotExpandable(sym)
finishSymbolNotExpandee(tree)
}
// take care of the companion if it's no longer needed
// we can't do this in companion's completer, because that one isn't guaranteed to ever be called
val expandedWithoutCompanion =
isExpanded(sym) && attachedExpansion(companion).map(_.isEmpty).getOrElse(false)
val companionHasReemerged =
expandedWithoutCompanion && sym.isTopLevel && !isWeak(companion)
val notExpandableWeakCompanion = isNotExpandable(sym) && isWeak(companion)
if ((expandedWithoutCompanion && !companionHasReemerged) || notExpandableWeakCompanion)
destroy(companion)
}
})
}
// how do we make sure that this completer falls back to the vanilla completer if the companion ends up not expanding?
// well, if a module symbol has a maybeExpandee companion then the last two calls to its setInfo will be one of:
// * non-FSMEC completer for the module and then FSMEC => fallback should call native completer
// * FSMEC from enterSyntheticSym for a phantom module and then FSMEC again => fallback should do nothing
// now it's easy to see that both are correctly handled here
def finishSymbolMaybeExpandeeCompanion(tree: Tree, m: Symbol, c: Symbol) {
val worthBackingUp = !m.rawInfo.isInstanceOf[Namer#MaybeExpandeeCompanionCompleter]
if (worthBackingUp) backupCompleter(m)
markMaybeExpandee(m)
m.setInfo(new MaybeExpandeeCompanionCompleter(tree) {
override def kind = s"maybeExpandeeCompanionCompleter for ${m.rawname}#${m.id}"
override def maybeExpand(): Unit = {
c.rawInfo.completeOnlyExpansions(c)
// this is a very tricky part of annotation expansion
// because now, after deferring to our companion's judgement for a while, we have to ourselves figure out:
// 1) whether we should start completing on our own
// 2) if we should do it on our own, then how exactly
// 1 is easy. If our companion's expansion has destroyed us (or hasn't materialized us if we were weak)
// then we no longer care and we silently go into oblivion. Otherwise, we should take care of ourselves.
// 2 is hard, because we have two distinct situations to handle:
// 2a) isExpanded(c) is true, which means that our companion has just expanded
// 2b) isNotExpandable(c) is true, which means that our companion has just been deemed unexpandable
// 2a is simple, because it means that we don't have to do anything, as we've either got destroyed
// or we've got entered in `enterSyms(expanded)` that follows expansions.
// 2b is tricky, because it means that we need to fall back to the most recent non-FSMEC completer.
// The hardest part here is that we can't just get to the completer that was preceding `this` as m.rawInfo
// (otherwise we run into issue #9, for more details see history of this change). Instead we need to track m's type history.
val destroyedDuringExpansion = m.rawInfo == NoType
val failedToMaterializeDuringExpansion = isWeak(m)
val aliveAndKicking = !destroyedDuringExpansion && !failedToMaterializeDuringExpansion
if (aliveAndKicking && isNotExpandable(c)) {
if (worthBackingUp) restoreCompleter(m)
val maybeExpandee = m.rawInfo.isInstanceOf[Namer#MaybeExpandeeCompleter]
if (maybeExpandee) markMaybeExpandee(m) else markNotExpandable(m)
}
}
})
}
// mostly copy/pasted and adapted from typedIdent
// adaptations = ignore error reporting + ignore java + don't force symbols being compiled
// the last requirement leads to us being imprecise in some situation wrt normal name resolution
// but that's okay, since it's the only way for manns to remain modular and not to cripple normal annotations
def probeMacroAnnotation(context: Context, tpt: Tree): Symbol = {
// SAFE HELPERS (can't cause unnecessary completions)
def reallyExists(sym: Symbol) = {
if (newTyper(context).isStale(sym)) sym.setInfo(NoType); exists(sym)
}
def qualifies(sym: Symbol): Boolean = sym.hasRawInfo && reallyExists(sym)
// UNSAFE HELPERS (need to guard against unnecessary completions)
def canDefineMann(sym: Symbol): Boolean = !currentRun.compiles(sym)
def exists(sym: Symbol) = if (canDefineMann(sym)) sym.exists else false
def importedSymbol(imp: ImportInfo, name: Name): Symbol = { // TODO: be more precise in reproducing importSig and importedSymbol
val impContext = context.enclosingContextChain.find(_.tree.symbol == imp.tree.symbol).get
val sym =
imp.tree.cached("importQualProbe", probeMacroAnnotation(impContext.outer, imp.tree.expr))
val pre = if (reallyExists(sym) && isAccessible(impContext, sym)) sym.tpe else NoType
var result: Symbol = NoSymbol
var renamed = false
var selectors = imp.tree.selectors
def current = selectors.head
while (selectors != Nil && result == NoSymbol) {
if (current.rename == name.toTermName)
result =
nonLocalMember(pre, if (name.isTypeName) current.name.toTypeName else current.name)
else if (selectors.head.name == name.toTermName)
renamed = true
else if (selectors.head.name == nme.WILDCARD && !renamed)
result = nonLocalMember(pre, name)
if (result == NoSymbol)
selectors = selectors.tail
}
if (settings.warnUnusedImport && selectors.nonEmpty && result != NoSymbol && imp.pos != NoPosition) {
val m_recordUsage =
imp.getClass.getDeclaredMethods().find(_.getName == "recordUsage").get
m_recordUsage.setAccessible(true)
m_recordUsage.invoke(imp, current, result)
}
if (definitions isImportable result) result
else NoSymbol
}
// def isAccessible(cx: Context, sym: Symbol) = if (canDefineMann(cx.owner)) cx.isAccessible(sym, cx.prefix, superAccess = false) else false
def isAccessible(cx: Context, sym: Symbol) =
true // TODO: sorry, it's 2am, and I can't figure this out
def member(tpe: Type, name: Name) =
if (canDefineMann(tpe.typeSymbol)) tpe.member(name) else NoSymbol
def nonLocalMember(tpe: Type, name: Name) =
if (canDefineMann(tpe.typeSymbol)) tpe.nonLocalMember(name) else NoSymbol
if (tpt.hasSymbolField && tpt.symbol != NoSymbol) tpt.symbol
else
tpt match {
case Ident(name) =>
def resolve(name: Name): Symbol = {
// STEP 1: RESOLVE THE NAME IN SCOPE
var defSym: Symbol = NoSymbol
var defEntry: ScopeEntry = null
var cx = context
while (defSym == NoSymbol && cx != NoContext && (cx.scope ne null)) {
defEntry = cx.scope.lookupEntry(name)
if ((defEntry ne null) && qualifies(defEntry.sym)) defSym = defEntry.sym
else {
cx = cx.enclClass
val foundSym = member(cx.prefix, name) filter qualifies
defSym = foundSym filter (isAccessible(cx, _))
if (defSym == NoSymbol) cx = cx.outer
}
}
if (defSym == NoSymbol && settings.exposeEmptyPackage) {
defSym = rootMirror.EmptyPackageClass.info member name
}
// STEP 2: RESOLVE THE NAME IN IMPORTS
val symDepth =
if (defEntry eq null) cx.depth
else
cx.depth - ({
if (cx.scope ne null) cx.scope.nestingLevel
else 0 // TODO: fix this in toolboxes, not hack around here
} - defEntry.owner.nestingLevel)
var impSym: Symbol = NoSymbol
var imports = context.imports
while (!reallyExists(impSym) && !imports.isEmpty && imports.head.depth > symDepth) {
impSym = importedSymbol(imports.head, name)
if (!exists(impSym)) imports = imports.tail
}
// FIXME: repl hack. somehow imports that come from repl are doubled
// e.g. after `import $line7.$read.$iw.$iw.foo` you'll have another identical `import $line7.$read.$iw.$iw.foo`
// this is a crude workaround for the issue
imports match {
case fst :: snd :: _ if exists(impSym) && fst == snd => imports = imports.tail
case _ => // do nothing
}
// STEP 3: TRY TO RESOLVE AMBIGUITIES
if (exists(defSym) && exists(impSym)) {
if (defSym.isDefinedInPackage &&
(!currentRun.compiles(defSym) ||
context.unit.exists && defSym.sourceFile != context.unit.source.file))
defSym = NoSymbol
else if (impSym.isError || impSym.name == nme.CONSTRUCTOR)
impSym = NoSymbol
}
if (!exists(defSym) && exists(impSym)) {
var impSym1: Symbol = NoSymbol
var imports1 = imports.tail
while (!imports1.isEmpty &&
(!imports.head.isExplicitImport(name) ||
imports1.head.depth == imports.head.depth)) {
impSym1 = importedSymbol(imports1.head, name)
if (reallyExists(impSym1)) {
if (imports1.head.isExplicitImport(name)) {
if (imports.head.isExplicitImport(name) ||
imports1.head.depth != imports.head.depth)
return NoSymbol // was possibly fixable ambiguous import
impSym = impSym1
imports = imports1
} else if (!imports.head.isExplicitImport(name) &&
imports1.head.depth == imports.head.depth)
return NoSymbol // was possibly fixable ambiguous import
}
imports1 = imports1.tail
}
}
// STEP 4: DEAL WITH WHAT WE HAVE
if (exists(defSym) && !exists(impSym)) defSym
else if (exists(defSym) && exists(impSym)) NoSymbol // was ambiguous import
else if (!exists(defSym) && exists(impSym)) impSym
else {
val lastTry = rootMirror.missingHook(rootMirror.RootClass, name)
if (lastTry != NoSymbol && isAccessible(context, lastTry)) lastTry
else NoSymbol
}
}
resolve(name)
case Select(qualtree, name) => // TODO: be more precise wrt typedSelect
def resolve(name: Name): Symbol = {
val qual = probeMacroAnnotation(context, qualtree)
val sym = if (canDefineMann(qual)) member(qual.tpe, name) else NoSymbol
if (reallyExists(sym) && isAccessible(context, sym)) sym else NoSymbol
}
resolve(name).orElse(resolve(name.toTermName))
case AppliedTypeTree(tpt, _) => // https://github.com/scalamacros/paradise/issues/2: expand manns with type parameters
probeMacroAnnotation(context, tpt)
case _ =>
NoSymbol
}
}
// see https://github.com/scalamacros/paradise/issues/7
// also see https://github.com/scalamacros/paradise/issues/64
def patchedCompanionSymbolOf(original: Symbol, ctx: Context): Symbol = {
val owner = original.owner
// SI-7264 Force the info of owners from previous compilation runs.
// Doing this generally would trigger cycles; that's what we also
// use the lower-level scan through the current Context as a fall back.
if (!currentRun.compiles(owner) &&
// NOTE: the following three lines of code are added to work around #7
!owner.enclosingTopLevelClass.isRefinementClass &&
!owner.ownerChain.exists(_.isLocalDummy) &&
owner.ownerChain.forall(!currentRun.compiles(_))) {
owner.initialize
}
original.companionSymbol orElse {
implicit class PatchedContext(ctx: Context) {
trait PatchedLookupResult { def suchThat(criterion: Symbol => Boolean): Symbol }
def patchedLookup(name: Name, expectedOwner: Symbol) = new PatchedLookupResult {
override def suchThat(criterion: Symbol => Boolean): Symbol = {
var res: Symbol = NoSymbol
var ctx = PatchedContext.this.ctx
while (res == NoSymbol && ctx.outer != ctx) {
// NOTE: original implementation says `val s = ctx.scope lookup name`
// but we can't use it, because Scope.lookup returns wrong results when the lookup is ambiguous
// and that triggers https://github.com/scalamacros/paradise/issues/64
val s = {
val lookupResult = ctx.scope.lookupAll(name).filter(criterion).toList
lookupResult match {
case Nil => NoSymbol
case List(unique) => unique
case _ =>
abort(
s"unexpected multiple results for a companion symbol lookup for $original#{$original.id}")
}
}
if (s != NoSymbol && s.owner == expectedOwner)
res = s
else
ctx = ctx.outer
}
res
}
}
}
ctx
.patchedLookup(original.name.companionName, owner)
.suchThat(sym =>
(original.isTerm || sym.hasModuleFlag) &&
(sym isCoDefinedWith original))
}
}
}
}
| scalameta/paradise | plugin/src/main/scala-2.12.8/org/scalameta/paradise/typechecker/Namers.scala | Scala | bsd-3-clause | 35,149 |
package peschke.markov.utils
import cats.{Order, Show}
import cats.data.{NonEmptyVector, ValidatedNel}
import cats.syntax.apply._
import cats.syntax.show._
import cats.syntax.validated._
import com.github.ghik.silencer.silent
import com.monovore.decline.Argument
/**
* Manly used to encode ranges for the CLI parameters
*
* @param min the inclusive lower bound
* @param max the inclusive upper bound
*/
case class PositiveRange private[utils](min: PositiveInt, max: PositiveInt) {
def toNEV: NonEmptyVector[PositiveInt] =
NonEmptyVector(min, Iterator.iterate(min.next)(_.next).takeWhile(_.value <= max.value).toVector)
@silent private[this] def copy(): PositiveRange = ???
}
object PositiveRange {
@silent private[this] def apply(min: PositiveInt, max: PositiveInt): PositiveRange = ???
/**
* Creates a [[peschke.markov.utils.PositiveRange]], ensuring that `a` < `b`
*/
def between(a: PositiveInt, b: PositiveInt): PositiveRange =
new PositiveRange(
Order[PositiveInt].min(a, b),
Order[PositiveInt].max(a, b))
implicit val show: Show[PositiveRange] = Show.show(r => show"${r.min}..${r.max}")
implicit val argument: Argument[PositiveRange] = new Argument[PositiveRange] {
private val PI = Argument[PositiveInt]
type ValidatedArg[A] = ValidatedNel[String, A]
override def read(string: String): ValidatedNel[String, PositiveRange] =
string.split('.') match {
case Array(rawLow, "", rawHigh) =>
(PI.read(rawLow).leftMap(_.map("low " + _)): ValidatedArg[PositiveInt],
PI.read(rawHigh).leftMap(_.map("high " + _)): ValidatedArg[PositiveInt]
).mapN(_ to _)
case _ => "must be '<low>..<high>', where <low> and <high> are positive integers".invalidNel
}
override def defaultMetavar: String = "min..max"
}
}
| morgen-peschke/markov-text | scala/src/main/scala/peschke/markov/utils/PositiveRange.scala | Scala | mit | 1,838 |
/*
* Copyright 2017 Guy Van den Broeck <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.ucla.cs.starai.util;
import language.implicitConversions
class BigRational(n: BigInt, d: BigInt) extends Ordered[BigRational]{
require(d != 0)
private val g = gcd(n.abs, d.abs)
private val Precision = 17
val numer = n / g
val denom = d / g
def this(n: BigInt) = this(n, 1)
def +(that: BigRational): BigRational =
new BigRational(
numer * that.denom + that.numer * denom,
denom * that.denom)
def +(i: BigInt): BigRational =
new BigRational(numer + i * denom, denom)
def -(that: BigRational): BigRational =
new BigRational(
numer * that.denom - that.numer * denom,
denom * that.denom)
def -(i: BigInt): BigRational =
new BigRational(numer - i * denom, denom)
def *(that: BigRational): BigRational =
new BigRational(numer * that.numer, denom * that.denom)
def *(i: BigInt): BigRational =
new BigRational(numer * i, denom)
def /(that: BigRational): BigRational =
new BigRational(numer * that.denom, denom * that.numer)
def /(i: BigInt): BigRational =
new BigRational(numer, denom * i)
def reciprocal: BigRational =
new BigRational(denom, numer)
override def toString: String =
s"$numer/$denom"
def toDouble: Double = {
def div(d1: BigDecimal, d2: BigDecimal) = // drop down to java.math.BigDecimal
new BigDecimal(d1.bigDecimal.divide(d2.bigDecimal, Precision, java.math.RoundingMode.DOWN))
div(BigDecimal(numer), BigDecimal(denom))
.setScale(Precision).doubleValue
}
def toExactBigInt = {
require(denom == 1)
numer
}
private def gcd(a: BigInt, b: BigInt): BigInt =
if (b == BigInt(0)) a else gcd(b, a % b)
def compare(that: BigRational) = {
val diffNum = (this-that).numer
if(diffNum == 0) 0
else if(diffNum < 0) -1
else 1
}
override def hashCode: Int = (numer, denom).hashCode
override def equals(other: Any): Boolean =
other match {
case that: BigRational =>
this.numer == that.numer && this.denom == that.denom ||
this.numer == 0 && that.numer == 0
case _ => false
}
}
object BigRational {
def apply(n: BigInt, d: BigInt) = new BigRational(n,d)
def apply(n: BigInt) = new BigRational(n,1)
def unapply(b: BigRational): Option[(BigInt, BigInt)] = Some((b.numer, b.denom))
implicit def int2BigRational(i: Int): BigRational = new BigRational(i)
implicit object BigRationalIsFractional extends Numeric[BigRational] with Fractional[BigRational]{
def plus(x: BigRational, y: BigRational): BigRational = x + y
def minus(x: BigRational, y: BigRational): BigRational = x - y
def times(x: BigRational, y: BigRational): BigRational = x * y
def negate(x: BigRational): BigRational = -x
def fromInt(x: Int): BigRational = BigRational(x)
def toInt(x: BigRational): Int = throw new UnsupportedOperationException
def toLong(x: BigRational): Long = throw new UnsupportedOperationException
def toFloat(x: BigRational): Float = x.toDouble.toFloat
def toDouble(x: BigRational): Double = x.toDouble
def div(x: BigRational, y: BigRational): BigRational = x / y
def compare(x: BigRational,y: BigRational) = x compare y
}
} | UCLA-StarAI/ScalaDD | src/main/scala/edu/ucla/cs/starai/util/BigRational.scala | Scala | apache-2.0 | 3,892 |
package com.github.bruneli.phyqty
/*
* Copyright 2016 Renaud Bruneliere
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import Dimension.DimensionLess
/**
* @author bruneli
*/
object Math {
def ceil[D <: Dimension[_, _, _, _, _, _, _]](quantity: ScalarQuantity[D]): ScalarQuantity[D] = {
applyFunction(quantity, math.ceil)
}
def ceil[D <: Dimension[_, _, _, _, _, _, _]](quantities: QuantitiesLike[D, Scalar]): QuantitiesLike[D, Scalar] = {
quantities.mapCoordinates(math.ceil)
}
def floor[D <: Dimension[_, _, _, _, _, _, _]](quantity: ScalarQuantity[D]): ScalarQuantity[D] = {
applyFunction(quantity, math.floor)
}
def floor[D <: Dimension[_, _, _, _, _, _, _]](quantities: QuantitiesLike[D, Scalar]): QuantitiesLike[D, Scalar] = {
quantities.mapCoordinates(math.floor)
}
def abs[D <: Dimension[_, _, _, _, _, _, _]](quantity: ScalarQuantity[D]): ScalarQuantity[D] = {
applyFunction(quantity, math.abs)
}
def abs[D <: Dimension[_, _, _, _, _, _, _]](quantities: QuantitiesLike[D, Scalar]): QuantitiesLike[D, Scalar] = {
quantities.mapCoordinates(math.abs)
}
def sqrt(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.sqrt)
}
def sqrt(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.sqrt)
}
def cbrt(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.cbrt)
}
def cbrt(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.cbrt)
}
def exp(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.exp)
}
def exp(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.exp)
}
def log(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.log)
}
def log(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.log)
}
def log10(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.log10)
}
def log10(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.log10)
}
def sin(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.sin)
}
def sin(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.sin)
}
def cos(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.cos)
}
def cos(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.cos)
}
def tan(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.tan)
}
def tan(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.tan)
}
def asin(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.asin)
}
def asin(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.asin)
}
def acos(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.acos)
}
def acos(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.acos)
}
def atan(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.atan)
}
def atan(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.atan)
}
def sinh(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.sinh)
}
def sinh(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.sinh)
}
def cosh(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.cosh)
}
def cosh(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.cosh)
}
def tanh(quantity: ScalarQuantity[DimensionLess]): ScalarQuantity[DimensionLess] = {
applyFunction(quantity, math.tanh)
}
def tanh(quantities: QuantitiesLike[DimensionLess, Scalar]): QuantitiesLike[DimensionLess, Scalar] = {
quantities.mapCoordinates(math.tanh)
}
def max[D <: Dimension[_, _, _, _, _, _, _]](q1: ScalarQuantity[D], q2: ScalarQuantity[D]): ScalarQuantity[D] = {
val q3 = q2.in(q1.unit)
q1.copy(magnitude = math.max(q1.magnitude, q3.magnitude))
}
def max[D <: Dimension[_, _, _, _, _, _, _]](q1: QuantitiesLike[D, Scalar], q2: QuantitiesLike[D, Scalar]): QuantitiesLike[D, Scalar] = {
applyArity2Function(q1, q2)(math.max)
}
def min[D <: Dimension[_, _, _, _, _, _, _]](q1: ScalarQuantity[D], q2: ScalarQuantity[D]): ScalarQuantity[D] = {
val q3 = q2.in(q1.unit)
q1.copy(magnitude = math.min(q1.magnitude, q3.magnitude))
}
def min[D <: Dimension[_, _, _, _, _, _, _]](q1: QuantitiesLike[D, Scalar], q2: QuantitiesLike[D, Scalar]): QuantitiesLike[D, Scalar] = {
applyArity2Function(q1, q2)(math.min)
}
def applyFunction[D <: Dimension[_, _, _, _, _, _, _]](quantity: ScalarQuantity[D], function: Double => Double): ScalarQuantity[D] = {
ScalarQuantity(function(quantity.magnitude), quantity.unit)
}
def applyArity2Function[D <: Dimension[_, _, _, _, _, _, _]](q1: QuantitiesLike[D, Scalar], q2: QuantitiesLike[D, Scalar])(
function: (Double, Double) => Double): ScalarQuantities[D] = {
if (q1.length != q2.length) {
throw new Quantities.QuantitiesDimensionException
} else {
val q3 = q2.in(q1.unit)
val magnitudes = new Array[Double](q1.length)
for (idx <- magnitudes.indices) {
magnitudes(idx) = function(q1.magnitude(idx), q3.magnitude(idx))
}
ScalarQuantities(magnitudes, q1.unit)
}
}
}
| bruneli/phyqty | src/main/scala/com/github/bruneli/phyqty/Math.scala | Scala | apache-2.0 | 7,126 |
package com.twitter.finagle.netty4
import com.twitter.concurrent.Once
import com.twitter.finagle.stats.{FinagleStatsReceiver, Gauge}
import io.netty.buffer.{PoolArenaMetric, PooledByteBufAllocator}
import scala.collection.JavaConverters._
import scala.collection.mutable
/**
* Exports a number of N4-related metrics under `finagle/netty4`.
*/
private[netty4] object exportNetty4Metrics {
private[this] val stats = FinagleStatsReceiver.scope("netty4")
private[this] val gauges = mutable.Set.empty[Gauge]
private[this] def buildAccumulator(f: PoolArenaMetric => Long) =
{ (acc: Float, pa: PoolArenaMetric) => acc + f(pa) }
private[this] val sumHugeAllocations = buildAccumulator(_.numHugeAllocations())
private[this] val sumNormalAllocations = buildAccumulator(_.numNormalAllocations())
private[this] val sumSmallAllocations = buildAccumulator(_.numSmallAllocations())
private[this] val sumTinyAllocations = buildAccumulator(_.numTinyAllocations())
private[this] val sumHugeDeallocations = buildAccumulator(_.numHugeDeallocations())
private[this] val sumNormalDellocations = buildAccumulator(_.numNormalDeallocations())
private[this] val sumSmallDeallocations = buildAccumulator(_.numSmallDeallocations())
private[this] val sumTinyDeallocations = buildAccumulator(_.numTinyDeallocations())
private[this] val applyOnce: () => Unit = Once {
if (poolReceiveBuffers()) {
val allocator = PooledByteBufAllocator.DEFAULT
val poolingStats = stats.scope("pooling")
// Allocations.
gauges.add(poolingStats.addGauge("allocations", "huge")(
allocator.directArenas().asScala.foldLeft(0.0f)(sumHugeAllocations)
))
gauges.add(poolingStats.addGauge("allocations", "normal")(
allocator.directArenas().asScala.foldLeft(0.0f)(sumNormalAllocations)
))
gauges.add(poolingStats.addGauge("allocations", "small")(
allocator.directArenas().asScala.foldLeft(0.0f)(sumSmallAllocations)
))
gauges.add(poolingStats.addGauge("allocations", "tiny")(
allocator.directArenas().asScala.foldLeft(0.0f)(sumTinyAllocations)
))
// Deallocations.
gauges.add(poolingStats.addGauge("deallocations", "huge")(
allocator.directArenas().asScala.foldLeft(0.0f)(sumHugeDeallocations)
))
gauges.add(poolingStats.addGauge("deallocations", "normal")(
allocator.directArenas().asScala.foldLeft(0.0f)(sumNormalDellocations)
))
gauges.add(poolingStats.addGauge("deallocations", "small")(
allocator.directArenas().asScala.foldLeft(0.0f)(sumSmallDeallocations)
))
gauges.add(poolingStats.addGauge("deallocations", "tiny")(
allocator.directArenas().asScala.foldLeft(0.0f)(sumTinyDeallocations)
))
}
}
/**
* Exports N4 metrics.
*
* @note This method is thread-safe and no matter how many times it's called,
* the metrics will only be exported once.
*/
def apply(): Unit = applyOnce()
}
| spockz/finagle | finagle-netty4/src/main/scala/com/twitter/finagle/netty4/exportNetty4Metrics.scala | Scala | apache-2.0 | 2,996 |
// TLCockpit
// Copyright 2017-2018 Norbert Preining
// Licensed according to GPLv3+
//
// Front end for tlmgr
package TLCockpit
import scalafx.beans.property.{ObjectProperty, StringProperty}
// Note!!! we have to use ObjectProperty[Int] here instead of IntegerProperty
// since IntegerProperty does NOT implement Observable[Int,Int]
// see https://github.com/scalafx/scalafx/issues/243
case class TLPackageDisplay(name: StringProperty, var lrev: ObjectProperty[Int], rrev: ObjectProperty[Int],
shortdesc: StringProperty, size: ObjectProperty[Int], var installed: StringProperty) {
def this(_name: String, _lrev: String, _rrev: String, _shortdesc: String, _size: String, _installed: String) =
this(
StringProperty(_name), ObjectProperty[Int](_lrev.toInt), ObjectProperty[Int](_rrev.toInt),
StringProperty(_shortdesc), ObjectProperty[Int](_size.toInt), StringProperty(_installed)
)
}
case class TLBackupDisplay(name: StringProperty, rev: StringProperty, date: StringProperty) {
def this(_name: String, _rev: String, _date: String) =
this(StringProperty(_name), StringProperty(_rev), StringProperty(_date))
}
case class TLUpdateDisplay(name: StringProperty, var status: StringProperty, var lrev: StringProperty, rrev: StringProperty,
shortdesc: StringProperty, size: StringProperty) {
def this(_name: String, _status: String, _lrev: String, _rrev: String, _shortdesc: String, _size: String) =
this(
StringProperty(_name), StringProperty(_status), StringProperty(_lrev), StringProperty(_rrev),
StringProperty(_shortdesc), StringProperty(_size)
)
} | TeX-Live/tlcockpit | src/main/scala/TLCockpit/TLDisplayClasses.scala | Scala | gpl-3.0 | 1,660 |
package dispatch
/** Mix in to Http if you want JDK logging */
trait JdkLogging extends HttpExecutor {
override def make_logger = new dispatch.Logger {
val jdklog = java.util.logging.Logger.getLogger("dispatch")
def info(msg: String, items: Any*) {
jdklog.info(msg.format(items: _*))
}
def warn(msg: String, items: Any*) {
jdklog.warning(msg.format(items: _*))
}
}
}
/**
* Mix in to Http if you want no logging from Dispatch.
* Note that HttpClient logs separately:
* http://hc.apache.org/httpcomponents-client/logging.html
*/
trait NoLogging extends HttpExecutor {
override def make_logger = new dispatch.Logger {
def info(msg: String, items: Any*) { }
def warn(msg: String, items: Any*) { }
}
}
| cmc333333/Databinder-Dispatch | core/src/main/scala/logging.scala | Scala | lgpl-2.1 | 755 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.eagle.datastream.storm
import backtype.storm.topology.base.BaseRichSpout
import com.typesafe.config.Config
import org.apache.eagle.dataproc.impl.storm.StormSpoutProvider
import org.apache.eagle.datastream.core.{ExecutionEnvironment, StormSourceProducer, StreamDAG}
/**
* @since 12/7/15
*/
class StormExecutionEnvironment(private val conf:Config) extends ExecutionEnvironment(conf) {
override def execute(dag: StreamDAG) : Unit = {
StormTopologyCompiler(config.get, dag).buildTopology.execute
}
def fromSpout[T](source: BaseRichSpout): StormSourceProducer[T] = {
val ret = StormSourceProducer[T](source)
ret.initWith(dag,config.get)
ret
}
def fromSpout[T](sourceProvider: StormSpoutProvider):StormSourceProducer[T] = fromSpout(sourceProvider.getSpout(config.get))
} | pkuwm/incubator-eagle | eagle-core/eagle-data-process/eagle-stream-process-api/src/main/scala/org/apache/eagle/datastream/storm/StormExecutionEnvironment.scala | Scala | apache-2.0 | 1,615 |
package com.etsy.sahale
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import scala.collection.mutable
@RunWith(classOf[JUnitRunner])
class FlowGraphBuilderSpec extends FlatSpec with Matchers {
val graphDotString = {
scala.io.Source.fromInputStream(
Thread.currentThread.getContextClassLoader.getResourceAsStream(
"test-graph.txt"
),
"UTF-8"
).getLines().mkString("\\n")
}
val emptyProps = new java.util.Properties
val expectedEdgeMap = Map[String, Set[Int]](
"11" -> Set(33),
"22" -> Set(44),
"33" -> Set(44)
)
val expectedStepStatusMap = mutable.Map[String, StepStatus](
"2AAA915A87DE4B52B2A56C249545C54D" -> new StepStatus(null, 11, "2AAA915A87DE4B52B2A56C249545C54D", emptyProps),
"6EC78784266342CB9424E9875FF4299F" -> new StepStatus(null, 22, "6EC78784266342CB9424E9875FF4299F", emptyProps),
"9FC4CA743ED5468EBC8C3CA76C6B12A6" -> new StepStatus(null, 33, "9FC4CA743ED5468EBC8C3CA76C6B12A6", emptyProps),
"C4047D6DEBB6427B8B95DAF19D3E5DE2" -> new StepStatus(null, 44, "C4047D6DEBB6427B8B95DAF19D3E5DE2", emptyProps)
)
"A FlowGraphBuilder" should "extract a valid Edge Map from the planned Flow" in {
val fgb = newFlowGraphBuilder
fgb.extractVerticesAndEdgesFromDot(graphDotString)
fgb.edgeMap should be (expectedEdgeMap)
}
"A FlowGraphBuilder" should "extract a valid StepStatus map from the planned Flow" in {
val fgb = newFlowGraphBuilder
fgb.extractVerticesAndEdgesFromDot(graphDotString)
fgb.stepStatusMap should be (expectedStepStatusMap)
}
def newFlowGraphBuilder: FlowGraphBuilder = {
new FlowGraphBuilder(null, mutable.Map[String, StepStatus](), mutable.Map[String, Set[Int]](), true)
}
}
| etsy/Sahale | flowtracker/src/test/scala/FlowGraphBuilderSpec.scala | Scala | mit | 1,778 |
package org.alcaudon.core.sources
import java.io.InputStream
import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper}
import com.twitter.hbc.ClientBuilder
import com.twitter.hbc.common.DelimitedStreamReader
import com.twitter.hbc.core.Constants
import com.twitter.hbc.core.endpoint.StatusesSampleEndpoint
import com.twitter.hbc.core.processor.HosebirdMessageProcessor
import com.twitter.hbc.httpclient.BasicClient
import com.twitter.hbc.httpclient.auth.{OAuth1 => TwitterOAuth1}
import org.alcaudon.core.RawRecord
object TwitterSourceConfig {
case class OAuth1(consumerKey: String,
consumerSecret: String,
token: String,
tokenSecret: String) {
def toTwitter(): TwitterOAuth1 =
new TwitterOAuth1(consumerKey, consumerSecret, token, tokenSecret)
}
}
case class TwitterSource(credentials: TwitterSourceConfig.OAuth1)
extends SourceFunc
with TimestampExtractor {
@transient val waitLock = new Object()
@transient var client: BasicClient = null
@transient val mapper = new ObjectMapper()
def run(): Unit = {
val endpoint = new StatusesSampleEndpoint()
endpoint.stallWarnings(false)
endpoint.delimited(false)
client = new ClientBuilder()
.name("alcaudon-twitter-source")
.hosts(Constants.STREAM_HOST)
.endpoint(endpoint)
.authentication(credentials.toTwitter())
.processor(new HosebirdMessageProcessor() {
var reader: DelimitedStreamReader = null
val mapper = new ObjectMapper()
override def setup(inputStream: InputStream): Unit = {
reader = new DelimitedStreamReader(inputStream,
Constants.DEFAULT_CHARSET,
50000)
}
override def process(): Boolean = {
try {
val line = reader.readLine()
ctx.collect(RawRecord(line.getBytes(), extractTimestamp(line)))
true
} catch {
case e: Exception =>
println(s"error --->$e")
false
}
}
})
.build()
client.connect()
running = true
while (running) {
waitLock.synchronized {
waitLock.wait(100L)
}
}
}
override def cancel: Unit = {
running = false
if (client != null) client.stop()
waitLock.synchronized {
waitLock.notify()
}
}
override def extractTimestamp(rawRecord: String): Long = {
val json = mapper.readValue(rawRecord, classOf[JsonNode])
if (json.has("timestamp_ms"))
json.get("timestamp_ms").asLong()
else
System.currentTimeMillis()
}
}
| fcofdez/alcaudon | src/main/scala/org/alcaudon/core/sources/TwitterSource.scala | Scala | apache-2.0 | 2,685 |
package controllers
import javax.inject.Inject
import be.objectify.deadbolt.scala.{ActionBuilders, DeadboltActions}
import be.objectify.deadbolt.scala.cache.HandlerCache
import play.api.mvc.{Action, Controller}
import security.{HandlerKeys, MyAlternativeDynamicResourceHandler, MyDeadboltHandler}
import views.html.accessOk
/**
*
* @author Steve Chaloner ([email protected])
*/
class DynamicRestrictionsController @Inject() (deadbolt: DeadboltActions, handlers: HandlerCache, actionBuilder: ActionBuilders) extends Controller
{
def pureLuck = deadbolt.Dynamic(name = "pureLuck") {
Action {
Ok(accessOk())
}
}
def noWayJose = deadbolt.Dynamic(name = "pureLuck", handler= handlers(HandlerKeys.altHandler)) {
Action {
Ok(accessOk())
}
}
def pureLuck_fromBuilder = actionBuilder.DynamicAction(name = "pureLuck").defaultHandler() {
Ok(accessOk())
}
def noWayJose_fromBuilder = actionBuilder.DynamicAction(name = "pureLuck").key(HandlerKeys.altHandler) {
Ok(accessOk())
}
}
| play2-maven-plugin/play2-maven-test-projects | play24/external-modules/deadbolt/scala/app/controllers/DynamicRestrictionsController.scala | Scala | apache-2.0 | 1,542 |
package spark.deploy
object ExecutorState
extends Enumeration("LAUNCHING", "LOADING", "RUNNING", "KILLED", "FAILED", "LOST") {
val LAUNCHING, LOADING, RUNNING, KILLED, FAILED, LOST = Value
type ExecutorState = Value
def isFinished(state: ExecutorState): Boolean = Seq(KILLED, FAILED, LOST).contains(state)
}
| ankurdave/arthur | core/src/main/scala/spark/deploy/ExecutorState.scala | Scala | bsd-3-clause | 320 |
/*
* The Bluejelly project, Copyright 2012.
*
* This source code is distributed under the terms of
* the BSD license, see the LICENSE file for details.
*/
package bluejelly.l4
import java.io.StringWriter
import scala.text.Document
import scala.text.Document.group
import scala.text.Document.nest
import scala.text.Document.text
import scala.util.parsing.input.Position
import bluejelly.utils.Errors
/**
* Custom error bag.
* @author ppedemon
*/
private class L4Errors extends Errors(false) {
private def quote[T](v:T):String = "`%s'" format v
private def ppr(d:Document):String = {
val s = new StringWriter
d.format(75, s)
s.flush()
s.toString
}
private def gnest(d:Document):Document = group(nest(2,d))
private def pprList[T](xs:List[T]):Document = text(xs mkString ("[",",","]"))
def dupDataCon(d:DataDecl, prev:DataDecl) {
val doc = gnest(
gnest("duplicated data declaration" :/: quote(d.ref) :/: "at:" :/: text(d.pos.toString)) :/:
gnest("(previous declaration was at:" :/: prev.pos.toString :: text(")")))
error(d.pos, ppr(doc))
}
def dupExtern(e:ExtDecl, prev:ExtDecl) {
val doc = gnest(
gnest("duplicated extern declaration" :/: quote(e.n) :/: "at:" :/: text(e.pos.toString)) :/:
gnest("(previous declaration was at:" :/: prev.pos.toString :: text(")")))
error(e.pos, ppr(doc))
}
def localExtern(e:ExtDecl) {
val doc = gnest("extern declaration for local function" :/: quote(e.n)
:/: "at:" :/: text(e.pos.toString))
error(e.pos, ppr(doc))
}
def dupFun(f:FunDecl, prev:FunDecl) {
val doc = gnest(
gnest("duplicated function declaration" :/: quote(f.n) :/: "at:" :/: text(f.pos.toString)) :/:
gnest("(previous declaration was at:" :/: prev.pos.toString :: text(")")))
error(f.pos, ppr(doc))
}
private def wrongPat(f:FunDecl, p:Pat, d:Document) = {
val doc = gnest(d :/:
gnest(
group("in pattern:" :/: PrettyPrinter.ppr(p)) :/:
group("at:" :/: text(p.pos.toString))) :/:
gnest(
group("in function:" :/: text(f.n.toString)) :/:
group("at:" :/: text(f.pos.toString))))
error(p.pos, ppr(doc))
}
def undefPat(f:FunDecl, p:Pat, c:ConRef) {
val msg = gnest("undefined constructor:" :/: text(quote(c)))
wrongPat(f, p, msg)
}
def nonLinearPat(f:FunDecl, p:Pat, vs:List[Var]) {
wrongPat(f, p, gnest(text("non-linear variable(s):") :/: pprList(vs)))
}
def unsaturatedPat(f:FunDecl, p:Pat, c:ConRef, over:Boolean) {
val s = "%ssaturated constructor" format (if (over) "over" else "un")
wrongPat(f, p, gnest(s :/: text(quote(c))))
}
def nonLinearParams(f:FunDecl, vs:List[Var]) {
val doc = gnest(
gnest("non-linear parameter(s):" :/: pprList(vs)) :/:
gnest("in function:" :/: quote(f.n) :/: "at:" :/: text(f.pos.toString)))
error(f.pos, ppr(doc))
}
private def genExprMsg(act:(Position,String)=>Unit)(f:FunDecl, expr:Expr, d:Document) {
val doc = gnest(d :/:
gnest(
group("in expression:" :/: PrettyPrinter.ppr(expr)) :/:
group("at:" :/: text(expr.pos.toString))) :/:
gnest(
group("in function:" :/: text(f.n.toString)) :/:
group("at:" :/: text(f.pos.toString))))
act(expr.pos, ppr(doc))
}
def wrongExpr = genExprMsg(error)_
def fishyExpr = genExprMsg(warning)_
def undefDataCon(f:FunDecl, expr:Expr, c:ConRef) {
wrongExpr(f, expr, gnest("undefined data constructor" :/: text(quote(c))))
}
def unsaturatedDataCon(f:FunDecl, expr:Expr, c:ConRef, over:Boolean) {
val s = "%ssaturated constructor" format (if (over) "over" else "un")
wrongExpr(f, expr, gnest(s :/: text(quote(c))))
}
def nonAtomicExpr(f:FunDecl, parent:Expr, expr:Expr) {
wrongExpr(f, parent,
gnest("unexpected non-atomic expression" :/: PrettyPrinter.ppr(expr)))
}
def invalidLit(f:FunDecl, parent:Expr, expr:Expr) {
wrongExpr(f, parent,
gnest("literal" :/: PrettyPrinter.ppr(expr)) :/:
text("is not a recursive expression"))
}
def invalidCon(f:FunDecl, parent:Expr, expr:Expr) {
wrongExpr(f, parent,
gnest("zero-ary constructor" :/: PrettyPrinter.ppr(expr) :/:
text("is not a recursive expression")))
}
def dupRecDecls(f:FunDecl, parent:Expr, vs:List[Var]) {
val decl = "declaration" + (if (vs.length > 1) "s:" else ":")
val d = gnest(("duplicated " + decl) :/: text(vs.mkString("[",",","]")))
wrongExpr(f, parent, d)
}
def undefVar(f:FunDecl, expr:Expr, v:Var) {
wrongExpr(f, expr, gnest("undefined variable" :/: text(quote(v))))
}
def multipleDefaults(f:FunDecl, expr:Expr, alts:List[Alt]) {
val a = (alts.tail foldLeft (PrettyPrinter.pprAlt(alts.head)))((d,a) =>
d :/: group("and:" :/: PrettyPrinter.pprAlt(a)))
val d = gnest(text("mutltiple default alternatives:") :/: group(a))
wrongExpr(f, expr, d)
}
def ambiguousMatch(f:FunDecl, expr:Expr) {
val d = text("Ambiguous match expression")
wrongExpr(f, expr, d)
}
def wrongType(f:FunDecl, expr:Expr, alt:Alt) {
val d = gnest("incompatible alternative:" :/: PrettyPrinter.pprAlt(alt))
wrongExpr(f, expr, d)
}
def duplicatedAlt(f:FunDecl, expr:Expr, alt:Alt) {
val d = gnest("duplicated alternative:" :/: PrettyPrinter.pprAlt(alt))
fishyExpr(f, expr, d)
}
def unreachableAlts(f:FunDecl, expr:Expr, alt:Alt) {
val d = gnest("unreachable alternatives after:" :/: PrettyPrinter.pprAlt(alt))
fishyExpr(f, expr, d)
}
def undefExt(f:FunDecl, expr:Expr, v:Var) {
val d = gnest("undeclared extern" :/: text(quote(v))) :/:
text("(the compiler will generate suboptimal code for this call)")
fishyExpr(f, expr, d)
}
}
/**
* Simple static analysis phase.
* @author ppedemon
*/
class StaticAnalysis(m:Module) {
private val err = new L4Errors()
/**
* Answer if we found validation errors.
*/
def hasErrors = err.hasErrors
/**
* Analyze the module passed in the class constructor.
*/
def analyze[T >: Errors]:Either[T,Env] = {
val env = collectDecls(m)
if (hasErrors) return Left(err)
m.decls foreach {
case f@FunDecl(_,_,_) => analyzeFun(env)(f)
case _ => ()
}
if (hasErrors) return Left(err) else Right(env)
}
// Check for over or unsaturated constructors
private def arity(env:Env, c:ConRef):Int = env(c).con.arity
// Get repeated elements in the given list.
// If every element is unique, return Nil.
private def repeated[T](xs:List[T]):List[T] = xs diff (xs.distinct)
// Is the given expression a literal?
private def isLit(expr:Expr):Boolean = expr match {
case ELit(_) => true
case Note(_,e) => isLit(e)
case _ => false
}
// Id the given expression a zero-ary constructor?
private def isZCon(expr:Expr):Boolean = expr match {
case ECon(c,args) if args.length == 0 => true
case Note(_,e) => isZCon(e)
case _ => false
}
// Check that all the expressions in es are valid "recursive" expressions
// E.g., literals and zero-ary constructors can be recursive
private def allRec(f:FunDecl, parent:Expr, es:List[Expr]):Boolean = es match {
case Nil => true
case expr::es if isLit(expr) => err invalidLit(f, parent, expr); false
case expr::ex if isZCon(expr) => err invalidCon(f, parent, expr); false
case _::es => allRec(f, parent, es)
}
// Check that let rec declarations are unique
private def uniqueRecDecls(f:FunDecl, parent:Expr, vs:List[Var]):Boolean = {
val m = vs groupBy identity
val rs = (m foldLeft (Nil:List[Var])) {case (rs,(v,vs)) =>
if (vs.length > 1) v::rs else rs
}
val unique = rs.isEmpty
if (!unique) err dupRecDecls(f, parent, rs)
unique
}
// Is the given expression atomic?
private def isAtom(expr:Expr):Boolean = expr match {
case Eval(_,_,_) | Match(_,_) => false
case Note(_,e) => isAtom(e)
case Let(_,_,e) => isAtom(e)
case LetRec(_,e) => isAtom(e)
case _ => true
}
// Check that all the expressions in es are atomic,
// giving proper error messages if not
private def allAtoms(f:FunDecl, parent:Expr, es:List[Expr]):Boolean = es match {
case Nil => true
case expr::es if isAtom(expr) => allAtoms (f, parent, es)
case expr::_ => err nonAtomicExpr (f, parent, expr); false
}
// Collect top-level declarations, checking for duplicates
private def collectDecls(m:Module):Env = {
(Env(m.n) /: m.decls) ((env,d) => d match {
case d@DataDecl(c,_) if env hasDataCon c => err dupDataCon (d,env(c)); env
case d@DataDecl(_,_) => env addDataCon d
case e@ExtDecl(v,_) if env hasExtern(v) => err dupExtern(e,env ext v); env
case e@ExtDecl(v,_) if env isLocalId(v) => err localExtern(e); env
case e@ExtDecl(_,_) => env addExtern(e)
case f@FunDecl(v,_,_) if env hasFun v => err dupFun(f, env(v)); env
case f@FunDecl(_,_,_) => env addFun f
})
}
// Check if the given path is valid
private def analyzePat(env:Env, f:FunDecl)(p:Pat):(Env,Boolean) = p match {
case PLit(_) => (env,true)
case PVar(v) => (env addLocal v,true)
case PCon(c,_) if !(env hasDataCon c) => err undefPat (f,p,c); (env,false)
case PCon(c,args) if args.length != arity(env,c) => {
val a = arity(env,c)
err unsaturatedPat (f, p, c, args.length > a)
(env,false)
}
case PCon(c,vs) => repeated(vs) match {
case Nil => (env addLocals vs,true)
case vs => err nonLinearPat (f,p,vs); (env,false)
}
}
// Analyze an application
private def analyzeApp(env:Env, f:FunDecl, parent:Expr, fun:Var, args:List[Expr]) {
if (env.isLocalId(fun) && !env.inScope(fun)) err undefVar (f, parent, fun)
if (!env.isLocalId(fun) && !env.hasExtern(fun)) err undefExt(f, parent, fun)
val argsOk = allAtoms(f, parent, args)
// Treat arg variables specially, so we get better error messages
if (argsOk) args foreach {
case App(v,Nil) => analyzeApp(env, f, parent, v,Nil)
case expr => analyzeExpr(env,f)(expr)
}
}
// Check that there is at most one default alternative
private def defOk(f:FunDecl, expr:Expr, alts:List[Alt]):Boolean = {
val (vs,as) = alts.partition(_.isVarAlt)
if (vs.length > 1) err multipleDefaults(f, expr, vs)
if (as.isEmpty) err ambiguousMatch(f,expr)
vs.length <= 1
}
// Check that types are compatible in the given list of alternatives
private def typesOk(f:FunDecl, expr:Expr, alts:List[Alt]):Boolean = {
def check(alts:List[Alt]):Boolean = alts match {
case Nil => true
case List(a) => true
case a::b::as if Pat.sameType(a.p,b.p) => check(b::as)
case _::b::_ => err wrongType (f, expr, b); false
}
check(alts filter {!_.isVarAlt})
}
// Check that there are no duplicated in the given list of alternatives
private def dupsOk(env:Env, f:FunDecl, expr:Expr, alts:List[Alt]):Boolean = {
def check(alts:List[Alt], values:Set[Any]):Boolean = alts match {
case Nil => true
case a::as => a.p match {
case PVar(_) => check(as, values)
case PCon(c,_) if (env hasDataCon c) && (values contains c) =>
err duplicatedAlt(f, expr, a); false
case PCon(c,_) => check(as, values + c)
case PLit(x) if values contains Lit.value(x) =>
err duplicatedAlt(f, expr, a); false
case PLit(x) => check(as, values + Lit.value(x))
}
}
check(alts, Set())
}
// Check that all alternatives are reachable
private def reachableOk(f:FunDecl, expr:Expr, alts:List[Alt]):Boolean = alts match {
case Nil => true
case List(a) => true
case a::_ if a.isVarAlt => err unreachableAlts (f, expr, a); false
case _::as => reachableOk(f, expr, as)
}
// Analyze a case alternative
private def analyzeAlt(env:Env, f:FunDecl)(alt:Alt) {
val (extEnv,patOk) = analyzePat(env, f)(alt.p)
if (patOk) analyzeExpr(extEnv, f)(alt.e)
}
// Analyze a list of case alternatives
private def analyzeAlts(env:Env, f:FunDecl, expr:Expr, alts:List[Alt]) {
val dOk = defOk(f, expr, alts)
if (!dOk) return
val tysOk = typesOk(f, expr, alts)
if (!tysOk) return;
val reachOk = reachableOk(f, expr, alts)
if (reachOk) dupsOk(env, f, expr, alts)
alts foreach analyzeAlt(env,f)
}
// Validate an expression
private def analyzeExpr(env:Env, f:FunDecl)(expr:Expr):Unit = expr match {
case ELit(_) =>
case App(fun,args) => analyzeApp(env, f, expr, fun, args)
case NApp(fun,args) => analyzeApp(env, f, expr, fun, args)
case Note(_,e) => analyzeExpr(env,f)(e)
case con@ECon(c,args) => {
if (!(env hasDataCon c)) { err undefDataCon (f,expr,c); return }
val a = arity(env,c)
if (args.length != a) err unsaturatedDataCon(f, expr, c, args.length > a)
val argsOk = allAtoms(f, expr, args)
if (argsOk) args foreach analyzeExpr(env,f)
}
case Let(v,e,b) => {
if (!isAtom(e)) { err nonAtomicExpr(f, expr, e); return }
analyzeExpr(env,f)(e)
analyzeExpr(env addLocal v, f)(b)
}
case Eval(v,e,b) => {
analyzeExpr(env,f)(e)
analyzeExpr(env addLocal v, f)(b)
}
case LetRec(decls,b) => {
val (vs,es) = (decls.unzip)
if (!uniqueRecDecls(f,expr,vs)) return
if (!allAtoms(f,expr,es)) return
if (!allRec(f,expr,es)) return
val extEnv = env addLocals vs
es foreach analyzeExpr(extEnv,f)
analyzeExpr(extEnv,f)(b)
}
case Match(v,alts) => {
if (!(env inScope v)) {
err undefVar (f, expr, v)
return
}
analyzeAlts(env, f, expr, alts)
}
}
// Validate a function
private def analyzeFun(env:Env)(f:FunDecl) {
val rep = repeated(f.args)
if (!rep.isEmpty) err nonLinearParams (f,rep)
analyzeExpr(env addLocals(f.args), f)(f.body)
}
}
/**
* Associated static object for the static analyzer. Provides a
* convenient entry point for triggering the analysis.
*
* @author ppedemon
*/
object StaticAnalysis {
/**
* Do static analysis on the given module.
*/
def analyze(m:Module) = new StaticAnalysis(m).analyze
}
| ppedemon/Bluejelly | bluejelly-l4/src/main/scala/bluejelly/l4/StaticAnalysis.scala | Scala | bsd-3-clause | 14,289 |
package com.github.fellowship_of_the_bus
package tdtd
package game
import scala.collection.mutable.Set
import scala.util.Random
import scala.math._
abstract class AI {
def pick(r: Float, c: Float, enemies: Set[Enemy]) : Enemy
def pick(r: Float, c: Float, enemiesU: Set[Enemy],
enemiesD: Set[Enemy], enemiesL: Set[Enemy], enemiesR: Set[Enemy]) : Enemy = {
throw new IllegalArgumentException
}
override def toString() : String
}
class RandomAI extends AI {
val rand = new Random()
override def pick(r: Float, c: Float, enemies: Set[Enemy]) : Enemy = {
enemies.maxBy(x => rand.nextInt())
}
override def toString() : String = "Random"
}
class ClosestAI extends AI {
override def pick(r: Float, c: Float, enemies: Set[Enemy]) : Enemy = {
enemies.minBy(enemy => {
val ydiff = r - enemy.r
val xdiff = c - enemy.c
sqrt((xdiff * xdiff) + (ydiff * ydiff))
}
)
}
override def toString() : String = "Closest to Tower"
}
class ClosestToGoalAI extends AI {
override def pick(r: Float, c: Float, enemies: Set[Enemy]) : Enemy = {
enemies.minBy(enemy => enemy.place.dist)
}
override def toString() : String = "Closest to Goal"
}
class SteamRandomAI extends AI {
val rand = new Random()
override def pick(r: Float, c: Float, enemies: Set[Enemy]): Enemy = {
throw new IllegalArgumentException
}
override def pick(r: Float, c: Float, enemiesU: Set[Enemy],
enemiesD: Set[Enemy], enemiesL: Set[Enemy], enemiesR: Set[Enemy]) : Enemy = {
val enemies = enemiesU ++ enemiesL ++ enemiesD ++ enemiesR
enemies.maxBy(x => rand.nextInt())
}
override def toString() : String = "Random Direction"
}
class SteamClosestAI extends AI {
override def pick(r: Float, c: Float, enemies: Set[Enemy]): Enemy = {
throw new IllegalArgumentException
}
override def pick(r: Float, c: Float, enemiesU: Set[Enemy],
enemiesD: Set[Enemy], enemiesL: Set[Enemy], enemiesR: Set[Enemy]) : Enemy = {
val enemies = enemiesU ++ enemiesL ++ enemiesD ++ enemiesR;
enemies.minBy(enemy => {
val ydiff = r - enemy.r
val xdiff = c - enemy.c
sqrt((xdiff * xdiff) + (ydiff * ydiff))
}
)
}
override def toString() : String = "Closest To Tower"
}
class SteamClosestToGoalAI extends AI {
override def pick(r: Float, c: Float, enemies: Set[Enemy]): Enemy = {
throw new IllegalArgumentException
}
override def pick(r: Float, c: Float, enemiesU: Set[Enemy],
enemiesD: Set[Enemy], enemiesL: Set[Enemy], enemiesR: Set[Enemy]) : Enemy = {
val enemies = enemiesU ++ enemiesL ++ enemiesD ++ enemiesR;
enemies.minBy(enemy => enemy.place.dist)
}
override def toString() : String = "Closest To Goal"
} | Fellowship-of-the-Bus/tdtd | src/main/scala/game/AI.scala | Scala | apache-2.0 | 2,642 |
package io.github.binaryfoo.lagotto
import org.scalatest.{Matchers, FlatSpec}
import io.github.binaryfoo.lagotto.LogFiles._
class LogFilesTest extends FlatSpec with Matchers {
"Log sequence number" should "use N in blah.N.log" in {
sequenceNumber(file("/some/directory/name.42.log")) shouldBe 42
sequenceNumber("/some/directory/name.42.log") shouldBe 42
}
it should "use N in blah.N.log.gz" in {
sequenceNumber(file("/some/directory/name.13.log.gz")) shouldBe 13
sequenceNumber("/some/directory/name.13.log.gz") shouldBe 13
}
it should "default to zero when missing" in {
sequenceNumber(file("/some/directory/name.log")) shouldBe 0
sequenceNumber(file("/some/directory/name.rubbish.log")) shouldBe 0
}
}
| binaryfoo/lagotto | src/test/scala/io/github/binaryfoo/lagotto/LogFilesTest.scala | Scala | mit | 745 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator
import java.util
import kafka.utils.nonthreadsafe
import scala.collection.Map
case class MemberSummary(memberId: String,
clientId: String,
clientHost: String,
metadata: Array[Byte],
assignment: Array[Byte])
/**
* Member metadata contains the following metadata:
*
* Heartbeat metadata:
* 1. negotiated heartbeat session timeout
* 2. timestamp of the latest heartbeat
*
* Protocol metadata:
* 1. the list of supported protocols (ordered by preference)
* 2. the metadata associated with each protocol
*
* In addition, it also contains the following state information:
*
* 1. Awaiting rebalance callback: when the group is in the prepare-rebalance state,
* its rebalance callback will be kept in the metadata if the
* member has sent the join group request
* 2. Awaiting sync callback: when the group is in the awaiting-sync state, its sync callback
* is kept in metadata until the leader provides the group assignment
* and the group transitions to stable
*/
@nonthreadsafe
private[coordinator] class MemberMetadata(val memberId: String,
val groupId: String,
val clientId: String,
val clientHost: String,
val sessionTimeoutMs: Int,
var supportedProtocols: List[(String, Array[Byte])]) {
var assignment: Array[Byte] = Array.empty[Byte]
var awaitingJoinCallback: JoinGroupResult => Unit = null
var awaitingSyncCallback: (Array[Byte], Short) => Unit = null
var latestHeartbeat: Long = -1
var isLeaving: Boolean = false
def protocols = supportedProtocols.map(_._1).toSet
/**
* Get metadata corresponding to the provided protocol.
*/
def metadata(protocol: String): Array[Byte] = {
supportedProtocols.find(_._1 == protocol) match {
case Some((_, metadata)) => metadata
case None =>
throw new IllegalArgumentException("Member does not support protocol")
}
}
/**
* Check if the provided protocol metadata matches the currently stored metadata.
*/
def matches(protocols: List[(String, Array[Byte])]): Boolean = {
if (protocols.size != this.supportedProtocols.size)
return false
for (i <- 0 until protocols.size) {
val p1 = protocols(i)
val p2 = supportedProtocols(i)
if (p1._1 != p2._1 || !util.Arrays.equals(p1._2, p2._2))
return false
}
return true
}
def summary(protocol: String): MemberSummary = {
MemberSummary(memberId, clientId, clientHost, metadata(protocol), assignment)
}
def summaryNoMetadata(): MemberSummary = {
MemberSummary(memberId, clientId, clientHost, Array.empty[Byte], Array.empty[Byte])
}
/**
* Vote for one of the potential group protocols. This takes into account the protocol preference as
* indicated by the order of supported protocols and returns the first one also contained in the set
*/
def vote(candidates: Set[String]): String = {
supportedProtocols.find({ case (protocol, _) => candidates.contains(protocol)}) match {
case Some((protocol, _)) => protocol
case None =>
throw new IllegalArgumentException("Member does not support any of the candidate protocols")
}
}
override def toString = {
"[%s,%s,%s,%d]".format(memberId, clientId, clientHost, sessionTimeoutMs)
}
}
| samaitra/kafka | core/src/main/scala/kafka/coordinator/MemberMetadata.scala | Scala | apache-2.0 | 4,462 |
/*
* Copyright (c) 2013-2014 TelefΓ³nica InvestigaciΓ³n y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.api.controllers
import play.api.mvc.Controller
import es.tid.cosmos.api.auth.multiauth.MultiAuthProviderComponent
import es.tid.cosmos.api.auth.request.RequestAuthenticationComponent
import es.tid.cosmos.api.email.EmailerComponent
import es.tid.cosmos.api.controllers.admin._
import es.tid.cosmos.api.controllers.admin.stats.StatsResource
import es.tid.cosmos.api.controllers.cluster.ClusterResource
import es.tid.cosmos.api.controllers.cosmos.CosmosResource
import es.tid.cosmos.api.controllers.infinity.{GroupMapperResource, InfinityAuthenticationResource}
import es.tid.cosmos.api.controllers.info.InfoResource
import es.tid.cosmos.api.controllers.pages.{AdminPage, Pages}
import es.tid.cosmos.api.controllers.profile.ProfileResource
import es.tid.cosmos.api.controllers.services.ServicesResource
import es.tid.cosmos.api.controllers.storage.StorageResource
import es.tid.cosmos.api.controllers.task.TaskResource
import es.tid.cosmos.api.profile.dao.CosmosDataStoreComponent
import es.tid.cosmos.api.report.ClusterReporter
import es.tid.cosmos.api.task.TaskDaoComponent
import es.tid.cosmos.api.usage.MachineUsageComponent
import es.tid.cosmos.common.ConfigComponent
import es.tid.cosmos.platform.ial.InfrastructureProviderComponent
import es.tid.cosmos.servicemanager.ServiceManagerComponent
/** Web application template to be mixed-in with its dependencies. */
abstract class Application {
this: ServiceManagerComponent
with InfrastructureProviderComponent
with MultiAuthProviderComponent
with CosmosDataStoreComponent
with MachineUsageComponent
with TaskDaoComponent
with MaintenanceStatusComponent
with RequestAuthenticationComponent
with EmailerComponent
with ConfigComponent =>
lazy val conf = this.config
lazy val reporter = new ClusterReporter(
conf.getString("email.environment"),
conf.getString("email.fromHost"),
conf.getString("email.reportToAddress"),
emailer
)
lazy val controllers: Map[Class[Controller], Controller] = {
val status = this.maintenanceStatus
val ial = this.infrastructureProvider
val multiAuthProvider = this.multiAuthProvider
val auth = apiRequestAuthentication
val infinityPort = conf.getInt("infinity.httpPort")
controllerMap(
// Pages
new Pages(
multiAuthProvider,
serviceManager,
reporter,
taskDao,
store,
status,
conf.getConfig("pages")),
new AdminPage(store, status),
new CliConfigResource(store),
// Non-authenticated user API
new CosmosResource(),
new ServicesResource(serviceManager),
// Authenticated user API
new StatsResource(auth, store, serviceManager, ial),
new InfoResource(auth, store, serviceManager, machineUsage),
new ProfileResource(auth, store),
new ClusterResource(auth, serviceManager, machineUsage, taskDao, store, status, reporter),
new StorageResource(auth, serviceManager, status, infinityPort),
new MaintenanceResource(auth, status),
new TaskResource(auth, taskDao),
// Infinity API
new InfinityAuthenticationResource(store, serviceManager, conf),
new GroupMapperResource(store, conf),
// Admin API
new UserResource(multiAuthProvider, serviceManager, store, status, reporter)
)
}
def services: ServiceManagerComponent
with MultiAuthProviderComponent
with MaintenanceStatusComponent
with TaskDaoComponent = this
private def controllerMap(controllers: Controller*) = Map(
(for (controller <- controllers)
yield controller.getClass.asInstanceOf[Class[Controller]] -> controller): _*)
}
| telefonicaid/fiware-cosmos-platform | cosmos-api/app/es/tid/cosmos/api/controllers/Application.scala | Scala | apache-2.0 | 4,313 |
package org.firesocks.net.ws.server
import akka.util.ByteString
import org.java_websocket.WebSocket
import org.java_websocket.handshake.ClientHandshake
sealed abstract class WSEvent extends Serializable
@SerialVersionUID(1)
case class WSError(conn: WebSocket, ex: Exception) extends WSEvent
@SerialVersionUID(1)
case class WSStringMessage(conn: WebSocket, message: String) extends WSEvent
@SerialVersionUID(1)
case class WSByteMessage(conn: WebSocket, message: ByteString) extends WSEvent
@SerialVersionUID(1)
case class WSClose(conn: WebSocket, code: Int, reason: String, remote: Boolean)
extends WSEvent
@SerialVersionUID(1)
case class WSOpen(conn: WebSocket, handshake: ClientHandshake) extends WSEvent
| fa08c/firesocks | modules/proxy/src/main/scala/org/firesocks/net/ws/server/WSEvent.scala | Scala | mit | 716 |
package P {
object X { val x = 1; val y = 2; }
}
package Q {
object X { val x = true; val y = "" }
}
| yusuke2255/dotty | tests/untried/neg/specification-scopes/P_1.scala | Scala | bsd-3-clause | 105 |
/*
* Copyright 2014β2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.server
import slamdata.Predef._
import scala.collection.Seq
import org.http4s.Uri.Authority
import org.http4s.{Method, Request, Status, Uri}
import org.http4s.server.syntax._
import scalaz._, Scalaz._
import scalaz.concurrent.Task
import shapeless.nat._
class ControlServiceSpec extends quasar.Qspec {
sequential // These tests spin up a server and there is potential for port conflicts if
// they don't run sequentially
val client = org.http4s.client.blaze.defaultClient
def withServerExpectingRestart[B](timeoutMillis: Long = 30000, initialPort: Int = 8888, defaultPort: Int = 8888)
(causeRestart: Uri => Task[Unit])(afterRestart: Task[B]): B = {
val uri = Uri(authority = Some(Authority(port = Some(initialPort))))
(for {
shutdown <- PortChangingServer.start(initialPort, reload => control.service(defaultPort, reload) orElse info.service)
b <- (causeRestart(uri) >> afterRestart).onFinish(_ => shutdown)
} yield b).unsafePerformSyncFor(timeoutMillis)
}
"Control Service" should {
def checkRunningOn(port: Int) = {
val req = Request(uri = Uri(authority = Some(Authority(port = Some(port)))), method = Method.GET)
client.fetch(req)(response => Task.now(response.status must_== Status.Ok))
}
"restart on new port when it receives a PUT with a new port number (even if it might not respond properly to the request)" in {
val Seq(startPort, newPort) = Http4sUtils.anyAvailablePorts[_2].unsafePerformSync.unsized
withServerExpectingRestart(initialPort = startPort){ baseUri: Uri =>
for {
req <- Request(uri = baseUri, method = Method.PUT).withBody(newPort.toString)
_ <- client.fetch(req)(Task.now).attempt // Currently, sometimes the server doesn't respond back to this request
// because it's already been killed which is why we call `attempt` and
// ignore the result in favor of making sure the server is now running
// on a new port
} yield ()
}{ checkRunningOn(newPort) }
}
"restart on default port when it receives a DELETE request (even if it might not respond properly to the request)" in {
val Seq(startPort, defaultPort) = Http4sUtils.anyAvailablePorts[_2].unsafePerformSync.unsized
withServerExpectingRestart(initialPort = startPort, defaultPort = defaultPort){ baseUri: Uri =>
val req = Request(uri = baseUri, method = Method.DELETE)
client.fetch(req)(Task.now).void.attempt.void // Currently, sometimes the server doesn't respond back to this request
// because it's already been killed which is why we call `attempt` and
// ignore the result in favor of making sure the server is now running
// on a new port
}{ checkRunningOn(defaultPort) }
}
}
}
| jedesah/Quasar | web/src/test/scala/quasar/server/ControlServiceSpec.scala | Scala | apache-2.0 | 3,726 |
sealed trait HZip[A <: HList, B <: HList, Result <: HList] {
def apply(a: A, b: B): Result
}
object HZip
{
import HList.::
implicit def hzipNil0: HZip[HNil, HNil, HNil] = new HZip[HNil, HNil, HNil] { def apply(a: HNil, b: HNil) = HNil }
implicit def hzipNil1[H, T <: HList]: HZip[HCons[H,T], HNil, HNil] = new HZip[HCons[H,T], HNil, HNil] { def apply(a: HCons[H,T], b: HNil) = HNil }
implicit def hzipNil2[H, T <: HList]: HZip[HNil, HCons[H,T], HNil] = new HZip[HNil, HCons[H,T], HNil] { def apply(a: HNil, b: HCons[H,T]) = HNil }
implicit def hzipCons[HA, HB, TA <: HList, TB <: HList, TR <: HList](implicit hzipTail: HZip[TA, TB, TR]): HZip[HA :: TA, HB :: TB, (HA, HB) :: TR] =
new HZip[HA :: TA, HB :: TB, (HA, HB) :: TR] {
def apply(a: HA :: TA, b: HB :: TB) = HCons( (a.head, b.head), hzipTail(a.tail, b.tail) )
}
} | harrah/up | HZip.scala | Scala | bsd-3-clause | 835 |
package models
import slick.driver.MySQLDriver.api._
import slick.lifted.{ Tag => SlickTag }
case class Tag(
id: Int,
url: String,
title: String) extends KeyedEntity
class TagTable(tag: SlickTag)
extends Table[Tag](tag, "tag") with KeyedEntityTable {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def url = column[String]("url")
def title = column[String]("title")
def * = (id, url, title) <> (Tag.tupled, Tag.unapply)
}
object Tags extends TableQuery(new TagTable(_)) with BaseTableQuery
| metaxmx/pm15 | app/models/Tag.scala | Scala | apache-2.0 | 520 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.tools.ingest
import java.util.Collections
import com.typesafe.scalalogging.LazyLogging
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.JobStatus
import org.apache.hadoop.tools.{DistCp, DistCpOptions}
import org.locationtech.geomesa.tools.Command
import org.locationtech.geomesa.tools.utils.StatusCallback
object StorageJobUtils extends LazyLogging {
def distCopy(srcRoot: Path, destRoot: Path, statusCallback: StatusCallback): Boolean = {
statusCallback.reset()
Command.user.info("Submitting DistCp job - please wait...")
val opts = distCpOptions(srcRoot, destRoot)
val job = new DistCp(new Configuration, opts).execute()
Command.user.info(s"Tracking available at ${job.getStatus.getTrackingUrl}")
// distCp has no reduce phase
while (!job.isComplete) {
if (job.getStatus.getState != JobStatus.State.PREP) {
statusCallback(s"DistCp: ", job.mapProgress(), Seq.empty, done = false)
}
Thread.sleep(1000)
}
statusCallback(s"DistCp: ", job.mapProgress(), Seq.empty, done = true)
val success = job.isSuccessful
if (success) {
Command.user.info(s"Successfully copied data to $destRoot")
} else {
Command.user.error(s"Failed to copy data to $destRoot")
}
success
}
private def distCpOptions(src: Path, dest: Path): DistCpOptions =
try { distCpOptions3(src, dest) } catch { case _: ClassNotFoundException => distCpOptions2(src, dest) }
// hadoop 3 API
private def distCpOptions3(src: Path, dest: Path): DistCpOptions = {
val clas = Class.forName("org.apache.hadoop.tools.DistCpOptions.Builder")
val constructor = clas.getConstructor(classOf[java.util.List[Path]], classOf[Path])
val builder = constructor.newInstance(Collections.singletonList(src), dest)
clas.getMethod("withAppend", classOf[Boolean]).invoke(builder, java.lang.Boolean.FALSE)
clas.getMethod("withOverwrite", classOf[Boolean]).invoke(builder, java.lang.Boolean.TRUE)
clas.getMethod("withCopyStrategy", classOf[String]).invoke(builder, "dynamic")
clas.getMethod("build").invoke(builder).asInstanceOf[DistCpOptions]
}
// hadoop 2 API
private def distCpOptions2(src: Path, dest: Path): DistCpOptions = {
val constructor = classOf[DistCpOptions].getConstructor(classOf[java.util.List[Path]], classOf[Path])
val opts = constructor.newInstance(Collections.singletonList(src), dest)
classOf[DistCpOptions].getMethod("setAppend", classOf[Boolean]).invoke(opts, java.lang.Boolean.FALSE)
classOf[DistCpOptions].getMethod("setOverwrite", classOf[Boolean]).invoke(opts, java.lang.Boolean.TRUE)
classOf[DistCpOptions].getMethod("setCopyStrategy", classOf[String]).invoke(opts, "dynamic")
opts
}
}
| ccri/geomesa | geomesa-fs/geomesa-fs-tools/src/main/scala/org/locationtech/geomesa/fs/tools/ingest/StorageJobUtils.scala | Scala | apache-2.0 | 3,287 |
package test
import org.junit.{Assert, Test}
class SamplePhaseTest extends DottyTest {
// Disabled, awaiting resolution: @Test
def testTypechekingSimpleClass = checkCompile("frontend", "class A{}") {
(tree, context) =>
implicit val ctx = context
Assert.assertTrue("can typecheck simple class",
tree.toString == "PackageDef(Ident(<empty>),List(TypeDef(Modifiers(,,List()),A,Template(DefDef(Modifiers(,,List()),<init>,List(),List(List()),TypeTree[TypeRef(ThisType(module class scala),Unit)],EmptyTree),List(Apply(Select(New(TypeTree[TypeRef(ThisType(module class lang),Object)]),<init>),List())),ValDef(Modifiers(private,,List()),_,EmptyTree,EmptyTree),List()))))"
)
}
}
| yusuke2255/dotty | test/test/SamplePhaseTest.scala | Scala | bsd-3-clause | 709 |
/*
* Copyright 2012-2015 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.money.aspectj
import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.comcast.money.annotations.{ TracedData, Timed, Traced }
import com.comcast.money.core.Money
import com.comcast.money.emitters.LogRecord
import com.comcast.money.internal.MDCSupport
import org.aspectj.lang.ProceedingJoinPoint
import org.mockito.Mockito._
import org.scalatest._
import org.scalatest.mock.MockitoSugar
import scala.concurrent.duration._
class TraceAspectSpec extends TestKit(ActorSystem("money", Money.config.getConfig("money.akka")))
with WordSpecLike with GivenWhenThen with OneInstancePerTest with BeforeAndAfterEach with Matchers with MockitoSugar {
@Traced("methodWithArguments")
def methodWithArguments(@TracedData("foo") foo: String, @TracedData("CUSTOM_NAME") bar: String) = {
Thread.sleep(50)
}
@Traced("methodWithoutArguments")
def methodWithoutArguments() = {
Thread.sleep(50)
}
@Traced("methodThrowingException")
def methodThrowingException() = {
Thread.sleep(50)
throw new RuntimeException("test failure")
}
@Traced("methodThrowingExceptionWithNoMessage")
def methodThrowingExceptionWithNoMessage() = {
Thread.sleep(50)
throw new RuntimeException()
}
@Traced("methodWithArgumentsPropagated")
def methodWithArgumentsPropagated(
@TracedData(value = "PROPAGATE", propagate = true) foo: String,
@TracedData("CUSTOM_NAME") bar: String
) = {
Thread.sleep(50)
methodWithoutArguments()
}
@Traced(
value = "methodWithIgnoredException",
ignoredExceptions = Array(classOf[IllegalArgumentException])
)
def methodWithIgnoredException() = {
throw new IllegalArgumentException("ignored")
}
@Traced(
value = "methodWithNonMatchingIgnoredException",
ignoredExceptions = Array(classOf[IllegalArgumentException])
)
def methodWithNonMatchingIgnoredException() = {
throw new RuntimeException("not-ignored")
}
@Timed("methodWithTiming")
def methodWithTiming() = {
Thread.sleep(50)
}
def expectLogMessageContaining(contains: String, wait: FiniteDuration = 2.seconds) {
awaitCond(
LogRecord.contains("log")(_.contains(contains)), wait, 100 milliseconds,
s"Expected log message containing string $contains not found after $wait"
)
}
def expectLogMessageContainingStrings(strings: Seq[String], wait: FiniteDuration = 2.seconds) {
awaitCond(
LogRecord.contains("log")(s => strings.forall(s.contains)), wait, 100 milliseconds,
s"Expected log message containing $strings not found after $wait"
)
}
val mockMdcSupport = mock[MDCSupport]
object testTraceAspect extends TraceAspect {
override val mdcSupport: MDCSupport = mockMdcSupport
}
override def beforeEach() = {
reset(mockMdcSupport)
}
"TraceAspect" when {
"advising methods by tracing them" should {
"handle methods that have no arguments" in {
Given("a method that has the tracing annotation but has no arguments")
When("the method is invoked")
methodWithoutArguments()
Then("the method execution is traced")
expectLogMessageContaining("methodWithoutArguments")
And("the result of success is captured")
expectLogMessageContaining("span-success=true")
}
"complete the trace for methods that throw exceptions" in {
Given("a method that throws an exception")
When("the method is invoked")
a[RuntimeException] should be thrownBy {
methodThrowingException()
}
Then("the method execution is logged")
expectLogMessageContaining("methodThrowingException")
And("a span-success is logged with a value of true")
expectLogMessageContaining("span-success=true")
}
"complete the trace with success for methods that throw ignored exceptions" in {
Given("a method that throws an ignored exception")
When("the method is invoked")
an[IllegalArgumentException] should be thrownBy {
methodWithIgnoredException()
}
Then("the method execution is logged")
expectLogMessageContaining("methodWithIgnoredException")
And("a span-success is logged with a value of true")
expectLogMessageContaining("span-success=true")
}
"complete the trace with failure for methods that throw exceptions that are not in ignored list" in {
Given("a method that throws an ignored exception")
When("the method is invoked")
a[RuntimeException] should be thrownBy {
methodWithNonMatchingIgnoredException()
}
Then("the method execution is logged")
expectLogMessageContaining("methodWithNonMatchingIgnoredException")
And("a span-success is logged with a value of false")
expectLogMessageContaining("span-success=false")
}
}
"advising methods that have parameters with the TracedData annotation" should {
"record the value of the parameter in the trace" in {
Given("a method that has arguments with the TraceData annotation")
When("the method is invoked")
methodWithArguments("hello", "bob")
Then("The method execution is logged")
expectLogMessageContaining("methodWithArguments")
And("the values of the arguments that have the TracedData annotation are logged")
expectLogMessageContaining("hello")
And(
"the values of the arguments that have a custom name for the TracedData annotation log using the custom name"
)
expectLogMessageContaining("CUSTOM_NAME=bob")
}
"record parameters whose value is null" in {
Given("a method that has arguments with the TraceData annotation")
When("the method is invoked with a null value")
methodWithArguments(null, null)
Then("The method execution is logged")
expectLogMessageContaining("methodWithArguments")
And("the parameter values are captured")
expectLogMessageContaining("foo=")
expectLogMessageContaining("CUSTOM_NAME=")
}
"propagate traced data parameters" in {
Given("a method that has arguments with the TracedData annotation")
And("one of those arguments is set to propagate")
And("the method calls another method that is also traced")
When("the method is invoked")
methodWithArgumentsPropagated("boo", "far")
Then("the main method execution is logged")
expectLogMessageContainingStrings(Seq("methodWithArgumentsPropagated", "PROPAGATE=boo", "CUSTOM_NAME=far"))
And("the child span has the propagated parameters")
expectLogMessageContainingStrings(Seq("methodWithoutArguments", "PROPAGATE=boo"))
}
}
"timing method execution" should {
"record the execution time of a method that returns normally" in {
Given("a trace exists")
Money.tracer.startSpan("test-timing")
And("a method that has the Timed annotation")
When("the method is called")
methodWithTiming()
And("the trace is stopped")
Money.tracer.stopSpan()
Then("a message is logged containing the duration of the method execution")
expectLogMessageContaining("methodWithTiming")
}
}
"testing pointcuts" should {
"love us some code coverage" in {
val traceAspect = new TraceAspect()
traceAspect.traced(null)
traceAspect.timed(null)
}
}
"advising methods" should {
"set span name in MDC" in {
val jp = mock[ProceedingJoinPoint]
val ann = mock[Traced]
doReturn("testSpanName").when(ann).value()
doReturn(None).when(mockMdcSupport).getSpanNameMDC
testTraceAspect.adviseMethodsWithTracing(jp, ann)
verify(mockMdcSupport).setSpanNameMDC(Some("testSpanName"))
verify(mockMdcSupport).setSpanNameMDC(None)
}
"save the current span name and reset after the child span is complete" in {
val jp = mock[ProceedingJoinPoint]
val ann = mock[Traced]
doReturn("testSpanName").when(ann).value()
doReturn(Some("parentSpan")).when(mockMdcSupport).getSpanNameMDC
testTraceAspect.adviseMethodsWithTracing(jp, ann)
verify(mockMdcSupport).setSpanNameMDC(Some("parentSpan"))
}
}
}
}
| ipapa/money | money-aspectj/src/test/scala/com/comcast/money/aspectj/TraceAspectSpec.scala | Scala | apache-2.0 | 9,013 |
/*
* Copyright (c) 2016 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package org.apache.spark.serializer
import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput}
import java.sql.Types
import com.esotericsoftware.kryo.io.{Input, KryoObjectInput, KryoObjectOutput, Output}
import com.esotericsoftware.kryo.{Kryo, KryoException, Serializer => KryoClassSerializer}
import org.apache.spark.sql.PartitionResult
import org.apache.spark.sql.jdbc.JdbcDialect
import org.apache.spark.sql.row.{GemFireXDClientDialect, GemFireXDDialect}
import org.apache.spark.sql.sources.ConnectionProperties
import org.apache.spark.sql.types._
private[spark] class ExternalizableOnlySerializer[T <: Externalizable]
extends KryoClassSerializer[T] {
private var objectInput: KryoObjectInput = _
private var objectOutput: KryoObjectOutput = _
override def write(kryo: Kryo, output: Output, obj: T): Unit = {
try {
obj.writeExternal(getObjectOutput(kryo, output))
} catch {
case e@(_: ClassCastException | _: IOException) =>
throw new KryoException(e)
}
}
override def read(kryo: Kryo, input: Input, c: Class[T]): T = {
try {
val obj = kryo.newInstance(c)
obj.readExternal(getObjectInput(kryo, input))
obj
} catch {
case e@(_: ClassCastException | _: ClassNotFoundException |
_: IOException) => throw new KryoException(e)
}
}
private def getObjectOutput(kryo: Kryo, output: Output): ObjectOutput = {
if (objectOutput == null) {
objectOutput = new KryoObjectOutput(kryo, output)
} else {
objectOutput.setOutput(output)
}
objectOutput
}
private def getObjectInput(kryo: Kryo, input: Input): ObjectInput = {
if (objectInput == null) {
objectInput = new KryoObjectInput(kryo, input)
} else {
objectInput.setInput(input)
}
objectInput
}
}
private[spark] final class ExternalizableResolverSerializer[T <: Externalizable](
readResolve: T => T) extends ExternalizableOnlySerializer[T] {
override def read(kryo: Kryo, input: Input, c: Class[T]): T = {
readResolve(super.read(kryo, input, c))
}
}
object StructTypeSerializer extends KryoClassSerializer[StructType] {
def writeType(kryo: Kryo, output: Output, dataType: DataType): Unit = {
dataType match {
case IntegerType => output.writeVarInt(Types.INTEGER, false)
case LongType => output.writeVarInt(Types.BIGINT, false)
case StringType => output.writeVarInt(Types.CLOB, false)
case DoubleType => output.writeVarInt(Types.DOUBLE, false)
case FloatType => output.writeVarInt(Types.FLOAT, false)
case ShortType => output.writeVarInt(Types.SMALLINT, false)
case ByteType => output.writeVarInt(Types.TINYINT, false)
case BooleanType => output.writeVarInt(Types.BOOLEAN, false)
case BinaryType => output.writeVarInt(Types.BLOB, false)
case TimestampType => output.writeVarInt(Types.TIMESTAMP, false)
case DateType => output.writeVarInt(Types.DATE, false)
case t: DecimalType =>
output.writeVarInt(Types.DECIMAL, false)
output.writeVarInt(t.precision, true)
output.writeVarInt(t.scale, true)
case a: ArrayType =>
output.writeVarInt(Types.ARRAY, false)
writeType(kryo, output, a.elementType)
output.writeBoolean(a.containsNull)
case m: MapType =>
// indicates MapType since there is no equivalent in JDBC
output.writeVarInt(Types.JAVA_OBJECT, false)
writeType(kryo, output, m.keyType)
writeType(kryo, output, m.valueType)
output.writeBoolean(m.valueContainsNull)
case s: StructType =>
output.writeVarInt(Types.STRUCT, false)
write(kryo, output, s)
case _ =>
output.writeVarInt(Types.OTHER, false)
kryo.writeClassAndObject(output, dataType)
}
}
def readType(kryo: Kryo, input: Input): DataType = {
input.readVarInt(false) match {
case Types.INTEGER => IntegerType
case Types.BIGINT => LongType
case Types.CLOB => StringType
case Types.DOUBLE => DoubleType
case Types.FLOAT => FloatType
case Types.SMALLINT => ShortType
case Types.TINYINT => ByteType
case Types.BOOLEAN => BooleanType
case Types.BLOB => BinaryType
case Types.TIMESTAMP => TimestampType
case Types.DATE => DateType
case Types.DECIMAL =>
val precision = input.readVarInt(true)
val scale = input.readVarInt(true)
DecimalType(precision, scale)
case Types.ARRAY =>
val elementType = readType(kryo, input)
ArrayType(elementType, input.readBoolean())
case Types.JAVA_OBJECT => // indicates MapType
val keyType = readType(kryo, input)
val valueType = readType(kryo, input)
MapType(keyType, valueType, input.readBoolean())
case Types.STRUCT => read(kryo, input, classOf[StructType])
case Types.OTHER => kryo.readClassAndObject(input).asInstanceOf[DataType]
case t => throw new KryoException(
s"Serialization error: unexpected DataType ID $t")
}
}
override def write(kryo: Kryo, output: Output, struct: StructType): Unit = {
val fields = struct.fields
val numFields = fields.length
output.writeVarInt(numFields, true)
var i = 0
while (i < numFields) {
val field = fields(i)
output.writeString(field.name)
writeType(kryo, output, field.dataType)
output.writeBoolean(field.nullable)
TypeUtilities.writeMetadata(field.metadata, kryo, output)
i += 1
}
}
override def read(kryo: Kryo, input: Input,
c: Class[StructType]): StructType = {
val numFields = input.readVarInt(true)
val fields = new Array[StructField](numFields)
var i = 0
while (i < numFields) {
val name = input.readString()
val dataType = readType(kryo, input)
val nullable = input.readBoolean()
val metadata = TypeUtilities.readMetadata(kryo, input)
fields(i) = StructField(name, dataType, nullable, metadata)
i += 1
}
StructType(fields)
}
}
object PartitionResultSerializer extends KryoClassSerializer[PartitionResult] {
override def write(kryo: Kryo, output: Output, obj: PartitionResult): Unit = {
val data = obj._1
val len = data.length
output.writeInt(len)
output.writeBytes(data, 0, len)
output.writeVarInt(obj._2, true)
}
override def read(kryo: Kryo, input: Input,
c: Class[PartitionResult]): PartitionResult = {
val len = input.readInt()
val data = input.readBytes(len)
new PartitionResult(data, input.readVarInt(true))
}
}
object ConnectionPropertiesSerializer
extends KryoClassSerializer[ConnectionProperties] {
override def write(kryo: Kryo, output: Output,
connProps: ConnectionProperties): Unit = {
output.writeString(connProps.url)
output.writeString(connProps.driver)
connProps.dialect match {
case GemFireXDDialect => output.writeByte(0)
case GemFireXDClientDialect => output.writeByte(1)
case d => output.writeByte(2)
kryo.writeClassAndObject(output, d)
}
val poolProps = connProps.poolProps
if (poolProps ne null) {
val numProps = poolProps.size
output.writeVarInt(numProps, true)
if (numProps > 0) {
for ((key, value) <- poolProps) {
output.writeString(key)
output.writeString(value)
}
}
} else {
output.writeVarInt(0, true)
}
// write only executor properties if available since on target side
// that is the one which will be used
if (connProps.executorConnProps.isEmpty) {
TypeUtilities.writeProperties(connProps.connProps, output)
} else {
TypeUtilities.writeProperties(connProps.executorConnProps, output)
}
output.writeBoolean(connProps.hikariCP)
}
override def read(kryo: Kryo, input: Input,
c: Class[ConnectionProperties]): ConnectionProperties = {
read(kryo, input)
}
def read(kryo: Kryo, input: Input): ConnectionProperties = {
val url = input.readString()
val driver = input.readString()
val dialect = input.readByte() match {
case 0 => GemFireXDDialect
case 1 => GemFireXDClientDialect
case _ => kryo.readClassAndObject(input).asInstanceOf[JdbcDialect]
}
var numProps = input.readVarInt(true)
var poolProps: Map[String, String] = Map.empty
if (numProps > 0) {
val propsBuilder = Map.newBuilder[String, String]
while (numProps > 0) {
val key = input.readString()
propsBuilder += key -> input.readString()
numProps -= 1
}
poolProps = propsBuilder.result
}
val connProps = TypeUtilities.readProperties(input)
val hikariCP = input.readBoolean()
ConnectionProperties(url, driver, dialect, poolProps, connProps,
connProps, hikariCP)
}
}
| vjr/snappydata | core/src/main/scala/org/apache/spark/serializer/serializers.scala | Scala | apache-2.0 | 9,484 |
package io.circe.benchmarks
import argonaut.Parse
import argonaut.Argonaut._
import org.json4s.jackson.JsonMethods
import org.scalatest.flatspec.AnyFlatSpec
import play.api.libs.json.Json
class EncodingBenchmarkSpec extends AnyFlatSpec {
val benchmark: EncodingBenchmark = new EncodingBenchmark
import benchmark._
def decodeInts(json: String): Option[List[Int]] =
Parse.decodeOption[List[Int]](json)
def decodeFoos(json: String): Option[Map[String, Foo]] =
Parse.decodeOption[Map[String, Foo]](json)
"The encoding benchmark" should "correctly encode integers using Circe" in {
assert(decodeInts(encodeIntsCirce.noSpaces) === Some(ints))
}
it should "correctly encode integers using Argonaut" in {
assert(decodeInts(encodeIntsArgonaut.nospaces) === Some(ints))
}
it should "correctly encode integers using Spray JSON" in {
assert(decodeInts(encodeIntsSpray.compactPrint) === Some(ints))
}
it should "correctly encode integers using Json4s" in {
assert(decodeInts(JsonMethods.compact(encodeIntsJson4s)) === Some(ints))
}
it should "correctly encode integers using Play JSON" in {
assert(decodeInts(Json.prettyPrint(encodeIntsPlay)) === Some(ints))
}
it should "correctly encode case classes using Circe" in {
assert(decodeFoos(encodeFoosCirce.noSpaces) === Some(foos))
}
it should "correctly encode case classes using Argonaut" in {
assert(decodeFoos(encodeFoosArgonaut.nospaces) === Some(foos))
}
it should "correctly encode case classes using Spray JSON" in {
assert(decodeFoos(encodeFoosSpray.compactPrint) === Some(foos))
}
it should "correctly encode case classes using Json4s" in {
assert(decodeFoos(JsonMethods.compact(encodeFoosJson4s)) === Some(foos))
}
it should "correctly encode case classes using Play JSON" in {
assert(decodeFoos(Json.prettyPrint(encodeFoosPlay)) === Some(foos))
}
}
| circe/circe-benchmarks | src/test/scala/io/circe/benchmarks/EncodingBenchmarkSpec.scala | Scala | apache-2.0 | 1,907 |
package tests
object Example {
def foo(o: AnyRef) = {
val i: Int = 2
i<caret> + 5
}
}
| JetBrains/intellij-scala | scala/scala-impl/testdata/postfixTemplate/tabCompletion/var-after.scala | Scala | apache-2.0 | 99 |
package sangria.util
import cats.effect.{ContextShift, IO}
import fs2.Stream
import sangria.streaming.SubscriptionStream
import scala.concurrent.Future
object Fs2Support {
type IOS[A] = Stream[IO, A]
class Fs2SubscriptionStream(implicit CS: ContextShift[IO]) extends SubscriptionStream[IOS] {
def supported[T[_]](other: SubscriptionStream[T]) = other.isInstanceOf[Fs2SubscriptionStream]
def map[A, B](source: IOS[A])(fn: A => B) = source.map(fn)
def singleFuture[T](value: Future[T]) =
Stream.eval(IO.fromFuture(IO(value)))
def single[T](value: T) = Stream.emit(value)
def mapFuture[A, B](source: IOS[A])(fn: A => Future[B]) =
source.evalMap(a => IO.fromFuture(IO(fn(a))))
def first[T](s: IOS[T]) =
s.compile.toVector.map(_.head).unsafeToFuture()
def failed[T](e: Throwable) = Stream.raiseError[IO](e)
def onComplete[Ctx, Res](result: IOS[Res])(op: => Unit) =
result.onFinalize(IO(op))
def flatMapFuture[Ctx, Res, T](future: Future[T])(resultFn: T => IOS[Res]) =
Stream.eval(IO.fromFuture(IO(future))).flatMap(resultFn)
def merge[T](streams: Vector[IOS[T]]) =
if (streams.nonEmpty)
streams.tail.foldLeft(streams.head)(_.merge(_))
else
throw new IllegalStateException("No streams produced!")
def recover[T](stream: IOS[T])(fn: Throwable => T) =
stream.handleErrorWith { case e => Stream.emit(fn(e)) }
}
implicit def observableSubscriptionStream(implicit
CS: ContextShift[IO]): SubscriptionStream[IOS] =
new Fs2SubscriptionStream
}
| OlegIlyenko/sangria | modules/core/src/test/scala/sangria/util/Fs2SubscriptionStream.scala | Scala | apache-2.0 | 1,570 |
package com.sksamuel.akka.patterns
/**
* The AcknowledgingActor will send an ack to the sender as soon as a message is received
* before continuing with processing the message.
* Uses the stackable trait pattern with DecoratingActor. Override receiver in traits that wish to use this actor.
* See DecoratingActor.
* This actor is most often used as the other end to the flow control actors.
*
* @author Stephen Samuel */
trait AcknowledgingActor extends DecoratingActor {
receiver {
case msg: AnyRef => sender ! Acknowledged
}
}
case object Acknowledged
| stoopbrain/akka-patterns | src/main/scala/com/sksamuel/akka/patterns/AcknowledgingActor.scala | Scala | apache-2.0 | 571 |
package com.gilesc
package mynab
package testkit
package generator
import org.scalacheck.Gen
import java.util.UUID
import java.time.LocalDate
import java.time.OffsetDateTime
trait TransactionGenerator {
// def generateTransactionId: Stream[TransactionId] = Stream.cons(TransactionId(UUID.randomUUID), generateTransactionId)
// def generateDate: Stream[LocalDate] = {
// val year = Gen.choose(1800, OffsetDateTime.now().getYear()).sample.get
// val month = Gen.choose(1, 12).sample.get
// val maxday = if (month == 2) 28 else 30
// val day = Gen.choose(1, maxday).sample.get
// Stream.cons(
// LocalDate.of(year, month, day),
// generateDate)
// }
// def generatePayee: Stream[Payee] = Stream.cons(Payee(Gen.alphaStr.sample.get), generatePayee)
// def generateCategory: Stream[Category] = Stream.cons(
// Category(
// MajorCategory(Gen.alphaStr.sample.get),
// MinorCategory(Gen.alphaStr.sample.get)),
// generateCategory)
//
// def generateMemo: Stream[Memo] = Stream.cons(Memo(Gen.alphaStr.sample.get), generateMemo)
// def generateAmount: Stream[Amount] = Stream.cons(Amount(BigDecimal(Gen.choose(Long.MinValue, Long.MaxValue).sample.get)), generateAmount)
// def generateCleared: Stream[Cleared] = Stream.cons(Cleared(
// Gen.oneOf(true, false).sample.get), generateCleared)
//
// def generateTransaction: Stream[Transaction] = Stream.cons(
// Transaction(
// generateTransactionId.head,
// generateDate.head,
// generatePayee.head,
// generateCategory.head,
// generateMemo.head,
// generateAmount.filter(_.value > 0).head,
// generateAmount.filter(_.value > 0).head,
// generateCleared.head),
// generateTransaction)
}
| CraigGiles/mynab | testkit/src/main/scala/com/gilesc/mynab/testkit/generator/TransactionGenerator.scala | Scala | mit | 1,727 |
package org.inosion.dadagen.api
import javax.script.{ScriptEngineManager, ScriptEngine}
import org.inosion.dadagen.Dadagenerator
import scala.tools.nsc.interpreter.IMain
/**
* A Scala Object to load up the Java Script Engine
*/
object ScalaScriptEngine {
val loadEngine: ScriptEngine = {
val engine = new ScriptEngineManager().getEngineByName("scala").asInstanceOf[IMain]
val cl: ClassLoader = classOf[Dadagenerator[_]].getClassLoader
engine.settings.usejavacp.value = true
engine.settings.feature.value = true
engine.settings.language.add("postfixOps")
engine
}
}
| inosion/dadagen | dadagen-support/src/main/scala/org/inosion/dadagen/support/ScalaScriptEngine.scala | Scala | apache-2.0 | 601 |
package nodes.util
import org.apache.spark.rdd.RDD
import pipelines.Logging
import workflow.Transformer
import scala.reflect.ClassTag
/**
* Randomly shuffle the rows of an RDD within a pipeline. Uses a shuffle operation in Spark.
*
* @param numParts An optional parameter indicating the number of output partitions.
* @tparam T Type of the input to shuffle.
*/
class Shuffler[T: ClassTag](numParts: Option[Int] = None) extends Transformer[T,T] with Logging {
override def apply(in: RDD[T]): RDD[T] = {
val numToRepartition = numParts.getOrElse(in.partitions.size)
in.repartition(numToRepartition)
}
override def apply(in: T): T = in
}
| tomerk/keystone | src/main/scala/nodes/util/Shuffler.scala | Scala | apache-2.0 | 659 |
package io.iohk.ethereum.vm
import akka.util.ByteString
import io.iohk.ethereum.crypto.kec256
import io.iohk.ethereum.domain.{Account, Address, UInt256}
object MockWorldState {
type TestVM = VM[MockWorldState, MockStorage]
type PS = ProgramState[MockWorldState, MockStorage]
type PC = ProgramContext[MockWorldState, MockStorage]
type PR = ProgramResult[MockWorldState, MockStorage]
}
case class MockWorldState(
accounts: Map[Address, Account] = Map(),
codeRepo: Map[Address, ByteString] = Map(),
storages: Map[Address, MockStorage] = Map(),
numberOfHashes: UInt256 = 0,
touchedAccounts: Set[Address] = Set.empty,
noEmptyAccountsCond: Boolean = false
) extends WorldStateProxy[MockWorldState, MockStorage] {
def getAccount(address: Address): Option[Account] =
accounts.get(address)
override def getGuaranteedAccount(address: Address): Account =
super.getGuaranteedAccount(address)
def saveAccount(address: Address, account: Account): MockWorldState =
copy(accounts = accounts + (address -> account))
def deleteAccount(address: Address): MockWorldState =
copy(accounts = accounts - address, codeRepo - address, storages - address)
def getCode(address: Address): ByteString =
codeRepo.getOrElse(address, ByteString.empty)
def getStorage(address: Address): MockStorage =
storages.getOrElse(address, MockStorage.Empty)
def getBlockHash(number: UInt256): Option[UInt256] =
if (numberOfHashes >= number && number >= 0)
Some(UInt256(kec256(number.toString.getBytes)))
else
None
def saveCode(address: Address, code: ByteString): MockWorldState =
if (code.isEmpty)
copy(codeRepo = codeRepo - address)
else
copy(codeRepo = codeRepo + (address -> code))
def saveStorage(address: Address, storage: MockStorage): MockWorldState =
if (storage.isEmpty)
copy(storages = storages - address)
else
copy(storages = storages + (address -> storage))
def getEmptyAccount: Account = Account.empty()
override def touchAccounts(addresses: Address*): MockWorldState =
if (noEmptyAccounts)
copy(touchedAccounts = touchedAccounts ++ addresses.toSet)
else
this
def clearTouchedAccounts: MockWorldState =
copy(touchedAccounts = touchedAccounts.empty)
def noEmptyAccounts: Boolean = noEmptyAccountsCond
override def keepPrecompileTouched(world: MockWorldState): MockWorldState = {
if (world.touchedAccounts.contains(ripmdContractAddress))
copy(touchedAccounts = touchedAccounts + ripmdContractAddress)
else
this
}
}
| input-output-hk/etc-client | src/test/scala/io/iohk/ethereum/vm/MockWorldState.scala | Scala | mit | 2,590 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp.coref
import cc.factorie.app.nlp._
import cc.factorie.app.nlp.phrase._
import cc.factorie.app.nlp.pos.PennPosDomain
import scala.collection.mutable.ArrayBuffer
import cc.factorie.app.nlp.Token
import cc.factorie.app.nlp.ner.BilouConllNerTag
import cc.factorie.app.nlp.pos.PennPosTag
import scala.Option.option2Iterable
/** Trait for objects that return a list of Phrases given a Document
whose annotations includes those classes listed in prereqAttrs.
This is not a DocumentAnnotator because it does not add its results to the Document.attr;
invocations to its apply method simple return a collection of Phrases.
This design was chosen because these phrases are often used for coreference
in which there are many coreference-specific choices of what mentions are filtered
or included, and we didn't want to pollute the Document.attr with a tremendous number
of postAttrs that are specific to individual coreference solutions.
If you really want a DocumentAnnotator that saves its results, it is easy to
create one uses a PhraseFinder.
@author Andrew McCallum
*/
trait MentionPhraseFinder {
def prereqAttrs: Seq[Class[_]]
//def phrasePostAttrs: Seq[Class[_]] // TODO Should we have something like this?
def apply(document:Document): Iterable[Phrase]
}
/** Apply returns a list of pronoun phrases, given PennPosTags.
@author Andrew McCallum */
object PronounFinder extends MentionPhraseFinder {
def prereqAttrs = Seq(classOf[PennPosTag])
def apply(document:Document): Iterable[Phrase] = {
val phrases = document.tokens.filter(_.posTag.isPersonalPronoun).map(t => new Phrase(t.section, start=t.positionInSection, length=1,offsetToHeadToken = -1))
for (phrase <- phrases) phrase.attr += new NounPhraseType(phrase, "PRO")
phrases
}
}
/** Apply returns a list of proper noun phrases, given BilouConllNerTags.
@author Andrew McCallum */
object ConllProperNounPhraseFinder extends MentionPhraseFinder {
def prereqAttrs = Seq(classOf[BilouConllNerTag])
def apply(doc:Document): Seq[Phrase] = {
val result = new ArrayBuffer[Phrase]
for (section <- doc.sections; token <- section.tokens) {
if (token.attr[BilouConllNerTag].categoryValue != "O") {
val attr = token.attr[BilouConllNerTag].categoryValue.split("-")
if (attr(0) == "U") {
val phrase = new Phrase(section, token.positionInSection, length=1,offsetToHeadToken = -1)
phrase.attr += new ConllPhraseEntityType(phrase, attr(1))
DeterministicNounPhraseTypeLabeler.process(phrase)
result += phrase
} else if (attr(0) == "B") {
if (token.hasNext) {
var lookFor = token.next
while (lookFor.hasNext && lookFor.attr[BilouConllNerTag].categoryValue.matches("(I|L)-" + attr(1))) lookFor = lookFor.next
// TODO Be more clever in determining the headTokenOffset
val phrase = new Phrase(section, token.positionInSection, length=lookFor.positionInSection - token.positionInSection,offsetToHeadToken = -1)
phrase.attr += new ConllPhraseEntityType(phrase, attr(1))
DeterministicNounPhraseTypeLabeler.process(phrase)
result += phrase
} else {
val phrase = new Phrase(section, token.positionInSection, length=1,offsetToHeadToken = -1)
phrase.attr += new ConllPhraseEntityType(phrase, attr(1))
DeterministicNounPhraseTypeLabeler.process(phrase)
result += phrase
}
}
}
}
result
}
}
/** Apply returns a list of acronym noun phrases.
@author Andrew McCallum */
object AcronymNounPhraseFinder extends MentionPhraseFinder {
def prereqAttrs = Seq(classOf[Token])
def apply(doc:Document): Seq[Phrase] = {
val result = new ArrayBuffer[Phrase]
for (section <- doc.sections; token <- section.tokens) {
// Matches middle word of "Yesterday IBM announced" but not "OBAMA WINS ELECTION"
if ( token.string.length > 2 && !token.containsLowerCase && Character.isUpperCase(token.string(0)) && (token.getNext ++ token.getPrev).exists(_.containsLowerCase)) {
val phrase = new Phrase(section, token.positionInSection, length=1,offsetToHeadToken = -1)
phrase.attr += new ConllPhraseEntityType(phrase, "ORG")
phrase.attr += new NounPhraseType(phrase, "NAM")
result += phrase
}
}
result
}
}
/** Apply returns a list of NNP-indicated proper noun phrases, given PennPosTags.
@author Andrew McCallum */
object NnpPosNounPhraseFinder extends MentionPhraseFinder {
def prereqAttrs = Seq(classOf[PennPosTag])
def apply(doc:Document): Seq[Phrase] = {
val result = new ArrayBuffer[Phrase]
var start = 0
for (section <- doc.sections) {
val tokens = section.tokens
while (start < tokens.length) {
val token = tokens(start)
var end = start
while (end < tokens.length && tokens(end).posTag.intValue == PennPosDomain.nnpIndex) end += 1
if (end != start && tokens(end-1).posTag.intValue == PennPosDomain.nnpIndex) {
val phrase = new Phrase(section, token.positionInSection, length=end-start,offsetToHeadToken = -1)
phrase.attr += new NounPhraseType(phrase, "NAM")
NounPhraseEntityTypeLabeler.process(phrase)
}
start = math.max(start+1, end)
}
}
result
}
}
| iesl/fuse_ttl | src/factorie-factorie_2.11-1.1/src/main/scala/cc/factorie/app/nlp/coref/MentionPhraseFinder.scala | Scala | apache-2.0 | 6,198 |
/**
* Copyright (C) 2012 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.util
import java.util
import java.util.Properties
import javax.xml.transform.Result
import org.orbeon.saxon.`type`.ValidationFailure
import org.orbeon.saxon.expr.{StackFrame, XPathContext, XPathContextMajor, XPathContextMinor}
import org.orbeon.saxon.instruct.ParameterSet
import org.orbeon.saxon.om.{Item, NamePool}
import org.orbeon.saxon.regex.RegexIterator
import org.orbeon.saxon.sort.{GroupIterator, StringCollator}
import org.orbeon.saxon.trans.{Mode, Rule}
import org.orbeon.saxon.value.{CalendarValue, DateTimeValue, DateValue}
import org.orbeon.saxon.{Configuration, Controller}
// TODO: Ideally use all java.time APIs?
object DateUtilsUsingSaxon {
// Epoch dateTime/date
private val EpochDateTime = new DateTimeValue(1970, 1, 1, 0, 0, 0, 0, 0)
private val EpochDate = new DateValue(1970, 1, 1, 0) // CalendarValue.NO_TIMEZONE
def isISODateOrDateTime(value: String): Boolean =
tryParseISODateOrDateTime(value, TimeZone.Default).isDefined
// Parse a date in XML Schema-compatible ISO format:
//
// - Format for a dateTime: [-]yyyy-mm-ddThh:mm:ss[.fff*][([+|-]hh:mm | Z)]
// - Format for a date: [-]yyyy-mm-dd[([+|-]hh:mm | Z)]
//
// Throws IllegalArgumentException if the date format is incorrect.
//
// If the date or dateTime doesn't have a timezone, then xxx.
//
def parseISODateOrDateTime(date: String): Long =
tryParseISODateOrDateTime(date, TimeZone.Default) getOrElse (throw new IllegalArgumentException)
def tryParseISODateOrDateTime(date: String, defaultTimeZone: TimeZone): Option[Long] = {
// FIXME: what if the value has an optional `-` sign in front?
val valueOrFailure =
if (date.length >= 11 && date.charAt(10) == 'T')
DateTimeValue.makeDateTimeValue(date)
else
DateValue.makeDateValue(date)
valueOrFailure match {
case value: CalendarValue =>
// FIXME: Could we not just use: `value.getCalendar.getTimeInMillis`
Some(value.subtract(if (value.isInstanceOf[DateTimeValue]) EpochDateTime else EpochDate, defaultTimeZone).getLengthInMilliseconds)
case _: ValidationFailure => None
}
}
// Parse an ISO date and return the number of milliseconds from the epoch.
def tryParseISODate(date: String, defaultTimeZone: TimeZone): Option[Long] =
DateValue.makeDateValue(date) match {
case value: DateValue =>
value.getCalendar.getTimeInMillis
Some(value.subtract(EpochDate, defaultTimeZone).getLengthInMilliseconds)
case _: ValidationFailure => None
}
object TimeZone {
object Default extends TimeZone {
def getImplicitTimezone: Int = DateUtils.DefaultOffsetMinutes
}
object UTC extends TimeZone {
def getImplicitTimezone: Int = 0
}
}
sealed trait TimeZone extends XPathContext {
import org.orbeon.saxon.`type`.SchemaType
import org.orbeon.saxon.event.SequenceReceiver
import org.orbeon.saxon.expr.XPathContext
import org.orbeon.saxon.instruct.LocalParam
import org.orbeon.saxon.om.{SequenceIterator, StructuredQName, ValueRepresentation}
import org.orbeon.saxon.trace.InstructionInfo
def getImplicitTimezone: Int
// None of these methods are called by Saxon upon `subtract()`
def newContext : XPathContextMajor = illegal
def newCleanContext : XPathContextMajor = illegal
def newMinorContext : XPathContextMinor = illegal
def getLocalParameters : ParameterSet = illegal
def getTunnelParameters : ParameterSet = illegal
def setOrigin(expr: InstructionInfo) : Unit = illegal
def setOriginatingConstructType(loc: Int) : Unit = illegal
def getOrigin : InstructionInfo = illegal
def getOriginatingConstructType : Int = illegal
def getController : Controller = illegal
def getConfiguration : Configuration = illegal
def getNamePool : NamePool = illegal
def setCaller(caller: XPathContext) : Unit = illegal
def getCaller : XPathContext = illegal
def setCurrentIterator(iter: SequenceIterator) : Unit = illegal
def getCurrentIterator : SequenceIterator = illegal
def getContextPosition : Int = illegal
def getContextItem : Item = illegal
def getLast : Int = illegal
def isAtLast : Boolean = illegal
def getCollation(name: String) : StringCollator = illegal
def getDefaultCollation : StringCollator = illegal
def useLocalParameter(
qName : StructuredQName,
binding : LocalParam,
isTunnel : Boolean
): Boolean = illegal
def getStackFrame : StackFrame = illegal
def evaluateLocalVariable(slotnumber: Int) : ValueRepresentation = illegal
def setLocalVariable(
slotnumber : Int,
value : ValueRepresentation
): Unit = illegal
def changeOutputDestination(
props : Properties,
result : Result,
isFinal : Boolean,
hostLanguage : Int,
validation : Int,
schemaType : SchemaType
) : Unit = illegal
def setTemporaryReceiver(out: SequenceReceiver): Unit = illegal
def setReceiver(receiver: SequenceReceiver) : Unit = illegal
def getReceiver : SequenceReceiver = illegal
def getCurrentMode : Mode = illegal
def getCurrentTemplateRule : Rule = illegal
def getCurrentGroupIterator : GroupIterator = illegal
def getCurrentRegexIterator : RegexIterator = illegal
def getCurrentDateTime : DateTimeValue = illegal
def iterateStackFrames : util.Iterator[_] = illegal
private def illegal = throw new IllegalStateException
}
}
| orbeon/orbeon-forms | src/main/scala/org/orbeon/oxf/util/DateUtilsUsingSaxon.scala | Scala | lgpl-2.1 | 7,194 |
package com.twitter.finatra.streams.queryable.thrift
import com.twitter.app.Flag
import com.twitter.finatra.kafkastreams.partitioning.StaticPartitioning
import com.twitter.finatra.kafkastreams.query.{
QueryableFinatraCompositeWindowStore,
QueryableFinatraKeyValueStore,
QueryableFinatraWindowStore
}
import com.twitter.util.Duration
import java.io.File
import org.apache.kafka.common.serialization.Serde
/**
* Trait to mix into a Kafka Streams Server exposing queryable state
*/
trait QueryableState extends StaticPartitioning {
protected val currentShard: Flag[Int] = flag[Int]("kafka.current.shard", "")
protected val numQueryablePartitions: Flag[Int] = flag[Int]("kafka.num.queryable.partitions", "")
/**
* Returns a queryable Finatra key value store
* @param storeName Name of the queryable store
* @param primaryKeySerde Serde of the primary key being queried which is used to determine
* which queryable store is responsible for they key being queried
* @tparam PK Type of the primary key
* @tparam K Type of the key being queried
* @tparam V Type of the value associated with the key being queried
* @return A QueryableFinatraKeyValueStore
*/
protected def queryableFinatraKeyValueStore[PK, K, V](
storeName: String,
primaryKeySerde: Serde[PK]
): QueryableFinatraKeyValueStore[PK, K, V] = {
new QueryableFinatraKeyValueStore[PK, K, V](
stateDir = stateDirWithLeadingSlash,
storeName = storeName,
primaryKeySerde = primaryKeySerde,
numShards = numApplicationInstances(),
numQueryablePartitions = numQueryablePartitions(),
currentShardId = currentShard())
}
/**
* Returns a queryable Finatra window store
* @param storeName Name of the queryable store
* @param windowSize Size of the windows being queried
* @param allowedLateness Allowed lateness for the windows being queried
* @param queryableAfterClose Time the window being queried will exist after closing
* @param primaryKeySerde Serde of the primary key being queried which is used to determine
* which queryable store is responsible for they key being queried
* @tparam K Type of the key being queried
* @tparam V Type of the value associated with the key being queried
* @return A QueryableFinatraWindowStore
*/
protected def queryableFinatraWindowStore[K, V](
storeName: String,
windowSize: Duration,
allowedLateness: Duration,
queryableAfterClose: Duration,
primaryKeySerde: Serde[K]
): QueryableFinatraWindowStore[K, V] = {
new QueryableFinatraWindowStore[K, V](
stateDir = stateDirWithLeadingSlash,
storeName = storeName,
windowSize = windowSize,
allowedLateness = allowedLateness,
queryableAfterClose = queryableAfterClose,
keySerde = primaryKeySerde,
numShards = numApplicationInstances(),
numQueryablePartitions = numQueryablePartitions(),
currentShardId = currentShard())
}
/**
* Returns a queryable Finatra composite window store (composite windows contain composite keys
* which contain a primary and secondary key)
*
* @param storeName Name of the queryable store
* @param windowSize Size of the windows being queried
* @param allowedLateness Allowed lateness for the windows being queried
* @param queryableAfterClose Time the window being queried will exist after closing
* @param primaryKeySerde Serde of the primary key being queried which is used to determine
* which queryable store is responsible for they key being queried
* @tparam PK Type of the primary key being queried
* @tparam SK Type of the secondary key being queried
* @tparam V Type of the value associated with the key being queried
* @return A QueryableFinatraCompositeWindowStore
*/
protected def queryableFinatraCompositeWindowStore[PK, SK, V](
storeName: String,
windowSize: Duration,
allowedLateness: Duration,
queryableAfterClose: Duration,
primaryKeySerde: Serde[PK]
): QueryableFinatraCompositeWindowStore[PK, SK, V] = {
new QueryableFinatraCompositeWindowStore[PK, SK, V](
stateDir = stateDirWithLeadingSlash,
storeName = storeName,
windowSize = windowSize,
allowedLateness = allowedLateness,
queryableAfterClose = queryableAfterClose,
primaryKeySerde = primaryKeySerde,
numShards = numApplicationInstances(),
numQueryablePartitions = numQueryablePartitions(),
currentShardId = currentShard())
}
/*
* Note: We need to ensure the state dir has a leading slash because processorContext.getStateDir
* always returns the state dir with a leading slash and getStateDir is used when state stores
* are added to the FinatraStoresGlobalManager
*/
private def stateDirWithLeadingSlash[V, K, PK]: File = {
val stateDirValue = stateDir()
val dirWithLeadingSlash = if (!stateDirValue.startsWith("/")) {
"/" + stateDirValue
} else {
stateDirValue
}
new File(dirWithLeadingSlash)
}
}
| twitter/finatra | kafka-streams/kafka-streams-queryable-thrift/src/main/scala/com/twitter/finatra/streams/queryable/thrift/QueryableState.scala | Scala | apache-2.0 | 5,104 |
package com.bfm.topnotch.tnassertion
import org.antlr.runtime.tree.Tree
import org.apache.hadoop.hive.ql.parse.HiveParser.TOK_TABLE_OR_COL
import org.apache.hadoop.hive.ql.parse.{ParseDriver, ParseException}
import org.apache.spark.sql.{Column, DataFrame}
import org.json4s._
import org.json4s.native.JsonMethods._
/**
* A report containing the results of checking the many assertions in one assertion group
* (assertion group = all the assertions in an assertion command)
*
* @param outputKey The outputKey of the command in the plan
* @param assertionReports The reports of all the assertions in the group
*/
case class TnAssertionGroupReport(outputKey: String, assertionReports: Seq[TnAssertionReport])
/**
* A report on the results of checking an assertion against a data set
*
* @param query The assertion's filter expression for separating valid and invalid data
* @param description A description of the rule
* @param threshold The maximum percent of invalid data allowed by the rule before it is considered a failed rule
* @param fractionInvalid The percent data that failed the rule
* @param numInvalid The number of rows for which the assertion was false.
* @param sampleInvalidData A sample of the rows that are invalid according to the rule.
* @param sampleWindowReport The windows of rows collected around each invalid row in the sample.
* @param userDefinedSummaryStats Summary statistics of the data defined by the user. Each statistic is a single number,
* such as average value of a column.
* @param userDefinedFeatures The user defined features to include in sampleInvalid regardless of whether the query references them
*/
case class TnAssertionReport(
query: String,
description: String,
threshold: Double,
fractionInvalid: Double,
numInvalid: Int = 0,
sampleInvalidData: DataFrame,
sampleWindowReport: Option[TnSampleWindowReport] = None,
userDefinedSummaryStats: Option[DataFrame] = None,
userDefinedFeatures: Seq[String] = Seq()
)
/**
* Windows of data providing context for each value in sampleInvalidData. If the user provides a way to partition and
* order the data, TopNotch will provide the ordered partition for each invalid example.
* @param sampleWindowParams The parameters used partition and order the data for generating the sample windows.
* This is needed in the report to determine the relevant columns for the dataframes
* in the report.
* @param sampleWindows The windows of data surrounding each example. In order for TopNotch to work, when a window report
* is created, the sampleWindows seq must be ordered so that the ith window corresponds with
* the ith example in sampleInvalidData.
*/
case class TnSampleWindowReport(
sampleWindowParams: TnSampleWindowParams,
sampleWindows: Seq[DataFrame]
)
class TnAssertionReportSerializer extends CustomSerializer[TnAssertionReport] (format => (
{
case _: JValue => throw new NotImplementedError("No reason to create a TnAssertionReport object from JSON.")
},
{
case tnReport : TnAssertionReport => {
val columnsForReport = TnAssertionReportSerializer.getColumns(tnReport.query, tnReport.sampleInvalidData,
tnReport.sampleWindowReport, tnReport.userDefinedFeatures)
JObject(
JField("query", JString(tnReport.query)),
JField("description", JString(tnReport.description)),
JField("threshold", JDouble(tnReport.threshold)),
JField("fractionInvalid", JDouble(tnReport.fractionInvalid)),
JField("numInvalid", JInt(tnReport.numInvalid)),
JField("sampleInvalid", TnAssertionReportSerializer
.dataFrameToJArray(tnReport.sampleInvalidData.select(columnsForReport.head, columnsForReport.tail: _*))),
JField("userSummaryStatistics", (tnReport.userDefinedSummaryStats match {
// since each summary statistic has 1 value to summarize all the rows, we only need the first row
case Some(userDefinedSummaryStatsDF) => userDefinedSummaryStatsDF.toJSON.collect.sorted
.map(rowJsonStr => parse(rowJsonStr)).head
case None => JObject()
})),
JField("sampleWindows", (tnReport.sampleWindowReport match {
case Some(sampleWindowReport) => JArray(sampleWindowReport.sampleWindows
.map(df => TnAssertionReportSerializer
.dataFrameToJArray(df.select(columnsForReport.head, columnsForReport.tail: _*))).toList)
case None => JArray(List())
}))
)
}
}
))
object TnAssertionReportSerializer {
val parseDriver = new ParseDriver()
/**
* Convert a Spark dataframe to a JsArray containing all the rows that appear in the report
*
* @param df The dataframe to convert
* @return The JsArray of the dataframe
*/
def dataFrameToJArray(df: DataFrame): JArray = JArray(df.toJSON.collect.map(rowJsonStr => parse(rowJsonStr)).toList)
/**
* Get the columns used in the assertion's query string as a sequence of SparkSQL columns.
*
* @param query The assertion's filter expression for separating valid and invalid data
* @param sampleInvalidData A sample of the rows that are invalid according to the rule.
* @param sampleWindowReport The windows of rows collected around each invalid row in the sample.
* @param userDefinedFeatures The user defined features to include in sampleInvalid regardless of whether the query references them
* @return The columns used in the assertion's query
*/
def getColumns(query: String, sampleInvalidData: DataFrame, sampleWindowReport: Option[TnSampleWindowReport],
userDefinedFeatures: Seq[String]): Seq[String] = {
// add the user defined functions to the query's columns
// if the query is invalid, return no columns
val queryAST =
try {
Some(parseDriver.parse(s"select * from testTableName where ${query}"))
}
catch {
case (_: ParseException) => None
}
// get everything that is a table name or a column name
def recColumnCollector(astNode: Tree): Seq[String] = {
(0 to astNode.getChildCount).flatMap(childIdx => astNode.getChild(childIdx) match {
case nullNode if nullNode == null => Seq[String]()
case tableOrColNameNode if tableOrColNameNode.getType == TOK_TABLE_OR_COL => Seq(tableOrColNameNode.getChild(0).getText)
case node => recColumnCollector(node)
})
}
// in addition to columns in the query, add the user defined features and the columns for window partitioning and ordering
// order the columns so that the partitioning and ordering columns come first
val tableOrColumnNames =
if (queryAST.isDefined) {
(sampleWindowReport match {
case Some(windowReport) => windowReport.sampleWindowParams.idsForWindowPartitioning ++ windowReport.sampleWindowParams.orderEachWindowBy
case None => Seq()
}) ++ recColumnCollector(queryAST.get) ++ userDefinedFeatures
}
else {
Seq[String]()
}
// get only the columns, not the tables
// we lowercase all values because HiveQL is case insensitive for table and column names
sampleInvalidData.columns.map(_.toLowerCase).toSet
.intersect(tableOrColumnNames.map(_.toLowerCase).toSet).toSeq.sorted
}
} | blackrock/TopNotch | src/main/scala/com/bfm/topnotch/tnassertion/TnAssertionReport.scala | Scala | apache-2.0 | 7,927 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\\* */
package squants
/**
* A Unit of Measure is used to define the scale of a quantity measurement
*
* Each Quantity Dimension must include at least one Unit of Measure, and one and only one Primary.
* Other units of measure are defined with conversionFactors relative to the Primary.
*
* @author garyKeorkunian
* @since 0.1
*
* @tparam A The type of Quantity being measured
*/
trait UnitOfMeasure[A <: Quantity[A]] extends Serializable {
/**
* Factory method for creating instances of a Quantity in this UnitOfMeasure
* @param n N - the Quantity's value in terms of this UnitOfMeasure
* @return
*/
def apply[N](n: N)(implicit num: Numeric[N]): A
/**
* Extractor method for getting the Numeric value of a Quantity in this UnitOfMeasure
* @param q A - The Quantity being matched
* @return
*/
def unapply(q: A) = Some(q.to(this))
/**
* Symbol used when representing Quantities in this UnitOfMeasure
* @return
*/
def symbol: String
/**
* Defines a signature for converting a quantity from this UOM to the Value UOM
* @return
*/
protected def converterFrom: Double β Double
/**
* Defines a signature for converting a quantity to this UOM from the Value UOM
* @return
*/
protected def converterTo: Double β Double
/**
* Applies the converterTo method to a value
* @param n N value in terms of the ValueUnit
* @param num Numeric[N]
* @tparam N Type
* @return
*/
final def convertTo[N](n: N)(implicit num: Numeric[N]) = converterTo(num.toDouble(n))
/**
* Applies the converterFrom method to a value
*
* @param n N value in terms of this Unit
* @param num Numeric[N]
* @tparam N Type
* @return
*/
final def convertFrom[N](n: N)(implicit num: Numeric[N]) = converterFrom(num.toDouble(n))
}
/**
* A Unit of Measure that require a simple multiplier for converting to and from the underlying value's unit
*/
trait UnitConverter { uom: UnitOfMeasure[_] β
/**
* Defines a multiplier value relative to the Quantity's [[squants.PrimaryUnit]]
*
* @return
*/
protected def conversionFactor: Double
/**
* Implements the converterTo method as a simple quotient of the value and the multiplier
* @return
*/
protected def converterTo: Double β Double = value β value / conversionFactor
/**
* Implements the converterFrom method as a simple product of the value and the multiplier
* @return
*/
protected def converterFrom: Double β Double = value β value * conversionFactor
}
/**
* Identifies the Unit of Measure with a conversionFactor of 1.0.
*
* It is used as the intermediary unit during conversions
*
* Each Quantity should have one and only one ValueUnit
*/
trait PrimaryUnit extends UnitConverter { uom: UnitOfMeasure[_] β
/**
* Implements the converterTo method to just return the underlying value
* @return
*/
override final def converterTo: Double β Double = value β value
/**
* Implements the converterFrom method to just return the underlying value
* @return
*/
override final def converterFrom: Double β Double = value β value
/**
* Value unit multiplier is always equal to 1
*/
final val conversionFactor = 1d
}
/**
* A marker trait identifying SI Units
*/
trait SiUnit
/**
* A marker trait identifying SI Base Units
*/
trait SiBaseUnit extends SiUnit
| typelevel/squants | shared/src/main/scala/squants/UnitOfMeasure.scala | Scala | apache-2.0 | 3,926 |
package calculator
import scala.scalajs.js
import org.scalajs.dom
import org.scalajs.dom.html
import dom.document
object CalculatorUI {
def main(args: Array[String]): Unit = {
try {
setupTweetMeasurer()
setup2ndOrderPolynomial()
setupCalculator()
} catch {
case th: Throwable =>
th.printStackTrace()
}
}
// Helpers
def elementById[A <: js.Any](id: String): A =
document.getElementById(id).asInstanceOf[A]
def elementValueSignal(element: html.Element,
getValue: () => String): Signal[String] = {
var prevVal = getValue()
val value = new Var(prevVal)
val onChange = { (event: dom.Event) =>
// Reconstruct manually the optimization at the root of the graph
val newVal = getValue()
if (newVal != prevVal) {
prevVal = newVal
value() = newVal
}
}
element.addEventListener("change", onChange)
element.addEventListener("keypress", onChange)
element.addEventListener("keyup", onChange)
value
}
def inputValueSignal(input: html.Input): Signal[String] =
elementValueSignal(input, () => input.value)
def textAreaValueSignal(textAreaID: String): Signal[String] = {
val textArea = elementById[html.TextArea](textAreaID)
elementValueSignal(textArea, () => textArea.value)
}
private lazy val ClearCssClassRegExp =
new js.RegExp(raw"""(?:^|\\s)has-error(?!\\S)""", "g")
def doubleValueOfInput(input: html.Input): Signal[Double] = {
val text = inputValueSignal(input)
val parent = input.parentElement
Signal {
import js.JSStringOps._
parent.className = parent.className.jsReplace(ClearCssClassRegExp, "")
try {
text().toDouble
} catch {
case e: NumberFormatException =>
parent.className += " has-error"
Double.NaN
}
}
}
// TWEET LENGTH
def setupTweetMeasurer(): Unit = {
val tweetText = textAreaValueSignal("tweettext")
val remainingCharsArea =
document.getElementById("tweetremainingchars").asInstanceOf[html.Span]
val remainingCount = TweetLength.tweetRemainingCharsCount(tweetText)
Signal {
remainingCharsArea.textContent = remainingCount().toString
}
val color = TweetLength.colorForRemainingCharsCount(remainingCount)
Signal {
remainingCharsArea.style.color = color()
}
}
// 2ND ORDER POLYNOMIAL
def setup2ndOrderPolynomial(): Unit = {
val ids = List("polyroota", "polyrootb", "polyrootc")
val inputs = ids.map(id => elementById[html.Input](id))
val doubleValues = inputs.map(doubleValueOfInput)
val List(a, b, c) = doubleValues
val delta = Polynomial.computeDelta(a, b, c)
val deltaArea = elementById[html.Span]("polyrootdelta")
Signal {
deltaArea.textContent = delta().toString
}
val solutions = Polynomial.computeSolutions(a, b, c, delta)
val solutionsArea = elementById[html.Span]("polyrootsolutions")
Signal {
solutionsArea.textContent = solutions().toString
}
}
// CALCULATOR
def setupCalculator(): Unit = {
val names = (0 until 10).map(i => ('a' + i).toChar.toString)
val inputs = names.map(name => elementById[html.Input]("calculatorexpr" + name))
val exprs = inputs.map(exprOfInput)
val namedExpressions = names.zip(exprs).toMap
val namedValues = Calculator.computeValues(namedExpressions)
assert(namedValues.keySet == namedExpressions.keySet)
for ((name, valueSignal) <- namedValues) {
val span = elementById[html.Span]("calculatorval" + name)
var dehighlightTimeout: Option[js.timers.SetTimeoutHandle] = None
Signal {
span.textContent = valueSignal().toString
span.style.backgroundColor = "#ffff99"
dehighlightTimeout.foreach(js.timers.clearTimeout)
dehighlightTimeout = Some(js.timers.setTimeout(1500) {
dehighlightTimeout = None
span.style.backgroundColor = "white"
})
}
}
}
def exprOfInput(input: html.Input): Signal[Expr] = {
val text = inputValueSignal(input)
val parent = input.parentElement
Signal {
import js.JSStringOps._
parent.className = parent.className.jsReplace(ClearCssClassRegExp, "")
try {
parseExpr(text())
} catch {
case e: IllegalArgumentException =>
parent.className += " has-error"
Literal(Double.NaN)
}
}
}
def parseExpr(text: String): Expr = {
def parseSimple(text: String): Expr = {
if (text.forall(l => l >= 'a' && l <= 'z')) {
Ref(text)
} else {
try {
Literal(text.toDouble)
} catch {
case e: NumberFormatException =>
throw new IllegalArgumentException(s"$text is neither a variable name nor a number")
}
}
}
text.split(" ").map(_.trim).filter(_ != "") match {
case Array(x) => parseSimple(x)
case Array(aText, op, bText) =>
val a = parseSimple(aText)
val b = parseSimple(bText)
op match {
case "+" => Plus(a, b)
case "-" => Minus(a, b)
case "*" => Times(a, b)
case "/" => Divide(a, b)
case _ =>
throw new IllegalArgumentException(s"$op is not a valid operator")
}
case _ =>
throw new IllegalArgumentException(s"$text is not a valid simple expression")
}
}
}
| rusucosmin/courses | fp/9-calculator-rusucosmin/web-ui/src/main/scala/calculator/CalculatorUI.scala | Scala | mit | 5,412 |
package controllers.s_consent_and_declaration
import org.specs2.mutable._
import utils.WithBrowser
import controllers.BrowserMatchers
import utils.pageobjects.{TestData, PageObjects}
import utils.pageobjects.s_consent_and_declaration.GDeclarationPage
import utils.pageobjects.preview.PreviewPage
class GDeclarationIntegrationSpec extends Specification {
section("integration", models.domain.ConsentAndDeclaration.id)
"Declaration" should {
"be presented" in new WithBrowser with BrowserMatchers with PageObjects {
val page = GDeclarationPage(context)
page goToThePage()
}
"contain errors on invalid submission" in new WithBrowser with BrowserMatchers {
browser.goTo(GDeclarationPage.url)
urlMustEqual(GDeclarationPage.url)
browser.submit("button[type='submit']")
urlMustEqual(GDeclarationPage.url)
findMustEqualSize("div[class=validation-summary] ol li", 1)
}
"navigate back to Preview page" in new WithBrowser with PageObjects {
val page = PreviewPage(context)
page goToThePage()
val declarationPage = page submitPage()
val previewPage = declarationPage goBack()
previewPage must beAnInstanceOf[PreviewPage]
}
"no contact selected in GDeclarationPage field with optional text" in new WithBrowser with PageObjects{
val page = GDeclarationPage(context)
val claim = new TestData
claim.ConsentDeclarationGettingInformationFromAnyOther = "No"
page goToThePage()
page fillPageWith claim
val pageWithErrors = page.submitPage()
pageWithErrors.listErrors.size mustEqual 1
pageWithErrors.source must contain("validation-message")
}
"page contains JS enabled check" in new WithBrowser with PageObjects {
val page = GDeclarationPage(context)
page goToThePage()
page.jsCheckEnabled must beTrue
}
}
section("integration", models.domain.ConsentAndDeclaration.id)
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/controllers/s_consent_and_declaration/GDeclarationIntegrationSpec.scala | Scala | mit | 1,951 |
import sbt._
import Keys._
object P extends Build
{
lazy val root = Project("root", file("."))
lazy val a = Project("a", file("a")) dependsOn(b)
lazy val b = Project("b", file("b"))
} | jaceklaskowski/sbt | sbt/src/sbt-test/dependency-management/invalidate-internal/project/P.scala | Scala | bsd-3-clause | 189 |
/**
* Here is a thoroughly artifical set of definitions, just to test the range of code fragments
*/
object test {
/* Should include this preceding comment through the closing paren */
val c =
(
1,
2,
3
)
// Should include this preceding comment through the closing brace
var b = {
1; 2; 3
}
/**
* Should include this Scaladoc through the closing square bracket
*/
def a = b.asInstanceOf[
Any
]
} | bhoward/Escalator | demo/test.scala | Scala | apache-2.0 | 450 |
package hercules.config.processingunit
import java.io.File
import akka.event.LoggingAdapter
/**
* Provide a config for a IlluminaProcessingUnitFetcherConfig.
*
* @param runfolderRoots
* @param sampleSheetRoot
* @param customQCConfigRoot
* @param defaultQCConfigFile
* @param customProgramConfigRoot
* @param defaultProgramConfigFile
* @param log
*/
case class IlluminaProcessingUnitFetcherConfig(
runfolderRoots: Seq[File],
sampleSheetRoot: File,
customQCConfigRoot: File,
defaultQCConfigFile: File,
customProgramConfigRoot: File,
defaultProgramConfigFile: File,
log: LoggingAdapter) extends ProcessingUnitFetcherConfig {} | johandahlberg/hercules | src/main/scala/hercules/config/processingunit/IlluminaProcessingUnitFetcherConfig.scala | Scala | mit | 648 |
package io.actorbase.actor.main
import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestActorRef, TestKit}
import io.actorbase.actor.api.Api.Request._
import io.actorbase.actor.api.Api.Response._
import org.apache.commons.lang3.SerializationUtils
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, MustMatchers, WordSpecLike}
/**
* The MIT License (MIT)
*
* Copyright (c) 2015 - 2017 Riccardo Cardin
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Tests relative to an empty Actorbase (main) actor.
*
* @author Riccardo Cardin
* @version 0.1
* @since 0.1
*/
class EmptyActorbaseTest extends TestKit(ActorSystem("testSystemActorbase"))
with ImplicitSender
with WordSpecLike
with MustMatchers
with BeforeAndAfter
with BeforeAndAfterAll {
var ab: TestActorRef[Actorbase] = _
override protected def afterAll(): Unit = {
TestKit.shutdownActorSystem(system)
}
before {
ab = TestActorRef(new Actorbase)
}
"An empty database" must {
"create a new table with a name" in {
ab ! CreateCollection("table")
expectMsg(CreateCollectionAck("table"))
}
"send an error for any find request" in {
ab ! Find("table", "key")
expectMsg(FindNAck("table", "key", "Collection table does not exist"))
}
"send an error for any upsert request" in {
ab ! Upsert("table", "key", Array())
expectMsg(UpsertNAck("table", "key", "Collection table does not exist"))
}
"send an error for any deletion request" in {
ab ! Delete("table", "key")
expectMsg(DeleteNAck("table", "key", "Collection table does not exist"))
}
"send an error for any count request" in {
ab ! Count("table")
expectMsg(CountNAck("table", "Collection table does not exist"))
}
}
}
/**
* Tests relative to a not empty Actorbase (main) actor.
*
* @author Riccardo Cardin
* @version 0.1
* @since 0.1
*/
class ActorbaseTest extends TestKit(ActorSystem("testSystemActorbase"))
with ImplicitSender
with WordSpecLike
with MustMatchers
with BeforeAndAfter
with BeforeAndAfterAll {
val Payload: Array[Byte] = SerializationUtils.serialize(42)
val Payload1: Array[Byte] = SerializationUtils.serialize(4242)
var ab: TestActorRef[Actorbase] = _
override protected def afterAll(): Unit = {
TestKit.shutdownActorSystem(system)
}
before {
ab = TestActorRef(new Actorbase)
createCollection("table")
}
"An non empty database" must {
"be able to create two different collections" in {
ab ! CreateCollection("table1")
expectMsg(CreateCollectionAck("table1"))
}
"send an error if it tries to create the same collection more than once" in {
ab ! CreateCollection("table")
expectMsg(CreateCollectionNAck("table", "Collection table already exists"))
}
"upsert some information into an existing collection" in {
ab ! Upsert("table", "key", Payload)
expectMsg(UpsertAck("table", "key"))
ab ! Upsert("table", "key1", Payload)
expectMsg(UpsertAck("table", "key1"))
ab ! Upsert("table", "key", Payload1)
expectMsg(UpsertAck("table", "key"))
}
"send an error in case of upsertion to an inexistent collection" in {
ab ! Upsert("table1", "key", Payload)
expectMsg(UpsertNAck("table1", "key", "Collection table1 does not exist"))
}
"send an error if the upsertion key is null" in {
ab ! Upsert("table", null, Payload)
expectMsg(UpsertNAck("table", null, "Key cannot be null"))
}
"count a single item in a collection" in {
ab ! Upsert("table", "key", Payload)
expectMsg(UpsertAck("table", "key"))
ab ! Count("table")
expectMsg(CountAck("table", 1L))
}
"count a multiple items in a collection" in {
ab ! Upsert("table", "key", Payload)
expectMsg(UpsertAck("table", "key"))
ab ! Upsert("table", "key1", Payload1)
expectMsg(UpsertAck("table", "key1"))
ab ! Count("table")
expectMsg(CountAck("table", 2L))
}
"send an error in case of count to an inexistent collection" in {
ab ! Count("table1")
expectMsg(CountNAck("table1", "Collection table1 does not exist"))
}
"not count upserts that receives a nack" in {
ab ! Upsert("table", "key", Payload)
expectMsg(UpsertAck("table", "key"))
ab ! Upsert("table", null, Payload1)
expectMsg(UpsertNAck("table", null, "Key cannot be null"))
ab ! Count("table")
expectMsg(CountAck("table", 1L))
}
"get a previous upserted item in an empty table" in {
ab ! Upsert("table", "key", Payload)
expectMsg(UpsertAck("table", "key"))
ab ! Find("table", "key")
expectMsg(FindAck("table", "key", Option(Payload)))
}
"get a none for a key not present" in {
ab ! Upsert("table", "key", Payload)
expectMsg(UpsertAck("table", "key"))
ab ! Find("table", "key1")
expectMsg(FindAck("table", "key1", None))
}
"send an error in case of find to an inexistent collection" in {
ab ! Find("table1", "key")
expectMsg(FindNAck("table1", "key", "Collection table1 does not exist"))
}
"get a previous upserted item in a table containing more than one item" in {
ab ! Upsert("table", "key", Payload)
expectMsg(UpsertAck("table", "key"))
ab! Upsert("table" ,"key1", Payload1)
expectMsg(UpsertAck("table" ,"key1"))
ab ! Find("table" ,"key")
expectMsg(FindAck("table", "key", Option(Payload)))
}
"send an error in case of deletion of an inexistent collection" in {
ab ! Delete("table1", "key")
expectMsg(DeleteNAck("table1", "key", "Collection table1 does not exist"))
}
"delete a previously inserted key" in {
ab ! Upsert("table", "key", Payload)
expectMsg(UpsertAck("table", "key"))
ab ! Delete("table", "key")
expectMsg(DeleteAck("table", "key"))
ab ! Find("table", "key")
expectMsg(FindAck("table", "key", None))
ab ! Count("table")
expectMsg(CountAck("table", 0))
}
/*
// FIXME
"manage to delete a key that does not exist" in {
ab ! Upsert("table", "key", Payload)
expectMsg(UpsertAck("table", "key"))
ab ! Delete("table", "key1")
expectMsg(DeleteAck("table", "key1"))
ab ! Find("table", "key")
expectMsg(FindAck("table", "key", Some(Payload)))
ab ! Count("table")
expectMsg(CountAck("table", 1))
}
*/
"delete a previously inserted key (more than one key present)" in {
ab ! Upsert("table", "key", Payload)
expectMsg(UpsertAck("table", "key"))
ab ! Upsert("table", "key1", Payload1)
expectMsg(UpsertAck("table", "key1"))
ab ! Delete("table", "key")
expectMsg(DeleteAck("table", "key"))
ab ! Find("table", "key")
expectMsg(FindAck("table", "key", None))
ab ! Find("table", "key1")
expectMsg(FindAck("table", "key1", Some(Payload1)))
ab ! Count("table")
expectMsg(CountAck("table", 1))
}
"deletion a previously inserted key and then insert another key" in {
ab ! Upsert("table", "key", Payload)
expectMsg(UpsertAck("table", "key"))
ab ! Delete("table", "key")
expectMsg(DeleteAck("table", "key"))
ab ! Upsert("table", "key1", Payload1)
expectMsg(UpsertAck("table", "key1"))
ab ! Find("table", "key")
expectMsg(FindAck("table", "key", None))
ab ! Find("table", "key1")
expectMsg(FindAck("table", "key1", Some(Payload1)))
ab ! Count("table")
expectMsg(CountAck("table", 1))
}
}
private def createCollection(name: String) = {
ab ! CreateCollection(name)
expectMsg(CreateCollectionAck(name))
}
} | rcardin/actorbase | src/test/scala/io/actorbase/actor/main/ActorbaseTest.scala | Scala | mit | 8,814 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.{IdentityOutputShape, TensorModule}
import com.intel.analytics.bigdl.tensor.{Storage, Tensor}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Engine
import com.intel.analytics.bigdl.utils.RandomGenerator._
import scala.concurrent.Future
import scala.reflect.ClassTag
/**
* Dropout masks(set to zero) parts of input using a bernoulli distribution.
* Each input element has a probability initP of being dropped. If `scale` is
* true(true by default), the outputs are scaled by a factor of `1/(1-initP)`
* during training.
* During evaluating, output is the same as input.
*
* It has been proven an effective approach for regularization and preventing
* co-adaptation of feature detectors. For more details, plese see
* [Improving neural networks by preventing co-adaptation of feature detectors]
* (https://arxiv.org/abs/1207.0580)
*
* @param initP the probability p
* @param inplace whether to make `input` and `output` share the same storage
* @param scale whether to scale the output by a factor of `1 / (1 - p)`
*/
@SerialVersionUID(- 4636332259181125718L)
class Dropout[T: ClassTag](
val initP: Double = 0.5,
val inplace: Boolean = false,
var scale: Boolean = true)(
implicit ev: TensorNumeric[T]) extends TensorModule[T] {
private var p = initP
var noise = Tensor[T]()
var isResampling = true
@transient
protected var results: Array[Future[Unit]] = null
/**
* Get current probability to be dropped.
* @return p
*/
def getP(): T = {
return ev.fromType[Double](p)
}
override def updateOutput(input: Tensor[T]): Tensor[T] = {
if (inplace) {
this.output = input
} else {
this.output.resizeAs(input).copy(input)
}
if (results == null) {
results = new Array[Future[Unit]](Engine.model.getPoolSize)
}
if (train) {
noise.resizeAs(input)
if (input.isContiguous()) {
if (isResampling) {
val noiseData = noise.storage().array()
var taskSize = noise.nElement() / Engine.model.getPoolSize
var extraTask = noise.nElement() % Engine.model.getPoolSize
var allocated = 0
val offset = this.output.storageOffset() - 1
val data = this.output.storage.array()
var i = 0
while (allocated < noise.nElement()) {
val start = allocated
allocated += taskSize
if (extraTask > 0) {
allocated += 1
extraTask -= 1
}
val end = allocated
results(i) = Engine.model.invoke(() => {
var k = start
while (k < end) {
noiseData(k) = if (RNG.bernoulli(1 - p)) {
if (scale) {
data(offset + k) = ev.divide(data(offset + k), ev.fromType[Double](1 - p))
ev.fromType[Double](1.0 / (1 - p))
} else {
ev.fromType[Int](1)
}
} else {
data(offset + k) = ev.fromType[Int](0)
ev.fromType[Int](0)
}
k += 1
}
})
i += 1
}
Engine.model.sync(results)
} else {
this.output.cmul(noise)
}
this.output
} else {
if (isResampling) {
noise.bernoulli(1 - p)
if (scale) {
noise.div(ev.fromType[Double](1 - p))
}
}
this.output.cmul(noise)
}
} else if (!scale) {
this.output.mul(ev.fromType[Double](1 - p))
} else {
output
}
}
override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
if (results == null) {
results = new Array[Future[Unit]](Engine.model.getPoolSize)
}
if (train) {
if (inplace) {
this.gradInput = gradOutput
} else {
this.gradInput.resizeAs(gradOutput).copy(gradOutput)
}
if (gradInput.isContiguous()) {
val noiseData = noise.storage().array()
var taskSize = noise.nElement() / Engine.model.getPoolSize
var extraTask = noise.nElement() % Engine.model.getPoolSize
val gradInputData = gradInput.storage().array()
val gradInputOffset = gradInput.storageOffset() - 1
var allocated = 0
var i = 0
while (allocated < noise.nElement()) {
val start = allocated
allocated += taskSize
if (extraTask > 0) {
allocated += 1
extraTask -= 1
}
val end = allocated
results(i) = Engine.model.invoke(() => {
var k = start
while (k < end) {
gradInputData(gradInputOffset + k) =
ev.times(gradInputData(gradInputOffset + k), noiseData(k))
k += 1
}
})
i += 1
}
Engine.model.sync(results)
this.gradInput
} else {
this.gradInput.cmul(noise)
}
} else {
throw new IllegalArgumentException("backprop only defined while training")
}
this.gradInput
}
override def clearState(): this.type = {
if (!inplace) {
super.clearState()
}
noise.set()
this
}
/**
* Set current probability to be dropped.
* @param p new probability
* @return
*/
def setP(p: Double): this.type = {
this.p = p
this
}
override def toString(): String = {
s"${getPrintName}($p)"
}
}
object Dropout {
def apply[T: ClassTag](
initP: Double = 0.5,
inplace: Boolean = false,
scale: Boolean = true)(implicit ev: TensorNumeric[T]) : Dropout[T] = {
new Dropout[T](initP, inplace, scale)
}
}
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/Dropout.scala | Scala | apache-2.0 | 6,454 |
package org.jetbrains.plugins.scala.testingSupport.utest
import com.intellij.execution.RunnerAndConfigurationSettings
import org.jetbrains.plugins.scala.testingSupport.ScalaTestingTestCase
import org.jetbrains.plugins.scala.testingSupport.test.utest.{UTestRunConfiguration, UTestConfigurationProducer}
/**
* @author Roman.Shein
* @since 13.05.2015.
*/
abstract class UTestTestCase extends ScalaTestingTestCase(new UTestConfigurationProducer) {
}
| igrocki/intellij-scala | test/org/jetbrains/plugins/scala/testingSupport/utest/UTestTestCase.scala | Scala | apache-2.0 | 454 |
package scala.meta
package internal.hosts.scalac
package converters
import org.scalameta.invariants._
import org.scalameta.unreachable
import scala.{Seq => _}
import scala.collection.immutable.Seq
import scala.tools.nsc.{Global => ScalaGlobal}
import scala.meta.internal.{ast => m}
import scala.meta.internal.hosts.scalac.reflect._
// This module exposes methods to convert from scala.meta members to scala.reflect symbols.
// To be more precise, it returns logical symbols instead of scala.reflect ones, but that's almost the same.
trait ToGsymbol extends GlobalToolkit with MetaToolkit {
self: Api =>
protected implicit class XtensionMnameToLsymbols(mname: m.Name) {
def toLsymbols: Seq[l.Symbol] = {
mname.denot.symbols.map(symbolTable.convert)
}
}
protected implicit class XtensionMmemberToLsymbols(mmember: m.Member) {
def toLsymbols: Seq[l.Symbol] = mmember.name.require[m.Name].toLsymbols
}
} | beni55/scalameta | scalahost/src/main/scala/scala/meta/internal/hosts/scalac/converters/ToGsymbol.scala | Scala | bsd-3-clause | 931 |
package com.twitter.finatra.utils
import org.jboss.netty.handler.codec.http.HttpResponse
object ResponseUtils {
def is5xxResponse(response: HttpResponse) = {
errorClass(response) == 5
}
def is4xxOr5xxResponse(response: HttpResponse) = {
val errClass = errorClass(response)
errClass == 4 || errClass == 5
}
private def errorClass(response: HttpResponse): Int = {
response.getStatus.getCode / 100
}
}
| kaushik94/finatra | utils/src/main/scala/com/twitter/finatra/utils/ResponseUtils.scala | Scala | apache-2.0 | 432 |
package presenters
import java.time.Instant
import java.time.temporal._
import nl.surfnet.nsiv2.utils._
import org.ogf.schemas.nsi._2013._12.connection.types._
class ConnectionPresenterTest extends helpers.Specification {
"A connection" >> {
def states = new ConnectionStatesType()
.withReservationState(ReservationStateEnumType.RESERVE_HELD)
.withProvisionState(ProvisionStateEnumType.PROVISIONED)
.withLifecycleState(LifecycleStateEnumType.CREATED)
.withDataPlaneStatus(new DataPlaneStatusType().withActive(false))
def data = new QuerySummaryResultType().withConnectionId("ID").withGlobalReservationId("GLOBAL").withDescription("description")
.withRequesterNSA("requester").withConnectionStates(states)
def criteria = new ReservationRequestCriteriaType().withSchedule(new ScheduleType)
val now = Instant.now
"given a query summary result" should {
val subject = ConnectionPresenter(data, Some(criteria))
"have a connection ID" in {
subject.connectionId must beEqualTo(data.getConnectionId)
}
"have a description" in {
subject.description must beSome(data.getDescription)
}
"have a global reservation ID" in {
subject.globalReservationId must beEqualTo(Some(data.getGlobalReservationId))
}
"have a requester NSA" in {
subject.requesterNsa must beEqualTo(data.getRequesterNSA)
}
"have a status" in {
subject.status must not(beEmpty)
}
}
"given an active connection" should {
val schedule = criteria.getSchedule
.withStartTime(now.minus(1, ChronoUnit.DAYS).toXMLGregorianCalendar())
.withEndTime(now.plus(1, ChronoUnit.DAYS).toXMLGregorianCalendar())
val subject = ConnectionPresenter(data.withConnectionStates(states.withDataPlaneStatus( new DataPlaneStatusType().withActive(true) )),
Some(criteria.withSchedule(schedule)))
"have an active data plane" in {
subject.dataPlaneStatus must beEqualTo("active")
}
"qualify as 'current" in {
subject.qualifier(now) must beEqualTo('current)
}
}
"given a future connection" should {
val schedule = criteria.getSchedule
.withStartTime(now.plus(1, ChronoUnit.DAYS).toXMLGregorianCalendar())
.withEndTime(now.plus(5, ChronoUnit.DAYS).toXMLGregorianCalendar())
val subject = ConnectionPresenter(data.withConnectionStates(states.withDataPlaneStatus( new DataPlaneStatusType().withActive(false) )),
Some(criteria.withSchedule(schedule)))
"have an inactive data plane" in {
subject.dataPlaneStatus must beEqualTo("inactive")
}
"qualify as 'future" in {
subject.qualifier(now) must beEqualTo('future)
}
}
"given a past connection" should {
val schedule = criteria.getSchedule
.withStartTime(now.minus(5, ChronoUnit.DAYS).toXMLGregorianCalendar())
.withEndTime(now.minus(1, ChronoUnit.DAYS).toXMLGregorianCalendar())
val subject = ConnectionPresenter(data.withConnectionStates(states.withDataPlaneStatus( new DataPlaneStatusType().withActive(false) )),
Some(criteria.withSchedule(schedule)))
"have an inactive data plane" in {
subject.dataPlaneStatus must beEqualTo("inactive")
}
"qualify as 'past" in {
subject.qualifier(now) must beEqualTo('past)
}
}
}
}
| BandwidthOnDemand/nsi-safnari | test/presenters/ConnectionPresenterTest.scala | Scala | bsd-3-clause | 3,519 |
package web
import org.json4s.{DefaultFormats, Formats}
import org.scalatra.ScalatraServlet
import org.scalatra.json.JacksonJsonSupport
/**
* Created by salim on 04/02/2016.
*/
class JSONServlet extends ScalatraServlet with JacksonJsonSupport{
before() {
contentType = formats("json")
}
protected implicit val jsonFormats: Formats = DefaultFormats
} | salimfadhley/funproxy | src/main/scala/web/JSONServlet.scala | Scala | mit | 367 |
/* Copyright 2015 Richard WiedenhΓΆft <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package net.metanoise.android.jenastop
import android.os.Bundle
import android.text.Html
import android.text.method.LinkMovementMethod
import android.widget.TextView
import com.github.ghik.silencer.silent
import net.metanoise.android.jenastop.ui.{ HomeButton, ScalaActivity }
class AboutActivity extends ScalaActivity with HomeButton {
def contentView = getLayoutInflater.inflate(R.layout.activity_about, null)
lazy val textView = findViewById(R.id.textView).asInstanceOf[TextView]
override def onCreate(bundle: Bundle): Unit = {
super.onCreate(bundle)
val html = Html.fromHtml(getResources.getString(R.string.about_text)): @silent // Deprecated but needed for compat
textView.setText(html)
textView.setMovementMethod(LinkMovementMethod.getInstance())
}
}
| Richard-W/jenastop | src/main/scala/net/metanoise/android/jenastop/AboutActivity.scala | Scala | gpl-3.0 | 1,502 |
/**
* Licensed to Gravity.com under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Gravity.com licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gander.extractors
/**
* Created by Jim Plush
* User: jim
* Date: 8/15/11
*/
object StandardContentExtractor extends ContentExtractor
| lloydmeta/gander | src/main/scala/gander/extractors/StandardContentExtractor.scala | Scala | apache-2.0 | 952 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.mutation
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan}
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.hive.HiveSessionCatalog
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
/**
* Util for IUD common function
*/
object IUDCommonUtil {
/**
* iterates the plan and check whether CarbonCommonConstants.CARBON_INPUT_SEGMENTS set for any
* any table or not
* @param sparkSession
* @param logicalPlan
*/
def checkIfSegmentListIsSet(sparkSession: SparkSession, logicalPlan: LogicalPlan): Unit = {
val carbonProperties = CarbonProperties.getInstance()
logicalPlan.foreach {
case unresolvedRelation: UnresolvedRelation =>
val dbAndTb =
sparkSession.sessionState.catalog.asInstanceOf[HiveSessionCatalog].getCurrentDatabase +
"." + unresolvedRelation.tableIdentifier.table
val segmentProperties = carbonProperties
.getProperty(CarbonCommonConstants.CARBON_INPUT_SEGMENTS + dbAndTb, "")
if (!(segmentProperties.equals("") || segmentProperties.trim.equals("*"))) {
throw new MalformedCarbonCommandException("carbon.input.segments." + dbAndTb +
"should not be set for table used in DELETE " +
"query. Please reset the property to carbon" +
".input.segments." +
dbAndTb + "=*")
}
case logicalRelation: LogicalRelation if (logicalRelation.relation
.isInstanceOf[CarbonDatasourceHadoopRelation]) =>
val dbAndTb =
logicalRelation.relation.asInstanceOf[CarbonDatasourceHadoopRelation].carbonTable
.getDatabaseName + "." +
logicalRelation.relation.asInstanceOf[CarbonDatasourceHadoopRelation].carbonTable
.getTableName
val sementProperty = carbonProperties
.getProperty(CarbonCommonConstants.CARBON_INPUT_SEGMENTS + dbAndTb, "")
if (!(sementProperty.equals("") || sementProperty.trim.equals("*"))) {
throw new MalformedCarbonCommandException("carbon.input.segments." + dbAndTb +
"should not be set for table used in UPDATE " +
"query. Please reset the property to carbon" +
".input.segments." +
dbAndTb + "=*")
}
case filter: Filter => filter.subqueries.toList
.foreach(subquery => checkIfSegmentListIsSet(sparkSession, subquery))
case _ =>
}
}
}
| sgururajshetty/carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/mutation/IUDCommonUtil.scala | Scala | apache-2.0 | 3,866 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io.{IOException, ObjectInputStream, ObjectOutputStream}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import scala.util.hashing.byteswap32
import org.apache.spark.rdd.{PartitionPruningRDD, RDD}
import org.apache.spark.serializer.JavaSerializer
import org.apache.spark.util.{CollectionsUtils, Utils}
import org.apache.spark.util.random.SamplingUtils
/**
* An object that defines how the elements in a key-value pair RDD are partitioned by key.
* Maps each key to a partition ID, from 0 to `numPartitions - 1`.
*/
abstract class Partitioner extends Serializable {
def numPartitions: Int
def getPartition(key: Any): Int
}
object Partitioner {
/**
* Choose a partitioner to use for a cogroup-like operation between a number of RDDs.
*
* If any of the RDDs already has a partitioner, choose that one.
*
* Otherwise, we use a default HashPartitioner. For the number of partitions, if
* spark.default.parallelism is set, then we'll use the value from SparkContext
* defaultParallelism, otherwise we'll use the max number of upstream partitions.
*
* Unless spark.default.parallelism is set, the number of partitions will be the
* same as the number of partitions in the largest upstream RDD, as this should
* be least likely to cause out-of-memory errors.
*
* We use two method parameters (rdd, others) to enforce callers passing at least 1 RDD.
*/
def defaultPartitioner(rdd: RDD[_], others: RDD[_]*): Partitioner = {
val bySize = (Seq(rdd) ++ others).sortBy(_.partitions.length).reverse
for (r <- bySize if r.partitioner.isDefined && r.partitioner.get.numPartitions > 0) {
return r.partitioner.get
}
if (rdd.context.conf.contains("spark.default.parallelism")) {
new HashPartitioner(rdd.context.defaultParallelism)
} else {
new HashPartitioner(bySize.head.partitions.length)
}
}
}
/**
* A [[org.apache.spark.Partitioner]] that implements hash-based partitioning using
* Java's `Object.hashCode`.
*
* Java arrays have hashCodes that are based on the arrays' identities rather than their contents,
* so attempting to partition an RDD[Array[_]] or RDD[(Array[_], _)] using a HashPartitioner will
* produce an unexpected or incorrect result.
*/
class HashPartitioner(partitions: Int) extends Partitioner {
require(partitions >= 0, s"Number of partitions ($partitions) cannot be negative.")
def numPartitions: Int = partitions
def getPartition(key: Any): Int = key match {
case null => 0
case _ => Utils.nonNegativeMod(key.hashCode, numPartitions)
}
override def equals(other: Any): Boolean = other match {
case h: HashPartitioner =>
h.numPartitions == numPartitions
case _ =>
false
}
override def hashCode: Int = numPartitions
}
/**
* A [[org.apache.spark.Partitioner]] that partitions sortable records by range into roughly
* equal ranges. The ranges are determined by sampling the content of the RDD passed in.
*
* Note that the actual number of partitions created by the RangePartitioner might not be the same
* as the `partitions` parameter, in the case where the number of sampled records is less than
* the value of `partitions`.
*/
class RangePartitioner[K : Ordering : ClassTag, V](
partitions: Int,
rdd: RDD[_ <: Product2[K, V]],
private var ascending: Boolean = true)
extends Partitioner {
// We allow partitions = 0, which happens when sorting an empty RDD under the default settings.
require(partitions >= 0, s"Number of partitions cannot be negative but found $partitions.")
private var ordering = implicitly[Ordering[K]]
// An array of upper bounds for the first (partitions - 1) partitions
private var rangeBounds: Array[K] = {
if (partitions <= 1) {
Array.empty
} else {
// This is the sample size we need to have roughly balanced output partitions, capped at 1M.
val sampleSize = math.min(20.0 * partitions, 1e6)
// Assume the input partitions are roughly balanced and over-sample a little bit.
val sampleSizePerPartition = math.ceil(3.0 * sampleSize / rdd.partitions.length).toInt
val (numItems, sketched) = RangePartitioner.sketch(rdd.map(_._1), sampleSizePerPartition)
if (numItems == 0L) {
Array.empty
} else {
// If a partition contains much more than the average number of items, we re-sample from it
// to ensure that enough items are collected from that partition.
val fraction = math.min(sampleSize / math.max(numItems, 1L), 1.0)
val candidates = ArrayBuffer.empty[(K, Float)]
val imbalancedPartitions = mutable.Set.empty[Int]
sketched.foreach { case (idx, n, sample) =>
if (fraction * n > sampleSizePerPartition) {
imbalancedPartitions += idx
} else {
// The weight is 1 over the sampling probability.
val weight = (n.toDouble / sample.length).toFloat
for (key <- sample) {
candidates += ((key, weight))
}
}
}
if (imbalancedPartitions.nonEmpty) {
// Re-sample imbalanced partitions with the desired sampling probability.
val imbalanced = new PartitionPruningRDD(rdd.map(_._1), imbalancedPartitions.contains)
val seed = byteswap32(-rdd.id - 1)
val reSampled = imbalanced.sample(withReplacement = false, fraction, seed).collect()
val weight = (1.0 / fraction).toFloat
candidates ++= reSampled.map(x => (x, weight))
}
RangePartitioner.determineBounds(candidates, partitions)
}
}
}
def numPartitions: Int = rangeBounds.length + 1
private var binarySearch: ((Array[K], K) => Int) = CollectionsUtils.makeBinarySearch[K]
def getPartition(key: Any): Int = {
val k = key.asInstanceOf[K]
var partition = 0
if (rangeBounds.length <= 128) {
// If we have less than 128 partitions naive search
while (partition < rangeBounds.length && ordering.gt(k, rangeBounds(partition))) {
partition += 1
}
} else {
// Determine which binary search method to use only once.
partition = binarySearch(rangeBounds, k)
// binarySearch either returns the match location or -[insertion point]-1
if (partition < 0) {
partition = -partition-1
}
if (partition > rangeBounds.length) {
partition = rangeBounds.length
}
}
if (ascending) {
partition
} else {
rangeBounds.length - partition
}
}
override def equals(other: Any): Boolean = other match {
case r: RangePartitioner[_, _] =>
r.rangeBounds.sameElements(rangeBounds) && r.ascending == ascending
case _ =>
false
}
override def hashCode(): Int = {
val prime = 31
var result = 1
var i = 0
while (i < rangeBounds.length) {
result = prime * result + rangeBounds(i).hashCode
i += 1
}
result = prime * result + ascending.hashCode
result
}
@throws(classOf[IOException])
private def writeObject(out: ObjectOutputStream): Unit = Utils.tryOrIOException {
val sfactory = SparkEnv.get.serializer
sfactory match {
case js: JavaSerializer => out.defaultWriteObject()
case _ =>
out.writeBoolean(ascending)
out.writeObject(ordering)
out.writeObject(binarySearch)
val ser = sfactory.newInstance()
Utils.serializeViaNestedStream(out, ser) { stream =>
stream.writeObject(scala.reflect.classTag[Array[K]])
stream.writeObject(rangeBounds)
}
}
}
@throws(classOf[IOException])
private def readObject(in: ObjectInputStream): Unit = Utils.tryOrIOException {
val sfactory = SparkEnv.get.serializer
sfactory match {
case js: JavaSerializer => in.defaultReadObject()
case _ =>
ascending = in.readBoolean()
ordering = in.readObject().asInstanceOf[Ordering[K]]
binarySearch = in.readObject().asInstanceOf[(Array[K], K) => Int]
val ser = sfactory.newInstance()
Utils.deserializeViaNestedStream(in, ser) { ds =>
implicit val classTag = ds.readObject[ClassTag[Array[K]]]()
rangeBounds = ds.readObject[Array[K]]()
}
}
}
}
private[spark] object RangePartitioner {
/**
* Sketches the input RDD via reservoir sampling on each partition.
*
* @param rdd the input RDD to sketch
* @param sampleSizePerPartition max sample size per partition
* @return (total number of items, an array of (partitionId, number of items, sample))
*/
def sketch[K : ClassTag](
rdd: RDD[K],
sampleSizePerPartition: Int): (Long, Array[(Int, Long, Array[K])]) = {
val shift = rdd.id
// val classTagK = classTag[K] // to avoid serializing the entire partitioner object
val sketched = rdd.mapPartitionsWithIndex { (idx, iter) =>
val seed = byteswap32(idx ^ (shift << 16))
val (sample, n) = SamplingUtils.reservoirSampleAndCount(
iter, sampleSizePerPartition, seed)
Iterator((idx, n, sample))
}.collect()
val numItems = sketched.map(_._2).sum
(numItems, sketched)
}
/**
* Determines the bounds for range partitioning from candidates with weights indicating how many
* items each represents. Usually this is 1 over the probability used to sample this candidate.
*
* @param candidates unordered candidates with weights
* @param partitions number of partitions
* @return selected bounds
*/
def determineBounds[K : Ordering : ClassTag](
candidates: ArrayBuffer[(K, Float)],
partitions: Int): Array[K] = {
val ordering = implicitly[Ordering[K]]
val ordered = candidates.sortBy(_._1)
val numCandidates = ordered.size
val sumWeights = ordered.map(_._2.toDouble).sum
val step = sumWeights / partitions
var cumWeight = 0.0
var target = step
val bounds = ArrayBuffer.empty[K]
var i = 0
var j = 0
var previousBound = Option.empty[K]
while ((i < numCandidates) && (j < partitions - 1)) {
val (key, weight) = ordered(i)
cumWeight += weight
if (cumWeight >= target) {
// Skip duplicate values.
if (previousBound.isEmpty || ordering.gt(key, previousBound.get)) {
bounds += key
target += step
j += 1
previousBound = Some(key)
}
}
i += 1
}
bounds.toArray
}
}
| gioenn/xSpark | core/src/main/scala/org/apache/spark/Partitioner.scala | Scala | apache-2.0 | 11,306 |
package org.apache.spark.sql
import com.datastax.spark.connector.SparkCassandraITFlatSpecBase
import com.datastax.spark.connector.cql.CassandraConnector
import com.datastax.spark.connector.rdd.{CqlWhereClause, CassandraTableScanRDD}
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.{Filter, SparkPlan, PhysicalRDD}
import scala.concurrent.Future
class CassandraPrunedFilteredScanSpec extends SparkCassandraITFlatSpecBase with Logging {
useCassandraConfig(Seq("cassandra-default.yaml.template"))
useSparkConf(defaultConf)
val conn = CassandraConnector(defaultConf)
val sqlContext: SQLContext = new SQLContext(sc)
val cassandraFormat = "org.apache.spark.sql.cassandra"
override def beforeAll(): Unit = {
conn.withSessionDo { session =>
createKeyspace(session)
awaitAll(
Future {
session.execute(
s"""CREATE TABLE IF NOT EXISTS $ks.colors
|(name TEXT, color TEXT, priority INT, PRIMARY KEY (name, priority)) """
.stripMargin)
},
Future {
session.execute(
s"""CREATE TABLE IF NOT EXISTS $ks.fields
|(k INT, a TEXT, b TEXT, c TEXT, d TEXT, e TEXT, PRIMARY KEY (k)) """
.stripMargin)
}
)
}
}
val colorOptions = Map("keyspace" -> ks, "table" -> "colors")
val fieldsOptions = Map("keyspace" -> ks, "table" -> "fields")
val withPushdown = Map("pushdown" -> "true")
val withoutPushdown = Map("pushdown" -> "false")
"CassandraPrunedFilteredScan" should "pushdown predicates for clustering keys" in {
val colorDF = sqlContext.read.format(cassandraFormat).options(colorOptions ++ withPushdown).load()
val executionPlan = colorDF.filter("priority > 5").queryExecution.executedPlan
val cts = findCassandraTableScanRDD(executionPlan)
cts.isDefined shouldBe true
cts.get.where shouldBe CqlWhereClause(Seq(""""priority" > ?"""), List(5))
}
it should "not pushdown predicates for clustering keys if filterPushdown is disabled" in {
val colorDF = sqlContext.read.format(cassandraFormat).options(colorOptions ++ withoutPushdown).load()
val executionPlan = colorDF.filter("priority > 5").queryExecution.executedPlan
val cts = findCassandraTableScanRDD(executionPlan)
cts.isDefined shouldBe true
cts.get.where shouldBe CqlWhereClause(Seq(), List())
}
it should "prune data columns" in {
val fieldsDF = sqlContext.read.format(cassandraFormat).options(fieldsOptions ++ withPushdown).load()
val executionPlan = fieldsDF.select("b","c","d").queryExecution.executedPlan
val cts = findCassandraTableScanRDD(executionPlan)
cts.isDefined shouldBe true
cts.get.selectedColumnNames should contain theSameElementsAs Seq("b", "c", "d")
}
it should "prune data columns if filterPushdown is disabled" in {
val fieldsDF = sqlContext.read.format(cassandraFormat).options(fieldsOptions ++ withoutPushdown).load()
val executionPlan = fieldsDF.select("b","c","d").queryExecution.executedPlan
val cts = findCassandraTableScanRDD(executionPlan)
cts.isDefined shouldBe true
cts.get.selectedColumnNames should contain theSameElementsAs Seq("b", "c", "d")
}
def findCassandraTableScanRDD(sparkPlan: SparkPlan): Option[CassandraTableScanRDD[_]] = {
def _findCassandraTableScanRDD(rdd: RDD[_]): Option[CassandraTableScanRDD[_]] = {
rdd match {
case ctsrdd: CassandraTableScanRDD[_] => Some(ctsrdd)
case other: RDD[_] => other.dependencies.iterator
.flatMap(dep => _findCassandraTableScanRDD(dep.rdd)).take(1).toList.headOption
}
}
sparkPlan match {
case prdd: PhysicalRDD => _findCassandraTableScanRDD(prdd.rdd)
case filter: Filter => findCassandraTableScanRDD(filter.child)
case _ => None
}
}
} | jimenefe/spark-cassandra-connector | spark-cassandra-connector/src/it/scala/org/apache/spark/sql/CassandraPrunedFilteredScanSpec.scala | Scala | apache-2.0 | 3,925 |
/**
* ζ©ε±ε¦δΈηBankAccountη±»οΌζ°η±»CheckingAccountε―Ήζ―欑εζ¬Ύεεζ¬Ύι½ζΆε1ηΎε
ηζη»θ΄Ή
class BankAccount(initialBalance:Double){
private var balance = initialBalance
def deposit(amount:Double) = { balance += amount; balance}
def withdraw(amount:Double) = {balance -= amount; balance}
}
*/
class BankAccount(initialBalance:Double){
private var balance = initialBalance
def deposit(amount:Double) = { balance += amount; balance}
def withdraw(amount:Double) = {balance -= amount; balance}
}
class CheckingAccount(initialBanlance:Double) extends BankAccount(initialBanlance){
override def deposit(amount:Double) = super.deposit(amount-1)
override def withdraw(amount:Double) = super.withdraw(amount+1)
} | vernonzheng/scala-for-the-Impatient | src/Chapter08/exercise01.scala | Scala | mit | 769 |
/**
* Copyright (c) 2016, Anthony Anderson<Illyohs>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package us.illyohs.civilmagiks.common.core.util
import scala.tools.nsc.util.HashSet
import net.minecraft.block.Block
import net.minecraft.world.World
import us.illyohs.civilmagiks.common.core.util.math.WorldPos
object BlockUtils {
def getAttachedCardinalBlocks(world: World, pos: WorldPos): Array[Block] = {
val ORIGIN = world.getBlockState(pos).getBlock
val NORTH = world.getBlockState(pos.north).getBlock
val SOUTH = world.getBlockState(pos.south).getBlock
val EAST = world.getBlockState(pos.east).getBlock
val WEST = world.getBlockState(pos.south).getBlock
val UP = world.getBlockState(pos.up).getBlock
val DOWN = world.getBlockState(pos.down)getBlock
Array(ORIGIN, NORTH, SOUTH, EAST, WEST, UP, DOWN)
}
}
| Illyohs/CivilMagicks | src/main/scala/us/illyohs/civilmagiks/common/core/util/BlockUtils.scala | Scala | bsd-2-clause | 2,156 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.jsinterop
import scala.scalajs.js
import js.annotation._
import org.scalajs.jasminetest.{JasmineTest, TestSuiteContext}
import scala.annotation.meta
object ExportsTest extends JasmineTest {
/** This package in the JS (export) namespace */
val jsPackage = js.Dynamic.global.org.scalajs.testsuite.jsinterop
describe("@JSExport") {
it("should offer exports for methods with implicit name") {
class Foo {
@JSExport
def bar(): Int = 42
@JSExport
def double(x: Int): Int = x*2
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(js.typeOf(foo.bar)).toBe("function")
expect(foo.bar()).toEqual(42)
expect(foo.double(3)).toEqual(6)
}
it("should offer exports for methods with explicit name") {
class Foo {
@JSExport("theAnswer")
def bar(): Int = 42
@JSExport("doubleTheParam")
def double(x: Int): Int = x*2
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(foo.bar).toBeUndefined
expect(js.typeOf(foo.theAnswer)).toBe("function")
expect(foo.theAnswer()).toEqual(42)
expect(foo.doubleTheParam(3)).toEqual(6)
}
it("should offer exports for methods with constant folded name") {
class Foo {
@JSExport(ExportNameHolder.methodName)
def bar(): Int = 42
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(foo.bar).toBeUndefined
expect(foo.myMethod()).toEqual(42)
}
it("should offer exports for protected methods") {
class Foo {
@JSExport
protected def bar(): Int = 42
@JSExport
protected[testsuite] def foo(): Int = 100
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(js.typeOf(foo.bar)).toBe("function")
expect(foo.bar()).toEqual(42)
expect(js.typeOf(foo.foo)).toBe("function")
expect(foo.foo()).toEqual(100)
}
it("should offer exports for properties with implicit name") {
class Foo {
private[this] var myY: String = "hello"
@JSExport
val answer: Int = 42
@JSExport
var x: Int = 3
@JSExport
def doubleX: Int = x*2
@JSExport
def y: String = myY + " get"
@JSExport
def y_=(v: String): Unit = myY = v + " set"
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(js.typeOf(foo.answer)).toBe("number")
expect(foo.answer).toEqual(42)
expect(foo.x).toEqual(3)
expect(foo.doubleX).toEqual(6)
foo.x = 23
expect(foo.x).toEqual(23)
expect(foo.doubleX).toEqual(46)
expect(foo.y).toEqual("hello get")
foo.y = "world"
expect(foo.y).toEqual("world set get")
}
it("should offer exports for properties with explicit name") {
class Foo {
private[this] var myY: String = "hello"
@JSExport("answer")
val answerScala: Int = 42
@JSExport("x")
var xScala: Int = 3
@JSExport("doubleX")
def doubleXScala: Int = xScala*2
@JSExport("y")
def yGetter: String = myY + " get"
@JSExport("y")
def ySetter_=(v: String): Unit = myY = v + " set"
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(foo.answerScala).toBeUndefined
expect(js.typeOf(foo.answer)).toBe("number")
expect(foo.answer).toEqual(42)
expect(foo.x).toEqual(3)
expect(foo.doubleX).toEqual(6)
foo.x = 23
expect(foo.x).toEqual(23)
expect(foo.doubleX).toEqual(46)
expect(foo.y).toEqual("hello get")
foo.y = "world"
expect(foo.y).toEqual("world set get")
}
it("should offer exports for protected properties") {
class Foo {
@JSExport
protected val x: Int = 42
@JSExport
protected[testsuite] val y: Int = 43
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(foo.x).toEqual(42)
expect(foo.y).toEqual(43)
}
it("should offer overloaded exports for methods") {
class Foo {
@JSExport("foobar")
def foo(): Int = 42
@JSExport("foobar")
def bar(x: Int): Int = x*2
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(js.typeOf(foo.foobar)).toBe("function")
expect(foo.foobar()).toEqual(42)
expect(foo.foobar(3)).toEqual(6)
}
it("should offer multiple exports for the same method") {
class Foo {
@JSExport
@JSExport("b")
@JSExport("c")
def a(): Int = 1
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(js.typeOf(foo.a)).toBe("function")
expect(js.typeOf(foo.b)).toBe("function")
expect(js.typeOf(foo.c)).toBe("function")
expect(foo.a()).toEqual(1)
expect(foo.b()).toEqual(1)
expect(foo.c()).toEqual(1)
}
it("should inherit exports from traits") {
trait Foo {
@JSExport
def x: Int
@JSExport
def method(x: Int): Int
}
class Bar extends Foo {
val x = 1
def method(x: Int) = 2 * x
}
val bar = (new Bar).asInstanceOf[js.Dynamic]
expect(bar.x).toEqual(1)
expect(js.typeOf(bar.method)).toBe("function")
expect(bar.method(2)).toEqual(4)
}
it("should offer overloading with inherited exports") {
class A {
@JSExport
def foo(x: Int) = 2*x
}
class B extends A{
@JSExport("foo")
def bar(x: String) = s"Hello $x"
}
val b = (new B).asInstanceOf[js.Dynamic]
expect(js.typeOf(b.foo)).toBe("function")
expect(b.foo(1)).toEqual(2)
expect(b.foo("World")).toEqual("Hello World")
}
it("should offer exports for generic methods") {
class Foo {
@JSExport
def gen[T <: AnyRef](x: T) = x
}
val x = (new Object).asInstanceOf[js.Any]
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(js.typeOf(foo.gen)).toBe("function")
expect(foo.gen(x)).toBe(x)
}
it("should offer exports for lambda return types") {
class Foo {
@JSExport
def lambda(x: Int) = (y: Int) => x + y
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(js.typeOf(foo.lambda)).toBe("function")
val lambda = foo.lambda(5).asInstanceOf[Function1[Int,Int]]
expect(lambda(4)).toEqual(9)
}
it("should offer exports for multi parameter lists") {
class Foo {
@JSExport
def multiParam(x: Int)(y: Int): Int = x + y
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(js.typeOf(foo.multiParam)).toBe("function")
expect(foo.multiParam(5,6)).toEqual(11)
}
it("should offer exports for default arguments") {
class Foo {
@JSExport
def defArg(x: Int = 1) = x
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(js.typeOf(foo.defArg)).toBe("function")
expect(foo.defArg(5)).toEqual(5)
}
it("should offer exports for weird stuff") {
class UhOh {
// Something no one should export
@JSExport
def ahem[T : Comparable](x: T)(implicit y: Int) = ???
}
val x = (new UhOh).asInstanceOf[js.Dynamic]
expect(js.typeOf(x.ahem)).toBe("function")
}
it("should offer exports with value class return types") {
class Foo {
@JSExport
def vc(x: Int) = new SomeValueClass(x)
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(js.typeOf(foo.vc)).toBe("function")
// The result should be a boxed SomeValueClass
val result = foo.vc(5)
expect(js.typeOf(result)).toEqual("object")
expect((result: Any).isInstanceOf[SomeValueClass]).toBeTruthy
expect((result: Any) == (new SomeValueClass(5))).toBeTruthy
}
it("should allow exports with Any as return type") {
class A
class Foo {
@JSExport
def foo(switch: Boolean): Any =
if (switch) 1 else new A
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(foo.foo(true).isInstanceOf[Int]).toBeTruthy
expect(foo.foo(false).isInstanceOf[A]).toBeTruthy
}
it("should accept boxed value classes as parameter") {
class Foo {
@JSExport
def vc(x: SomeValueClass) = x.i
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(js.typeOf(foo.vc)).toBe("function")
// The parameter should be a boxed SomeValueClass
val valueCls = new SomeValueClass(7)
val result = foo.vc(valueCls.asInstanceOf[js.Any])
expect(js.typeOf(result)).toEqual("number")
expect(result).toEqual(7)
}
it("should offer exports for overridden methods with refined return type") {
class A
class B extends A
class C1 {
@JSExport
def x: A = new A
}
class C2 extends C1 {
override def x: B = new B
}
val c2 = (new C2).asInstanceOf[js.Dynamic]
expect(c2.x.isInstanceOf[B]).toBeTruthy
}
it("should offer exports for methods with refined types as return type") {
class A {
@JSExport
def foo(x: String): js.Object with js.Dynamic =
js.Dynamic.literal(arg = x)
}
val a = (new A).asInstanceOf[js.Dynamic]
expect(a.foo("hello")).toEqual(js.Dynamic.literal(arg = "hello"))
}
it("should offer exports for variable argument methods - #393") {
class A {
@JSExport
def foo(i: String*) = i.mkString("|")
}
val a = (new A).asInstanceOf[js.Dynamic]
expect(a.foo()).toEqual("")
expect(a.foo("a", "b", "c")).toEqual("a|b|c")
expect(a.foo("a", "b", "c", "d")).toEqual("a|b|c|d")
}
it("should correctly overload in view of difficult repeated parameter lists") {
class A {
@JSExport
def foo(a: String, b: String, i: Int, c: String) = 1
@JSExport
def foo(a: String*) = 2
@JSExport
def foo(x: Int)(a: Int*) = x * 100000 + a.sum
}
val a = (new A).asInstanceOf[js.Dynamic]
expect(a.foo()).toEqual(2)
expect(a.foo("asdf")).toEqual(2)
expect(a.foo("asdf", "foo")).toEqual(2)
expect(a.foo("asdf", "foo", "bar")).toEqual(2)
expect(a.foo("asdf", "foo", 1, "bar")).toEqual(1)
expect(a.foo("asdf", "foo", "foo", "bar")).toEqual(2)
expect(a.foo(5, 1, 2, 3, 10)).toEqual(500016)
expect(a.foo(1)).toEqual(100000)
}
it("should offer exports with default arguments") {
class A {
var oneCount: Int = 0
def one = {
oneCount += 1
1
}
@JSExport
def foo(a: Int = one)(b: Int = a + one)(c: Int = b + one) =
a + b + c
}
val a = new A
val jsa = a.asInstanceOf[js.Dynamic]
expect(jsa.foo()).toEqual(6)
expect(a.oneCount).toEqual(3)
expect(jsa.foo(2)).toEqual(9)
expect(a.oneCount).toEqual(5)
expect(jsa.foo(2,4)).toEqual(11)
expect(a.oneCount).toEqual(6)
expect(jsa.foo(2,4,10)).toEqual(16)
expect(a.oneCount).toEqual(6)
expect(jsa.foo((),4,10)).toEqual(15)
expect(a.oneCount).toEqual(7)
expect(jsa.foo((),4)).toEqual(10)
expect(a.oneCount).toEqual(9)
}
it("should correctly overload methods in presence of default parameters") {
class A {
@JSExport
def foo(a: Int)(b: Int = 5)(c: Int = 7) = 1000 + a + b + c
@JSExport
def foo(a: Int, b: String) = 2
@JSExport
def foo(a: Int, b: Int, c: String) = 3
}
val a = (new A).asInstanceOf[js.Dynamic]
expect(a.foo(1)).toEqual(1013)
expect(a.foo(1, 4)).toEqual(1012)
expect(a.foo(1, 4, 5)).toEqual(1010)
expect(a.foo(1, "foo")).toEqual(2)
expect(a.foo(1, 2, "foo")).toEqual(3)
}
it("should prefer overloads taking a Unit over methods with default parameters") {
class A {
@JSExport
def foo(a: Int)(b: String = "asdf") = s"$a $b"
@JSExport
def foo(a: Int, b: Unit) = "woot"
}
val a = (new A).asInstanceOf[js.Dynamic]
expect(a.foo(1)).toEqual("1 asdf")
expect(a.foo(2, "omg")).toEqual("2 omg")
expect(a.foo(1, ())).toEqual("woot")
}
it("should correctly overload methods in presence of default parameters and repeated parameters") {
class A {
@JSExport
def foo(x: Int, y: Int = 1) = x + y
@JSExport
def foo(x: String*) = x.mkString("|")
}
val a = (new A).asInstanceOf[js.Dynamic]
expect(a.foo(1)).toEqual(2)
expect(a.foo(1, 2)).toEqual(3)
expect(a.foo()).toEqual("")
expect(a.foo("foo")).toEqual("foo")
expect(a.foo("foo","bar")).toEqual("foo|bar")
}
it("should correctly overload exports called `toString`") {
class A {
override def toString(): String = "no arg"
@JSExport
def toString(x: Int): String = s"with arg: $x"
}
val a = (new A).asInstanceOf[js.Dynamic]
expect(a.applyDynamic("toString")()).toEqual("no arg")
expect(a.applyDynamic("toString")(1)).toEqual("with arg: 1")
}
it("should allow to explicitly export toString") {
class A {
@JSExport("toString")
override def toString(): String = "called"
}
val a = (new A).asInstanceOf[js.Dynamic]
expect(a.applyDynamic("toString")()).toEqual("called")
}
it("should correctly box repeated parameter lists with value classes") {
class A {
@JSExport
def foo(vcs: SomeValueClass*) = vcs.map(_.i).sum
}
val vc1 = new SomeValueClass(1)
val vc2 = new SomeValueClass(2)
val a = (new A).asInstanceOf[js.Dynamic]
expect(a.foo(vc1.asInstanceOf[js.Any], vc2.asInstanceOf[js.Any])).toEqual(3)
}
it("should offer exports for objects with implicit name") {
val accessor = jsPackage.ExportedObject
expect(accessor).toBeDefined
expect(js.typeOf(accessor)).toEqual("function")
val obj = accessor()
expect(obj).toBeDefined
expect(js.typeOf(obj)).toEqual("object")
expect(obj.witness).toEqual("witness")
}
it("should offer exports for objects with explicit name") {
val accessor = js.Dynamic.global.TheExportedObject
expect(accessor).toBeDefined
expect(js.typeOf(accessor)).toEqual("function")
val obj = accessor()
expect(obj).toBeDefined
expect(js.typeOf(obj)).toEqual("object")
expect(obj.witness).toEqual("witness")
}
it("should offer exports for objects with qualified name") {
val accessor = js.Dynamic.global.qualified.testobject.ExportedObject
expect(accessor).toBeDefined
expect(js.typeOf(accessor)).toEqual("function")
val obj = accessor()
expect(obj).toBeDefined
expect(js.typeOf(obj)).toEqual("object")
expect(obj.witness).toEqual("witness")
}
it("should offer exports for objects with constant folded name") {
val accessor = js.Dynamic.global.ConstantFoldedObjectExport
expect(accessor).toBeDefined
expect(js.typeOf(accessor)).toEqual("function")
val obj = accessor()
expect(obj).toBeDefined
expect(js.typeOf(obj)).toEqual("object")
expect(obj.witness).toEqual("witness")
}
it("should offer exports for protected objects") {
val accessor = jsPackage.ProtectedExportedObject
expect(accessor).toBeDefined
expect(js.typeOf(accessor)).toEqual("function")
val obj = accessor()
expect(obj).toBeDefined
expect(js.typeOf(obj)).toEqual("object")
expect(obj.witness).toEqual("witness")
}
it("should offer exports for classes with implicit name") {
val constr = jsPackage.ExportedClass
expect(constr).toBeDefined
expect(js.typeOf(constr)).toEqual("function")
val obj = js.Dynamic.newInstance(constr)(5)
expect(obj.x).toEqual(5)
}
it("should offer exports for classes with explicit name") {
val constr = js.Dynamic.global.TheExportedClass
expect(constr).toBeDefined
expect(js.typeOf(constr)).toEqual("function")
val obj = js.Dynamic.newInstance(constr)(5)
expect(obj.x).toEqual(5)
}
it("should offer exports for classes with qualified name") {
val constr = js.Dynamic.global.qualified.testclass.ExportedClass
expect(constr).toBeDefined
expect(js.typeOf(constr)).toEqual("function")
val obj = js.Dynamic.newInstance(constr)(5)
expect(obj.x).toEqual(5)
}
it("should offer exports for classes with constant folded name") {
val constr = js.Dynamic.global.ConstantFoldedClassExport
expect(constr).toBeDefined
expect(js.typeOf(constr)).toEqual("function")
val obj = js.Dynamic.newInstance(constr)(5)
expect(obj.x).toEqual(5)
}
it("should offer exports for protected classes") {
val constr = jsPackage.ProtectedExportedClass
expect(constr).toBeDefined
expect(js.typeOf(constr)).toEqual("function")
val obj = js.Dynamic.newInstance(constr)(5)
expect(obj.x).toEqual(5)
}
it("should offer export for classes with repeated parameters in ctor") {
val constr = jsPackage.ExportedVarArgClass
expect(js.Dynamic.newInstance(constr)().result).toEqual("")
expect(js.Dynamic.newInstance(constr)("a").result).toEqual("a")
expect(js.Dynamic.newInstance(constr)("a", "b").result).toEqual("a|b")
expect(js.Dynamic.newInstance(constr)("a", "b", "c").result).toEqual("a|b|c")
expect(js.Dynamic.newInstance(constr)(5, "a").result).toEqual("Number: <5>|a")
}
it("should offer export for classes with default parameters in ctor") {
val constr = jsPackage.ExportedDefaultArgClass
expect(js.Dynamic.newInstance(constr)(1,2,3).result).toEqual(6)
expect(js.Dynamic.newInstance(constr)(1).result).toEqual(106)
expect(js.Dynamic.newInstance(constr)(1,2).result).toEqual(103)
}
it("should correctly disambiguate overloads involving longs") {
class Foo {
@JSExport
def foo(x: Int) = 1
@JSExport
def foo(x: Long) = 2
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
// Create a long factory we can call dynamically to retrieve an unboxed
// long which is typed as a js.Any
object LongFactory {
@JSExport
def aLong = 1L
}
val trueJsLong = LongFactory.asInstanceOf[js.Dynamic].aLong
expect(foo.foo(1)).toEqual(1)
expect(foo.foo(trueJsLong)).toEqual(2)
}
it("should return boxed Chars") {
class Foo {
@JSExport
def bar(x: Int): Char = x.toChar
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
val funs = js.eval("""
var funs = {
testIsChar: function(foo) { return JSUtils().isChar(foo.bar(65)); },
testCharValue: function(foo) { return JSUtils().charToString(foo.bar(65)); }
}; funs;
""").asInstanceOf[js.Dynamic]
expect(funs.testIsChar(foo)).toBeTruthy
expect(funs.testCharValue(foo)).toEqual("A")
}
it("should take boxed Chars as parameter") {
class Foo {
@JSExport
def bar(x: Char): Int = x.toInt
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
val f = js.eval("""
var f = function(foo) { return foo.bar(JSUtils().stringToChar('e')); };
f;
""").asInstanceOf[js.Dynamic]
expect(f(foo)).toEqual('e'.toInt)
}
it("should be able to disambiguate an Int from a Char") {
class Foo {
@JSExport
def bar(x: Char): String = "char: "+x
@JSExport
def bar(x: Int): String = "int: "+x
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
val funs = js.eval("""
var funs = {
testChar: function(foo) { return foo.bar(JSUtils().stringToChar('S')); },
testInt: function(foo) { return foo.bar(68); }
}; funs;
""").asInstanceOf[js.Dynamic]
expect(funs.testChar(foo)).toEqual("char: S")
expect(funs.testInt(foo)).toEqual("int: 68")
}
it("should support exporting constructor parameter fields - #970") {
class Foo(@(JSExport @meta.field) val x: Int)
val foo = (new Foo(1)).asInstanceOf[js.Dynamic]
expect(foo.x).toEqual(1)
}
it("should support exporting case class fields - #970") {
case class Foo(@(JSExport @meta.field) x: Int)
val foo = (new Foo(1)).asInstanceOf[js.Dynamic]
expect(foo.x).toEqual(1)
}
it("should support exporting lazy values - #977") {
class Foo {
@JSExport
lazy val x = 1
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(foo.x).toEqual(1)
}
it("should support exporting all members of a class") {
@JSExportAll
class Foo {
val a = 1
@JSExport // double annotation allowed
def b = 2
lazy val c = 3
class Bar // not exported, but should not fail
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(foo.a).toEqual(1)
expect(foo.b).toEqual(2)
expect(foo.c).toEqual(3)
}
it("should not export synthetic members with @JSExportAll - #1195") {
@JSExportAll
case class Foo(x: Int)
val foo = Foo(1).asInstanceOf[js.Dynamic]
expect(foo.x).toEqual(1)
expect(foo.copy).toBeUndefined
}
it("should allow mutliple equivalent JSExport annotations") {
class Foo {
@JSExport
@JSExport("a")
@JSExport
@JSExport("a")
def b = 1
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(foo.b).toEqual(1)
}
it("should support named exports") {
import js.Dynamic.{literal => lit}
class FooNamed {
@JSExportNamed("bar1")
def bar(x: Int, y: Int) = x + y
@JSExportNamed("bar2")
@JSExport
def bar(x: Int = 1)(y: Int = x)(z: Int = y) = x + y + z
}
val foo = (new FooNamed).asInstanceOf[js.Dynamic]
expect(foo.bar1(lit(x = 1, y = 2))).toEqual(3)
if (TestSuiteContext.hasTag("compliant-asinstanceof"))
expect(() => foo.bar1(lit(x = 1))).toThrow // missing arg
expect(foo.bar2(lit())).toEqual(3)
expect(foo.bar2(lit(x = 2))).toEqual(6)
expect(foo.bar2(lit(y = 2))).toEqual(5)
expect(foo.bar2(lit(y = 2, z = 1))).toEqual(4)
expect(foo.bar(2)).toEqual(6)
expect(foo.bar(2,3)).toEqual(8)
}
it("should support named constructor exports") {
import js.Dynamic.{literal => lit}
val constr = jsPackage.ExportedNamedArgClass
expect(js.Dynamic.newInstance(constr)(lit(x = 2)).result).toEqual("22true")
expect(js.Dynamic.newInstance(constr)(lit(y = "foo")).result).toEqual("1foofalse")
expect(js.Dynamic.newInstance(constr)(lit(z = true, y = "foo")).result).toEqual("1footrue")
}
it("should support exporting under 'org' namespace - #364") {
val accessor = js.Dynamic.global.org.ExportedUnderOrgObject
expect(js.typeOf(accessor)).toEqual("function")
val obj = accessor()
expect(obj).toBe(ExportedUnderOrgObject.asInstanceOf[js.Any])
}
when("compliant-asinstanceof").
it("should reject bad values for arguments of primitive value type") {
class Foo {
@JSExport
def doBool(x: Boolean) = x
@JSExport
def doChar(x: Char) = x
@JSExport
def doByte(x: Byte) = x
@JSExport
def doShort(x: Short) = x
@JSExport
def doInt(x: Int) = x
@JSExport
def doLong(x: Long) = x
@JSExport
def doFloat(x: Float) = x
@JSExport
def doDouble(x: Double) = x
@JSExport
def doUnit(x: Unit) = x
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
// Nulls
expect(() => foo.doBool(null)).toThrow
expect(() => foo.doChar(null)).toThrow
expect(() => foo.doByte(null)).toThrow
expect(() => foo.doShort(null)).toThrow
expect(() => foo.doInt(null)).toThrow
expect(() => foo.doLong(null)).toThrow
expect(() => foo.doFloat(null)).toThrow
expect(() => foo.doDouble(null)).toThrow
expect(() => foo.doUnit(null)).toThrow
// Class type
expect(() => foo.doBool(foo)).toThrow
expect(() => foo.doChar(foo)).toThrow
expect(() => foo.doByte(foo)).toThrow
expect(() => foo.doShort(foo)).toThrow
expect(() => foo.doInt(foo)).toThrow
expect(() => foo.doLong(foo)).toThrow
expect(() => foo.doFloat(foo)).toThrow
expect(() => foo.doDouble(foo)).toThrow
expect(() => foo.doUnit(foo)).toThrow
// Bad values
expect(() => foo.doBool(1)).toThrow
expect(() => foo.doBool("a")).toThrow
expect(() => foo.doChar(1)).toThrow
expect(() => foo.doChar("a")).toThrow
expect(() => foo.doByte(300)).toThrow
expect(() => foo.doByte("a")).toThrow
expect(() => foo.doShort(32768)).toThrow
expect(() => foo.doShort("a")).toThrow
expect(() => foo.doInt(3.2)).toThrow
expect(() => foo.doInt("a")).toThrow
expect(() => foo.doLong(3.2)).toThrow
expect(() => foo.doLong(3)).toThrow
expect(() => foo.doLong("a")).toThrow
expect(() => foo.doFloat("a")).toThrow
}
when("compliant-asinstanceof").
it("should reject bad values for arguments of value class type - #613") {
class Foo {
@JSExport
def doVC(x: SomeValueClass) = x
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(() => foo.doVC(null)).toThrow
expect(() => foo.doVC(foo)).toThrow
expect(() => foo.doVC(1)).toThrow
expect(() => foo.doVC("a")).toThrow
}
when("compliant-asinstanceof").
it("should reject bad values for arguments of class type") {
class A
class B
class Foo {
@JSExport
def doA(x: A) = x
}
val foo = (new Foo).asInstanceOf[js.Dynamic]
expect(() => foo.doA(1)).toThrow
expect(() => foo.doA((new B).asInstanceOf[js.Any])).toThrow
expect(() => foo.doA("a")).toThrow
}
it("should offer exports for classes ending in _= - #1090") {
val constr = jsPackage.ExportClassSetterNamed_=
val obj = js.Dynamic.newInstance(constr)()
expect(obj.x).toBe(1)
}
it("should offer exports for objects ending in _= - #1090") {
expect(jsPackage.ExportObjSetterNamed_=().x).toBe(1)
}
} // describe
describe("@JSExportDescendentObjects") {
it("should offer auto exports for objects extending a trait") {
val accessor =
js.Dynamic.global.org.scalajs.testsuite.jsinterop.AutoExportedTraitObject
expect(accessor).toBeDefined
expect(js.typeOf(accessor)).toEqual("function")
val obj = accessor()
expect(obj).toBeDefined
expect(obj).toBe(AutoExportedTraitObject.asInstanceOf[js.Any])
}
it("should offer auto exports for objects extending a class") {
val accessor =
js.Dynamic.global.org.scalajs.testsuite.jsinterop.AutoExportedClassObject
expect(accessor).toBeDefined
expect(js.typeOf(accessor)).toEqual("function")
val obj = accessor()
expect(obj).toBeDefined
expect(obj).toBe(AutoExportedClassObject.asInstanceOf[js.Any])
}
}
describe("@JSExportDescendentClasses") {
it("should offer auto exports for classes extending a trait") {
val ctor =
js.Dynamic.global.org.scalajs.testsuite.jsinterop.AutoExportedTraitClass
expect(ctor).toBeDefined
expect(js.typeOf(ctor)).toEqual("function")
val obj1 = js.Dynamic.newInstance(ctor)()
expect(obj1).toBeDefined
expect(obj1.x).toBe(5)
val obj2 = js.Dynamic.newInstance(ctor)(100)
expect(obj2).toBeDefined
expect(obj2.x).toBe(100)
}
it("should offer auto exports for classes extending a class") {
val ctor =
js.Dynamic.global.org.scalajs.testsuite.jsinterop.AutoExportedClassClass
expect(ctor).toBeDefined
expect(js.typeOf(ctor)).toEqual("function")
val obj1 = js.Dynamic.newInstance(ctor)()
expect(obj1).toBeDefined
expect(obj1.x).toBe(5)
val obj2 = js.Dynamic.newInstance(ctor)(100)
expect(obj2).toBeDefined
expect(obj2.x).toBe(100)
}
}
}
object ExportNameHolder {
final val className = "ConstantFoldedClassExport"
final val objectName = "ConstantFoldedObjectExport"
final val methodName = "myMethod"
}
@JSExport
@JSExport("TheExportedObject")
@JSExport("qualified.testobject.ExportedObject") // purposefully halfway the same as ExportedClass
@JSExport(ExportNameHolder.objectName)
object ExportedObject {
@JSExport
def witness: String = "witness"
}
@JSExport
protected object ProtectedExportedObject {
@JSExport
def witness: String = "witness"
}
@JSExport
@JSExport("TheExportedClass")
@JSExport("qualified.testclass.ExportedClass") // purposefully halfway the same as ExportedObject
@JSExport(ExportNameHolder.className)
class ExportedClass(_x: Int) {
@JSExport
val x = _x
}
@JSExport
protected class ProtectedExportedClass(_x: Int) {
@JSExport
val x = _x
}
@JSExport
class ExportedVarArgClass(x: String*) {
@JSExport
def this(x: Int, y: String) = this(s"Number: <$x>", y)
@JSExport
def result = x.mkString("|")
}
@JSExport
class ExportedDefaultArgClass(x: Int, y: Int, z: Int) {
@JSExport
def this(x: Int, y: Int = 5) = this(x, y, 100)
@JSExport
def result = x + y + z
}
@JSExport("org.ExportedUnderOrgObject")
object ExportedUnderOrgObject
@JSExportDescendentClasses
@JSExportDescendentObjects
trait AutoExportTrait
object AutoExportedTraitObject extends AutoExportTrait
class AutoExportedTraitClass(_x: Int) extends AutoExportTrait {
def this() = this(5)
@JSExport
def x: Int = _x
}
@JSExportDescendentClasses
@JSExportDescendentObjects
class AutoExportClass
object AutoExportedClassObject extends AutoExportClass
class AutoExportedClassClass(_x: Int) extends AutoExportTrait {
def this() = this(5)
@JSExport
def x: Int = _x
}
class SomeValueClass(val i: Int) extends AnyVal
@JSExportNamed
class ExportedNamedArgClass(x: Int = 1)(y: String = x.toString)(z: Boolean = y != "foo") {
@JSExport
val result = x + y + z
}
@JSExport
class ExportClassSetterNamed_= {
@JSExport
val x = 1
}
@JSExport
object ExportObjSetterNamed_= {
@JSExport
val x = 1
}
| colinrgodsey/scala-js | test-suite/src/test/scala/org/scalajs/testsuite/jsinterop/ExportsTest.scala | Scala | bsd-3-clause | 31,150 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.algebird
import scala.collection.immutable.SortedSet
/**
* A Count-Min sketch is a probabilistic data structure used for summarizing
* streams of data in sub-linear space.
*
* It works as follows. Let (eps, delta) be two parameters that describe the
* confidence in our error estimates, and let d = ceil(ln 1/delta)
* and w = ceil(e / eps). Then:
*
* - Take d pairwise independent hash functions h_i, each of which maps
* onto the domain [0, w - 1].
* - Create a 2-dimensional table of counts, with d rows and w columns,
* initialized with all zeroes.
* - When a new element x arrives in the stream, update the table of counts
* by setting counts[i, h_i[x]] += 1, for each 1 <= i <= d.
* - (Note the rough similarity to a Bloom filter.)
*
* As an example application, suppose you want to estimate the number of
* times an element x has appeared in a data stream so far.
* The Count-Min sketch estimate of this frequency is
*
* min_i { counts[i, h_i[x]] }
*
* With probability at least 1 - delta, this estimate is within eps * N
* of the true frequency (i.e., true frequency <= estimate <= true frequency + eps * N),
* where N is the total size of the stream so far.
*
* See http://www.eecs.harvard.edu/~michaelm/CS222/countmin.pdf for technical details,
* including proofs of the estimates and error bounds used in this implementation.
*
* Parts of this implementation are taken from
* https://github.com/clearspring/stream-lib/blob/master/src/main/java/com/clearspring/analytics/stream/frequency/CountMinSketch.java
*
* @author Edwin Chen
*/
/**
* Monoid for adding Count-Min sketches.
*
* eps and delta are parameters that bound the error of each query estimate. For example, errors in
* answering queries (e.g., how often has element x appeared in the stream described by the sketch?)
* are often of the form: "with probability p >= 1 - delta, the estimate is close to the truth by
* some factor depending on eps."
*
* @eps A parameter that bounds the error of each query estimate.
* @delta A bound on the probability that a query estimate does not lie within some small interval
* (an interval that depends on eps) around the truth.
* @seed A seed to initialize the random number generator used to create the pairwise independent
* hash functions.
* @heavyHittersPct A threshold for finding heavy hitters, i.e., elements that appear at least
* (heavyHittersPct * totalCount) times in the stream.
*/
class CountMinSketchMonoid(eps : Double, delta : Double, seed : Int,
heavyHittersPct : Double = 0.01) extends Monoid[CMS] {
assert(0 < eps && eps < 1, "eps must lie in (0, 1)")
assert(0 < delta && delta < 1, "delta must lie in (0, 1)")
assert(0 < heavyHittersPct && heavyHittersPct < 1, "heavyHittersPct must lie in (0, 1)")
// Typically, we would use d pairwise independent hash functions of the form
//
// h_i(x) = a_i * x + b_i (mod p)
//
// But for this particular application, setting b_i does not matter
// (since all it does is shift the results of a particular hash),
// so we omit it and simply use hash functions of the form
//
// h_i(x) = a_i * x (mod p)
val hashes : Seq[CMSHash] = {
val r = new scala.util.Random(seed)
val numHashes = CMS.depth(delta)
val numCounters = CMS.width(eps)
(0 to (numHashes - 1)).map { _ => CMSHash(r.nextInt, 0, numCounters) }
}
val params = CMSParams(hashes, eps, delta, heavyHittersPct)
val zero : CMS = CMSZero(params)
/**
* We assume the Count-Min sketches on the left and right use the same hash functions.
*/
def plus(left : CMS, right : CMS) : CMS = left ++ right
/**
* Create a Count-Min sketch out of a single item or data stream.
*/
def create(item : Long) : CMS = CMSItem(item, params)
def create(data : Seq[Long]) : CMS = {
data.foldLeft(zero) { case (acc, x) => plus(acc, create(x)) }
}
}
object CMS {
def monoid(eps : Double, delta : Double, seed : Int, heavyHittersPct : Double = 0.01) =
new CountMinSketchMonoid(eps, delta, seed, heavyHittersPct)
def monoid(depth : Int, width : Int, seed : Int, heavyHittersPct : Double) =
new CountMinSketchMonoid(CMS.eps(width), CMS.delta(depth), seed, heavyHittersPct)
/**
* Functions to translate between (eps, delta) and (depth, width). The translation is:
* depth = ceil(ln 1/delta)
* width = ceil(e / eps)
*/
def eps(width : Int) = scala.math.exp(1.0) / width
def delta(depth : Int) = 1.0 / scala.math.exp(depth)
def depth(delta : Double) = scala.math.ceil(scala.math.log(1.0 / delta)).toInt
def width(eps : Double) = scala.math.ceil(scala.math.exp(1) / eps).toInt
}
/**
* The actual Count-Min sketch data structure.
*/
sealed abstract class CMS extends java.io.Serializable {
// Parameters used to bound confidence in error estimates.
def eps : Double
def delta : Double
// Number of hash functions.
def depth : Int = CMS.depth(delta)
// Number of counters per hash function.
def width : Int = CMS.width(eps)
def ++(other : CMS) : CMS
/**
* Returns an estimate of the total number of times this item has been seen
* in the stream so far. This estimate is an upper bound.
*
* It is always true that trueFrequency <= estimatedFrequency.
* With probability p >= 1 - delta, it also holds that
* estimatedFrequency <= trueFrequency + eps * totalCount.
*/
def frequency(item: Long): Approximate[Long]
/**
* Returns an estimate of the inner product against another data stream.
*
* In other words, let a_i denote the number of times element i has been seen in
* the data stream summarized by this CMS, and let b_i denote the same for the other CMS.
* Then this returns an estimate of <a, b> = \\sum a_i b_i
*
* Note: this can also be viewed as the join size between two relations.
*
* It is always true that actualInnerProduct <= estimatedInnerProduct.
* With probability p >= 1 - delta, it also holds that
* estimatedInnerProduct <= actualInnerProduct + eps * thisTotalCount * otherTotalCount
*/
def innerProduct(other: CMS): Approximate[Long]
/**
* Finds all heavy hitters, i.e., elements in the stream that appear at least
* (heavyHittersPct * totalCount) times.
*
* Every item that appears at least (heavyHittersPct * totalCount) times is output,
* and with probability p >= 1 - delta, no item whose count is less than
* (heavyHittersPct - eps) * totalCount is output.
*
* Note that the set of heavy hitters contains at most 1 / heavyHittersPct
* elements, so keeping track of all elements that appear more than (say) 1% of the
* time requires tracking at most 100 items.
*/
def heavyHittersPct : Double
def heavyHitters : Set[Long]
// Total number of elements seen in the data stream so far.
def totalCount : Long
// The first frequency moment is the total number of elements in the stream.
def f1 : Long = totalCount
// The second frequency moment is \\sum a_i^2, where a_i is the count of the ith element.
def f2 : Approximate[Long] = innerProduct(this)
}
/**
* Used for initialization.
*/
case class CMSZero(params : CMSParams) extends CMS {
def eps : Double = params.eps
def delta : Double = params.delta
def heavyHittersPct : Double = params.heavyHittersPct
def totalCount = 0L
def ++(other : CMS) = other
def frequency(item : Long) = Approximate.exact(0L)
def innerProduct(other : CMS) = Approximate.exact(0L)
def heavyHitters = Set[Long]()
}
/**
* Used for holding a single element, to avoid repeatedly adding elements from
* sparse counts tables.
*/
case class CMSItem(item : Long, params : CMSParams) extends CMS {
def eps : Double = params.eps
def delta : Double = params.delta
def heavyHittersPct : Double = params.heavyHittersPct
def totalCount = 1L
def ++(other : CMS) : CMS = {
other match {
case other : CMSZero => this
case other : CMSItem => CMSInstance(params) + item + other.item
case other : CMSInstance => other + item
}
}
def frequency(x : Long) = if (item == x) Approximate.exact(1L) else Approximate.exact(0L)
def innerProduct(other : CMS) : Approximate[Long] = other.frequency(item)
def heavyHitters = Set(item)
}
/**
* The general Count-Min sketch structure, used for holding any number of elements.
*/
case class CMSInstance(countsTable : CMSCountsTable, totalCount : Long,
hhs : HeavyHitters, params : CMSParams) extends CMS {
def eps : Double = params.eps
def delta : Double = params.delta
def heavyHittersPct : Double = params.heavyHittersPct
def ++(other : CMS) : CMS = {
other match {
case other : CMSZero => this
case other : CMSItem => this + other.item
case other : CMSInstance => {
val newTotalCount = totalCount + other.totalCount
val newHhs = (hhs ++ other.hhs).dropCountsBelow(params.heavyHittersPct * newTotalCount)
CMSInstance(countsTable ++ other.countsTable, newTotalCount, newHhs, params)
}
}
}
private def makeApprox(est: Long): Approximate[Long] = {
if(est == 0L) {
Approximate.exact(0L)
}
else {
val lower = math.max(0L, est - (eps * totalCount).toLong)
Approximate(lower, est, est, 1 - delta)
}
}
def frequency(item : Long) : Approximate[Long] = {
val estimates = countsTable.counts.zipWithIndex.map { case (row, i) =>
row(params.hashes(i)(item))
}
makeApprox(estimates.min)
}
/**
* Let X be a CMS, and let count_X[j, k] denote the value in X's 2-dimensional count table at row j and
* column k.
* Then the Count-Min sketch estimate of the inner product between A and B is the minimum inner product
* between their rows:
* estimatedInnerProduct = min_j (\\sum_k count_A[j, k] * count_B[j, k])
*/
def innerProduct(other : CMS) : Approximate[Long] = {
other match {
case other : CMSInstance => {
assert((other.depth, other.width) == (depth, width), "Tables must have the same dimensions.")
def innerProductAtDepth(d : Int) = (0 to (width - 1)).map { w =>
countsTable.getCount(d, w) * other.countsTable.getCount(d, w)
}.sum
val est = (0 to (depth - 1)).map { innerProductAtDepth(_) }.min
Approximate(est - (eps * totalCount * other.totalCount).toLong, est, est, 1 - delta)
}
case _ => other.innerProduct(this)
}
}
def heavyHitters : Set[Long] = hhs.items
/**
* Updates the sketch with a new element from the data stream.
*/
def +(item : Long) : CMSInstance = this + (item, 1L)
def +(item : Long, count : Long) : CMSInstance = {
if (count < 0) {
throw new Exception("Negative counts not implemented")
} else {
val newHhs = updateHeavyHitters(item, count)
val newCountsTable =
(0 to (depth - 1)).foldLeft(countsTable) { case (table, row) =>
val pos = (row, params.hashes(row)(item))
table + (pos, count)
}
CMSInstance(newCountsTable, totalCount + count, newHhs, params)
}
}
/**
* Updates the data structure of heavy hitters when a new item (with associated count)
* enters the stream.
*/
private def updateHeavyHitters(item : Long, count : Long) : HeavyHitters = {
val oldItemCount = frequency(item).estimate
val newItemCount = oldItemCount + count
val newTotalCount = totalCount + count
// If the new item is a heavy hitter, add it, and remove any previous instances.
val newHhs =
if (newItemCount >= heavyHittersPct * newTotalCount) {
hhs - HeavyHitter(item, oldItemCount) + HeavyHitter(item, newItemCount)
} else {
hhs
}
// Remove any items below the new heavy hitter threshold.
newHhs.dropCountsBelow(heavyHittersPct * newTotalCount)
}
}
object CMSInstance {
// Initializes a CMSInstance with all zeroes.
def apply(params : CMSParams) : CMSInstance = {
val countsTable = CMSCountsTable(CMS.depth(params.delta), CMS.width(params.eps))
CMSInstance(countsTable, 0, HeavyHitters(), params)
}
}
/**
* The Count-Min sketch uses pairwise independent hash functions drawn from
* a universal hashing family of the form
*
* h(x) = [a * x + b (mod p)] (mod m)
*/
case class CMSHash(a : Int, b : Int, width : Int) extends Function1[Long, Int] {
val PRIME_MODULUS = (1L << 31) - 1
/**
* Returns a * x + b (mod p) (mod width)
*/
def apply(x : Long) : Int = {
val unmodded = a * x + b
// Apparently a super fast way of computing x mod 2^p-1
// See page 149 of
// http://www.cs.princeton.edu/courses/archive/fall09/cos521/Handouts/universalclasses.pdf
// after Proposition 7.
val modded = (unmodded + (unmodded >> 32)) & PRIME_MODULUS
// Modulo-ing integers is apparently twice as fast as
// modulo-ing Longs.
modded.toInt % width
}
}
/**
* The 2-dimensional table of counters used in the Count-Min sketch.
* Each row corresponds to a particular hash function.
* TODO: implement a dense matrix type, and use it here
*/
case class CMSCountsTable(counts : Vector[Vector[Long]]) {
assert(depth > 0, "Table must have at least 1 row.")
assert(width > 0, "Table must have at least 1 column.")
def depth : Int = counts.size
def width : Int = counts(0).size
def getCount(pos : (Int, Int)) : Long = {
val (row, col) = pos
assert(row < depth && col < width, "Position must be within the bounds of this table.")
counts(row)(col)
}
/**
* Updates the count of a single cell in the table.
*/
def +(pos : (Int, Int), count : Long) : CMSCountsTable = {
val (row, col) = pos
val currCount = getCount(pos)
val newCounts = counts.updated(row, counts(row).updated(col, currCount + count))
CMSCountsTable(newCounts)
}
/**
* Adds another counts table to this one, through elementwise addition.
*/
def ++(other : CMSCountsTable) : CMSCountsTable = {
assert((depth, width) == (other.depth, other.width), "Tables must have the same dimensions.")
val iil = Monoid.plus[IndexedSeq[IndexedSeq[Long]]](counts, other.counts)
def toVector[V](is: IndexedSeq[V]): Vector[V] = {
is match {
case v: Vector[_] => v
case _ => Vector(is: _*)
}
}
CMSCountsTable(toVector(iil.map { toVector(_) }))
}
}
object CMSCountsTable {
// Creates a new CMSCountsTable with counts initialized to all zeroes.
def apply(depth : Int, width : Int) : CMSCountsTable = CMSCountsTable(Vector.fill[Long](depth, width)(0L))
}
/**
* Convenience class for holding constant parameters of a Count-Min sketch.
*/
case class CMSParams(hashes : Seq[CMSHash], eps : Double, delta : Double, heavyHittersPct : Double)
/**
* Containers for holding heavy hitter items and their associated counts.
*/
case class HeavyHitters(
hhs : SortedSet[HeavyHitter] = SortedSet[HeavyHitter]()(HeavyHitter.ordering)) {
def -(hh : HeavyHitter) = HeavyHitters(hhs - hh)
def +(hh : HeavyHitter) = HeavyHitters(hhs + hh)
def ++(other : HeavyHitters) = HeavyHitters(hhs ++ other.hhs)
def items : Set[Long] = hhs.map { _.item }
def dropCountsBelow(minCount : Double) : HeavyHitters = {
HeavyHitters(hhs.dropWhile { _.count < minCount })
}
}
case class HeavyHitter(item : Long, count : Long)
object HeavyHitter {
val ordering = Ordering.by { hh : HeavyHitter => (hh.count, hh.item) }
}
| snoble/algebird | algebird-core/src/main/scala/com/twitter/algebird/CountMinSketch.scala | Scala | apache-2.0 | 16,095 |
package codes.bytes.macros_intro.macros
import scala.language.experimental.macros
import scala.reflect.macros.Context
import scala.collection.mutable.{ListBuffer, Stack}
object PrintfMacros {
def printf(format: String, params: Any*): Unit = macro printf_impl
def printf_impl(c: Context)(format: c.Expr[String], params: c.Expr[Any]*): c.Expr[Unit] = {
import c.universe._
val Literal(Constant(s_format: String)) = format.tree
val evals = ListBuffer[ValDef]()
def precompute(value: Tree, tpe: Type): Ident = {
val freshName = TermName(c.fresh("eval$"))
evals += ValDef(Modifiers(), freshName, TypeTree(tpe), value)
Ident(freshName)
}
val paramsStack = Stack[Tree]((params map (_.tree)): _*)
val refs = s_format.split("(?<=%[\\w%])|(?=%[\\w%])") map {
case "%d" => precompute(paramsStack.pop, typeOf[Int])
case "%s" => precompute(paramsStack.pop, typeOf[String])
case "%%" => Literal(Constant("%"))
case part => Literal(Constant(part))
}
val stats = evals ++ refs.map(ref => reify(print(c.Expr[Any](ref).splice)).tree)
c.Expr[Unit](Block(stats.toList, Literal(Constant(()))))
}
}
// vim: set ts=2 sw=2 sts=2 et: | bwmcadams/scala-macros-intro-talk | macros/src/main/scala/codes/bytes/macros_intro/macros/PrintfMacros.scala | Scala | apache-2.0 | 1,200 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.ops
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Table
import scala.reflect.ClassTag
/**
* Computes the sum along segments of a tensor.
*/
class SegmentSum[T: ClassTag]()(implicit ev: TensorNumeric[T])
extends Operation[Table, Tensor[T], T]{
def updateOutput(inputs: Table): Tensor[T] = {
val x = inputs[Tensor[T]](1)
val y = inputs[Tensor[Int]](2) // zero-indices
require(y.nDimension() == 1, "segment ids should be 1D tensor")
require(y.size(1) == x.size(1), "segment ids should be the same size as" +
s" first dimension of input, excepted ${x.size(1)}, but got ${y.size(1)}")
val newSize = x.size()
newSize(0) = y.valueAt(y.nElement()) + 1
output.resize(newSize).zero()
var i = 0
while(i < y.nElement()) {
output.select(1, y.valueAt(i + 1) + 1).add(x.select(1, i + 1))
i += 1
}
output
}
}
object SegmentSum {
def apply[T: ClassTag]()(implicit ev: TensorNumeric[T]): SegmentSum[T] = {
new SegmentSum()
}
}
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/ops/SegmentSum.scala | Scala | apache-2.0 | 1,746 |
package cn.changhong.nio.multi.selector.demo.v2
import java.net.InetSocketAddress
import java.nio.channels.{ SelectionKey, ServerSocketChannel, Selector}
/**
* Created by yangguo on 15-3-5.
*/
object Start {
def main(args:Array[String]): Unit ={
val mainSelector=Selector.open()
val serverChannel=ServerSocketChannel.open()
serverChannel.bind(new InetSocketAddress(10003))
serverChannel.configureBlocking(false)
serverChannel.register(mainSelector,SelectionKey.OP_ACCEPT)
val subSelectors=(1 to 4).map(index=>new SubNioSelector(Selector.open())).toArray
val subCircleSelectorNodes=ConsistentHash(subSelectors)
val reactorContext=ReactorContext(mainSelector,subCircleSelectorNodes)
val reactorAction=new MainNioSelector(reactorContext)
doListener(reactorAction.doAccept)//ServerSocketChannel do accept remote channel event
subSelectors.foreach(s=>doListener(s.doRead))//Sub Reactor Pool Do Read and Write Channel Network I/O
}
def doListener[L](listenerHandler: =>Unit): Unit ={
new Thread(new Runnable {
override def run(): Unit = {
while(true){
listenerHandler
}
}
}).start()
}
}
| guoyang2011/nio-MultipleReactors | src/main/scala/cn/changhong/nio/multi/selector/demo/v2/Start.scala | Scala | gpl-2.0 | 1,190 |
package rpg
import language.implicitConversions
/** Contains hit-point-related messages and implicit conversions. */
object HitPoints {
// -----------------------------------------------------------------------------------------------
// messages
// -----------------------------------------------------------------------------------------------
/** Message wrapping hit-points. */
case class HP(value: Int)
/** Message for hurting.
*
* @param amount has to be positive
*/
case class Damage(amount: Int) { require(amount > 0) }
/** Message for healing.
*
* @param amount has to be positive
*/
case class Life(amount: Int) { require(amount > 0) }
// -----------------------------------------------------------------------------------------------
// implicit conversions
// -----------------------------------------------------------------------------------------------
/** Converts an `Int` to [[rpg.HitPoints.Damage]]. */
implicit def int2Damage(amount: Int): Damage = Damage(amount)
/** Converts an `Int` to [[rpg.HitPoints.Life]]. */
implicit def int2Life(amount: Int): Life = Life(amount)
}
import HitPoints._
/** Provides hit-points.
*
* @todo low hit-point modificator
* @todo damage - armor
* @todo not heal over maxhp (unless explicitly specified)
*/
trait HitPoints {
// -----------------------------------------------------------------------------------------------
// mutable state primitive access and mutation
// -----------------------------------------------------------------------------------------------
/** Returns current hit-points, initialised with `maxhp`. */
private var curhp: Int = maxhp
/** Returns current hit-points. */
def hp: Int = curhp
/** Current hit-points mutator. */
def hp_=(hp: Int) {
curhp = hp
}
// -----------------------------------------------------------------------------------------------
// mutable state convenience access and mutation
// -----------------------------------------------------------------------------------------------
/** Returns current hit-points wrapped in [[rpg.HitPoints.HP]]. */
def apply(): HP = HP(hp)
/** Decreases hit-points by given amount of damage and returns the result. */
def hurt(dmg: Damage) = {
hp -= dmg.amount
hp
}
/** Increases hit-points by given amount of life and returns the result. */
def heal(life: Life) = {
hp += life.amount
hp
}
// -----------------------------------------------------------------------------------------------
// abstract definitions
// -----------------------------------------------------------------------------------------------
/** Returns maximum hit-points. */
def maxhp: Int
}
| wookietreiber/arpgt | src/main/scala/HitPoints.scala | Scala | gpl-3.0 | 2,760 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.variable
import collection.mutable
import cc.factorie.util.Cubbie
/** A Domain for sequences of CategoricalValues.
@author Andrew McCallum */
class CategoricalSeqDomain[C] extends DiscreteSeqDomain with Domain {
type Value = Seq[CategoricalValue[C]]
lazy val elementDomain: CategoricalDomain[C] = new CategoricalDomain[C]
}
/** A Cubbie for serializing a CategoricalSeqDomain.
It saves the elementDomain containing the sequence of categories.
@author Luke Vilnis */
class CategoricalSeqDomainCubbie[T](val csd: CategoricalSeqDomain[T]) extends Cubbie {
val elementDomainCubbie = new CategoricalDomainCubbie[T](csd.elementDomain)
setMap(new mutable.Map[String, Any] {
override def update(key: String, value: Any): Unit = {
if (key == "elementDomain") {
val map = value.asInstanceOf[mutable.Map[String, Any]]
for((k,v) <- map) elementDomainCubbie._map(k) = v
} else sys.error("Unknown cubbie slot key: \\"%s\\"" format key)
}
// TODO We should be using CategoricalDomain.stringToCategory somewhere here. -akm
def += (kv: (String, Any)): this.type = { update(kv._1, kv._2); this }
def -= (key: String): this.type = sys.error("Can't remove slots from cubbie map!")
def get(key: String): Option[Any] =
if (key == "elementDomain") Some(elementDomainCubbie._map)
else None
def iterator: Iterator[(String, Any)] = List("elementDomain").map(s => (s, get(s).get)).iterator
})
}
/** A variable whose values are sequences of CategoricalValues.
The method 'domain' is abstract.
@author Andrew McCallum */
abstract class CategoricalSeqVariable[C] extends MutableDiscreteSeqVar[CategoricalValue[C]] with IndexedSeqVar[CategoricalValue[C]] /*VarAndElementType[CategoricalSeqVariable[C],CategoricalValue[C]]*/ {
type Value = IndexedSeq[CategoricalValue[C]]
def this(initialValue:Seq[C]) = {
this()
_setCapacity(if (initialValue.length > 0) initialValue.length else 1)
val d = domain.elementDomain
initialValue.foreach(c => this += d.value(c))
}
def domain: CategoricalSeqDomain[C]
def skipNonCategories = domain.elementDomain.frozen
def appendCategory(x:C): Unit = {
val index = domain.elementDomain.index(x)
if (index >= 0) _append(index)
else if (!skipNonCategories) throw new Error("appendCategory "+x+" not found in domain.")
}
def appendCategories(xs:Iterable[C]): Unit = xs.foreach(appendCategory) //_appendAll(xs.map(c => domain.elementDomain.index(c)).toArray)
def categoryValue(seqIndex:Int): C = domain.elementDomain.category(_apply(seqIndex))
def categoryValues: Seq[C] = Seq.tabulate(length)(i => categoryValue(i))
}
| hlin117/factorie | src/main/scala/cc/factorie/variable/CategoricalSeqVariable.scala | Scala | apache-2.0 | 3,429 |
package domain.util.crypto
object Md5 {
def hex(str: String) = {
import org.apache.commons.codec.digest.DigestUtils
DigestUtils.md5Hex(str)
}
}
| rysh/scalatrader | scalatrader/app/domain/util/crypto/Md5.scala | Scala | mit | 158 |
package compiler
import java.io.File
import java.net.{URL, URLClassLoader}
import sbt.internal.inc.ScalaInstance
import sbt.io.Path
object ScalaLocator {
def scalaLoader(jars: Seq[File]) =
new URLClassLoader(
Path.toURLs(jars),
sbt.internal.inc.classpath.ClasspathUtilities.rootLoader
)
val scalaHome = sys.env.getOrElse("SCALA_HOME", "")
def scalaInstance = {
val libJar = getJar("scala-library")
val compileJar = getJar("scala-compiler")
val reflectJar = getJar("scala-reflect")
val allJars = Array(libJar, compileJar, reflectJar)
val loader = scalaLoader(allJars)
new ScalaInstance(scalaVersion(loader).getOrElse("unknown"), loader, libJar, compileJar, allJars, Option.empty)
}
def getJar(name: String): File = {
val classPathOption = urls.filter(_.toString.contains(name))
if (classPathOption.length == 1) {
new File(classPathOption(0).getFile)
} else {
if (scalaHome != "") {
new File(s"$scalaHome/lib/$name.jar")
} else {
throw new RuntimeException(s"Cannot locate $name jar")
}
}
}
def getClasspathUrls(cl: ClassLoader): Array[java.net.URL] = cl match {
case null => Array()
case u: java.net.URLClassLoader => u.getURLs ++ getClasspathUrls(cl.getParent)
case _ => getClasspathUrls(cl.getParent)
}
def urls: Array[URL] = getClasspathUrls(getClass.getClassLoader)
def scalaVersion(scalaLoader: ClassLoader): Option[String] = {
propertyFromResource("compiler.properties", "version.number", scalaLoader)
}
/**
* Get a property from a properties file resource in the classloader.
*/
def propertyFromResource(resource: String, property: String, classLoader: ClassLoader): Option[String] = {
val props = propertiesFromResource(resource, classLoader)
Option(props.getProperty(property))
}
/**
* Get all properties from a properties file resource in the classloader.
*/
def propertiesFromResource(resource: String, classLoader: ClassLoader): java.util.Properties = {
val props = new java.util.Properties
val stream = classLoader.getResourceAsStream(resource)
try {
props.load(stream)
}
catch {
case e: Exception =>
}
finally {
if (stream ne null) stream.close
}
props
}
}
| Humblehound/fsbt | server/src/main/scala/compiler/ScalaLocator.scala | Scala | mit | 2,311 |
package org.openurp.edu.eams.core.service.event
import org.beangle.commons.event.BusinessEvent
import org.openurp.edu.base.Student
@SerialVersionUID(6912654970490765968L)
class CreateStudentEvent(source: Student) extends BusinessEvent(source)
| openurp/edu-eams-webapp | core/src/main/scala/org/openurp/edu/eams/core/service/event/CreateStudentEvent.scala | Scala | gpl-3.0 | 247 |
package net.sansa_stack.rdf.spark.streaming
import org.apache.jena.graph.Triple
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
/**
* Abstract class for loading a DStream of Triples.
*
* @author Gezim Sejdiu
*/
abstract class StreamReader {
/**
* Load a stream of triples.
*
* @param ssc a Spark Streaming context
* @return a stream of Triples
*/
def load(ssc: StreamingContext): DStream[Triple]
}
| SANSA-Stack/SANSA-RDF | sansa-rdf/sansa-rdf-spark/src/main/scala/net/sansa_stack/rdf/spark/streaming/StreamReader.scala | Scala | apache-2.0 | 478 |
package deburnat.transade.core.storages
import collection.mutable.ListBuffer
import deburnat.transade.core.admins.CoreAdmin.{a, c, cc, br, brr, tb1, tb2, tb3, tb4, tb5, tb6, tb7}
/**
* An algorithm for dynamic programming. It uses internally a two-dimensional
* matrix to store the previous results.
* Project name: deburnat
* Date: 7/26/13
* Time: 11:56 PM
* @author Patrick Meppe ([email protected])
*/
protected[storages] final class ExcelStorage extends AbsStorage {
//http://viralpatel.net/blogs/java-read-write-excel-file-apache-poi/
private val (_f, _wb, _sh, _rowIt, _cellIt, _cell, map, _keys, _stream) = (
"file", "workBook", "sheet", "rowIt", "cellIt", "cell", "innerMap", "keys",
"stream"
)
private lazy val (path, loc, i, cellIt, keys, wb, sh, stream, rowIt) = (
getDef("path"), getDef("location"),
getAttr(_i), getAttr(_cellIt), getAttr(_keys), getAttr(_wb), getAttr(_sh), getAttr(_stream), getAttr(_rowIt)
)
/********** constructors - start **********/
setAttr(_f, _wb, _sh, _rowIt, _cellIt, _cell, _i, _keys, _stream)
private val left = "%s(%s".format(map, a)
private def right(of: String) = "%s).asInstanceOf[%s]".format(a, of)
setPRs(tb5, left, a+")", left, right("Boolean"), left, right("Double"), left, right("String"))
//1=: tabPr, 2=: aLeftPr, 3=: aRightPr, 4=: bLeftPr, 5=: bRightPr, 6=: nLeftPr, 7=: nRightPr, 8=: sLeftPr, 9=: sRightPr
/********** constructors - end **********/
/********** overridden attributes & methods - start **********/
/**
* #####Instructions - start#####
* --legend
* The usage of all presented attributes and methods in this comment block is mandatory useless
* it is stated otherwise.
* ph = placeholder.
* pr = place replacer.
* source storage = a storage created using a "source" node.
* target storage = a storage created using a "target" node.
*
* --default attributes/values
* If any default attribute (outside of an overriding method or attribute) needs to be set by using
* the "getDef" method, the "lazy" prefix should be added.
* This assures that the attribute is set after the "definitions" map setting.
*
* --constructor
* setAttr =:
* This method is used to set the attributes the will be present in the parse .scala class/file.
* Its purpose is to avoid collisions between two or more storage within the same set.
* setPRs =:
* This method is used to define how the storage should replace the placeholders occuring during
* the parsing of the XML source node.
* The are 9 different placeholders, ergo up to 9 possible different replacers.
*
* --support attributes/methods (optional)
* d =: The regular expression of a positive integer.
* ro, i & j are counters.
* ro =: round counter
* i =: inner counter
* j =: additional counter (additional to i or used in additional methods)
* break =: "break"
* An attribute that can be handy while writing the exit-query of the big loop.
* fe =: "firstEntry"
* An attribute that can be handy in case a command should only be proceeded
* during the first entry in the big loop.
* getAttr =:
* Method used to retrieve an attribute set with the "setAttr" method.
* getDef =:
* Method used to retrieve values from the "definitions" map.
* getSource =:
* Method used to get the current source storage.
* addSupMethod =:
* Method used to add an additional method to the parsed class.
* getDynCounter =:
* Method used to get the position of the current storage relative to its source storage.
*
* --overriding attributes/methods
* isQueriable =:
* The "isQueriable" attribute needs the "lazy" prefix for
* the same reason as the other default attributes.
* checkup =:
* The checkup whether an empty/invalid query should be return or not is done implicitly.
* Therefore there's no need the include the "isQueriable" attr. in each overriding method.
* line break =:
* There's equally no need to include line breaks at the beginning & end of each returned query.
* This is done implicitly.
* order of invocation =:
* The methods below are listed according to the order in which they are invoked.
* source storage: beforeParse -> buildImpQuery -> buildConQuery -> buildReadQuery -> buildLoopQuery -> buildDisconQuery
* target storage: beforeParse -> buildImpQuery -> buildConQuery -> buildWriteQuery -> buildDisconQuery
* #####Instructions - end#####
*
*
* #####Definitions - start#####
* --both
* ...
*
* --source storage
* ...
*
* --target storage
* ...
* #####Definitions - end#####
*
*
* #####isQueriable - start#####
* This attribute determines whether the returned queries will be empty or not.
* The decision should be made according to the state of the mandatory definitions.
* A definition is mandatory, if its value is indispensable for the proper compilation of the
* class to be created by parsing a source node.
* This attribute is used more than once.
* #####isQueriable - end#####
*/
protected lazy val isQueriable = path.endsWith(".xls")
/**
* This attribute represents the set of .jar file, that are to be move to the parsed class directory,
* to enable its proper compilation. It is used once.
* The following tasks are implicitly executed.:
* - The insertion of the ".jar" suffix at the end of each file name.
* - This attribute invocation.
*/
override protected val jarFileNames = ListBuffer("poi-3.9")
/**
* This method returns a set import queries based on the chosen storage type.
* Each member of this set represent one simple/combined import statement in the Scala language.
* Since the "import" prefix is implicitly included to each member of the returned set,
* there is no need to use it.
* Here are some set member examples:
* - "java.io.File" (simple)
* - "scala.collection.mutable.{Map, ListBuffer}" (combined)
* This method is invoked once.
* @return A set of non empty queries
*/
protected def buildImpQuery: ListBuffer[String] = {
val (poi1, poi2) = (
"org.apache.poi.hssf.usermodel.{HSSFWorkbook, HSSFSheet}",
"org.apache.poi.ss.usermodel.{Row, Cell}"
)
def stream(s: String) = "java.io.{File, File%sputStream}".format(s)
source match{
case null => ListBuffer("scala.collection.mutable.{Map, ListBuffer}", poi1, poi2, stream("In"))
case st: ExcelStorage => ListBuffer(stream("Out"))
case _ => ListBuffer(poi1, poi2, "java.io.{File, FileInputStream, FileOutputStream}")
}
}
/**
* This method returns the query containing all initialisations necessary
* for the parsed node to connect. It is invoked once.
* @return A non empty query
*/
protected def buildConQuery: String = {
source match{
case null => //this object is a source storage
val (s1, s2) = if(loc.matches(d)) ("getSheetAt", loc) else ("getSheet", a+loc+a)
"%sval %s = new FileInputStream(new File(%s))%s".format(//first the input stream
tb3, stream, a+path+a, br
) +
"%sval %s = new HSSFWorkbook(%s).%s(%s)".format(//second the workbook and the sheet
tb3, sh, stream, s1, s2
)
case _ => //this object is a target storage
val f = getAttr(_f)
"%sval %s = new File(%s)%s%sval %s = new FileOutputStream(%s)%s".format(//first the output stream
tb3, f, a+path+a, br,
tb3, stream, f, br
) + //second the workbook and the sheet
"%sval (%s: HSSFWorkbook, %s: HSSFSheet, _%s: Int) = if(%s.exists)(%s ".format(
tb3, wb, sh, i, f, br
) +
"%snew HSSFWorkbook(new FileInputStream(%s)),%s%s.getSheet%s,%s%s.getLastRowNum%s".format(
tb4, f, br,
tb4+wb, if(loc.matches(d)) "At(%s)".format(loc) else "(%s)".format(a+loc+a), br,
tb4+sh, br
) +
"%s)else (new HSSFWorkbook(), %s.createSheet%s, 0)%s".format(
tb3, wb, if(loc.nonEmpty) "(%s)".format(a+loc+a) else "", br
) +
"%svar %s = _%s".format(tb3, i, i)
}
}
/**
* This method returns the query necessary to read the required part of the storage.
* It is only invoked (once) by the source storage.
* @return A non empty query.
*/
protected def buildReadQuery: String =
"%sval %s = %s.iterator%s%sval %s = ListBuffer[String]()".format(
tb3, rowIt, sh, br,
tb3, keys
)
/**
* This method returns the query necessary to write the values obtained from the
* source storage in the target storage. It is only invoked (ore than once) by the target storage.
* @param tbn A sequence made of a certain amount of tabs.
* The amount is set by the source storage.
* @param cols The names of the columns in which the values should be writen/inserted
* Template =: a,b,c,d,...,z
* @param values The values to insert/write
* Template =: ${a},,${b},,${c},,${d},,...${z}
* @return A non empty query.
*/
protected def buildWriteQuery(tbn: String, cols: String, values: String): String = {
val smq=
"%sprivate def write(sheet: HSSFSheet, i: Int, cols: String){%s".format(tb1, br)+
"%sval row = sheet.createRow(i)%s".format(tb2, br)+
"%svar j = 0%s".format(tb2, br)+
"%scols.split(%s,%s).foreach(col => {%s".format(tb2, a, a, br)+
"%sj += 1%s".format(tb3, br)+
"%srow.createCell(j).setCellValue(col)%s".format(tb3, br)+
"%s})%s".format(tb2, br)+
tb1+"}"
addSupMethod(smq)
//first write the column names if the file is new
"%sif(%s == 0){%s%s += 1%s%swrite(%s, %s, %s)%s%s}%s".format(
tbn, i, br,
tbn+tb1+i, br,
tbn+tb1, sh, i, a+cols+a, br,
tbn, br
) +
//second continuously write the values
"%s += 1%swrite(%s, %s, %s)".format(
tbn+i, br+tbn,
sh, i, values.replace(cc, "+"+a+c+a+"+")
)
}
/**
* This method returns the query used to insert row by row the values of the source
* storage in the target storage. It is only invoked (once) by the source storage.
* @param loopBodyPh The placeholder of the sequence provided by parsing all the target nodes
* residing within the source node.
* @return A non empty query.
*/
protected def buildLoopQuery(loopBodyPh: String): String = {
val namesRow = if(getDef("colnamesrow").matches(d)) getDef("colnamesrow") else "1"
val (start, end) = (if(getDef("start").matches(d)) getDef("start") else namesRow, getDef("end"))
val ini = "%svar %s%s".format(
tb3, if(end.matches(d)) "(%s, %s) = (0, false)".format(ro, break) else "%s = 0".format(ro), br
)
val innerStatement = {
def ifStatements(s: String) = {
val cell = getAttr(_cell)
def _case(s1: String, s2: String) = "%scase %s => %s.get%sCellValue%s".format(
tb7, s1, cell, s2, br
)
val switch =
"%sval %s = %s.next%s".format(tb6, cell, cellIt, br)+
"%s%s(%s(%s)) = %s.getCellType match{%s".format(tb6, map, keys, j, cell, br)+
_case("Cell.CELL_TYPE_BOOLEAN","Boolean")+
_case("Cell.CELL_TYPE_NUMERIC","Numeric")+
_case("_","String")+
"%s}%s".format(tb6, br)
val itToMap =
"%svar %s = -1%s%sval %s = Map[String, Any]()%s%swhile(%s.hasNext){%s%s%s += 1%s%s%s}%s".format(
tb5, j, br,
tb5, map, br,
tb5, cellIt, br,
tb6, j, br,
switch,
tb5, brr
)
"%sif(%s >= %s){%s%s%s}%s".format(
tb4+s, ro, start, br,
itToMap + loopBodyPh + br,
tb4, br
)
}
//store column names to be able to use them as keys.
"%sif(%s == %s) while(%s.hasNext) %s += %s.next.getStringCellValue%s".format(
tb4, ro, namesRow, cellIt, keys, cellIt, brr
)+( //break out of the big while-loop once the limit reached.
if(end.matches(d)) "%sif(%s == %s + 1) %s = true%s%s".format(
tb4, ro, end, break, br,
ifStatements("else ")
)
else ifStatements("")
)
}
"%s%swhile(%s.hasNext%s){%s%s%s += 1%s%sval %s = %s.next.cellIterator%s%s%s}".format(
ini,
tb3, rowIt, if(end.matches(d)) " && !%s".format(break) else "", br,
tb4, ro, br,
tb4, cellIt, rowIt, brr,
innerStatement,
tb3
)
}
/**
* This method returns the query required to disconnect all previously connected
* attributes/instances. It is invoked once.
* @return A non empty query.
*/
protected def buildDisconQuery: String = source match{
case null => "%s%s.close".format(tb3, stream)
case _ => "%s%s.write(%s)%s%s%s.close".format(
tb3, wb, stream, br,
tb3, stream
)
}
/********** overwritten attributes & methods - end **********/
}
| deburnatshazem/deburnat | core/src/main/scala/deburnat/transade/core/storages/ExcelStorage.scala | Scala | apache-2.0 | 12,963 |
/*
*
* * Copyright 2016 HM Revenue & Customs
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*
*/
package addressbase
import ingest.Ingester
import uk.gov.hmrc.address.osgb.Document
import uk.gov.hmrc.address.uk.Postcode
object OSBlpu {
val RecordId = "21"
// scalastyle:off
var idx = OSBlpuIdx(
uprn = 3,
parentUprn = 7,
logicalState = 4,
blpuState = 5,
subdivision = 14,
localCustodian = 13,
postalCode = 19,
postcode = 20,
latitude = 10,
longitude = 11)
def isUsefulPostcode(csv: Array[String]): Boolean = {
csv(idx.postalCode) != "N" // not a postal address
}
def apply(csv: Array[String]): OSBlpu = {
val subdivision = csv(idx.subdivision).head
val lat = csv(idx.latitude)
val long = csv(idx.longitude)
OSBlpu(
csv(idx.uprn).toLong,
blankToOptLong(csv(idx.parentUprn)),
toChar(csv(idx.logicalState)),
toChar(csv(idx.blpuState)),
subdivision,
csv(idx.postcode),
csv(idx.localCustodian).toInt,
lat,
long
)
}
private def toChar(s: String) = if (s.isEmpty) ' ' else s.head
}
case class OSBlpuIdx(uprn: Int, parentUprn: Int, logicalState: Int, blpuState: Int, subdivision: Int,
localCustodian: Int, postalCode: Int, postcode: Int, latitude: Int, longitude: Int)
case class OSBlpu(uprn: Long, parentUprn: Option[Long], logicalState: Char, blpuState: Char, subdivision: Char,
postcode: String, localCustodianCode: Int, latitude: String, longitude: String) extends Document {
// For use as input to MongoDbObject (hence it's not a Map)
def tupled: List[(String, Any)] = List(
"uprn" -> uprn,
"logicalState" -> logicalState,
"blpuState" -> blpuState,
"subdivision" -> subdivision,
"localCustodianCode" -> localCustodianCode,
"postcode" -> postcode,
"location" -> location) ++ parentUprn.map("parentUprn" -> _)
def location: String = {
s"$latitude,$longitude"
}
def normalise: OSBlpu = new OSBlpu(uprn, parentUprn, logicalState, blpuState, subdivision,
Postcode.normalisePostcode(postcode), localCustodianCode, latitude, longitude)
}
| andywhardy/address-reputation-ingester | app/addressbase/OSBlpu.scala | Scala | apache-2.0 | 2,714 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.matchers
import org.scalatest._
import org.scalatest.prop.Checkers
import org.scalacheck._
import Arbitrary._
import Prop._
trait CustomFileBePropertyMatchers {
class FileBePropertyMatcher extends BePropertyMatcher[java.io.File] {
def apply(left: java.io.File) = BePropertyMatchResult(left.isFile, "file")
}
class DirectoryBePropertyMatcher extends BePropertyMatcher[java.io.File] {
def apply(left: java.io.File) = BePropertyMatchResult(left.isDirectory, "directory")
}
val file = new FileBePropertyMatcher
val directory = new DirectoryBePropertyMatcher
}
class ShouldFileBePropertyMatcherSpec extends Spec with ShouldMatchers with CustomFileBePropertyMatchers {
object `A temp file` {
def `should be a file, not a directory` {
val tempFile = java.io.File.createTempFile("delete", "me")
try {
tempFile should be a (file)
tempFile should not be a (directory)
}
finally {
tempFile.delete()
}
}
}
}
| travisbrown/scalatest | src/test/scala/org/scalatest/matchers/ShouldFileBePropertyMatcherSpec.scala | Scala | apache-2.0 | 1,618 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @builder scalation.linalgebra.mem_mapped.bld.BldMatrix
* @version 1.2
* @date Mon Sep 28 11:18:16 EDT 2015
* @see LICENSE (MIT style license file).
*/
package scalation.linalgebra.mem_mapped
import java.io.PrintWriter
import io.Source.fromFile
import scalation.math.Complex.{abs => ABS, _}
import scalation.math.{Complex, oneIf}
import scalation.util.{Error, MM_ArrayC, PackageInfo}
import MatrixC.eye
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MatrixC` class stores and operates on Numeric Matrices of type `Complex`.
* This class follows the `gen.MatrixN` framework and is provided for efficiency.
* @param d1 the first/row dimension
* @param d2 the second/column dimension
* @param v the 2D array used to store matrix elements
*/
class MatrixC (val d1: Int,
val d2: Int,
private var v: Array [MM_ArrayC] = null)
extends MatriC with Error with Serializable
{
/** Dimension 1
*/
lazy val dim1 = d1
/** Dimension 2
*/
lazy val dim2 = d2
if (v == null) {
v = Array.ofDim [MM_ArrayC] (dim1)
for (i <- 0 until dim1) v(i) = MM_ArrayC.ofDim (dim2)
} else if (dim1 != v.length || dim2 != v(0).length) {
flaw ("constructor", "dimensions are wrong")
} // if
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a 'dim1' by 'dim1' square matrix.
* @param dim1 the row and column dimension
*/
def this (dim1: Int) { this (dim1, dim1) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a 'dim1' by 'dim2' matrix and assign each element the value 'x'.
* @param dim1 the row dimension
* @param dim2 the column dimesion
* @param x the scalar value to assign
*/
def this (dim1: Int, dim2: Int, x: Complex)
{
this (dim1, dim2)
for (i <- range1; j <- range2) v(i)(j) = x
} // constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a matrix and assign values from array of arrays 'u'.
* @param u the 2D array of values to assign
*/
def this (u: Array [MM_ArrayC]) { this (u.length, u(0).length, u) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a matrix from repeated values.
* @param dim the (row, column) dimensions
* @param u the repeated values
*/
def this (dim: Tuple2 [Int, Int], u: Complex*)
{
this (dim._1, dim._2)
for (i <- range1; j <- range2) v(i)(j) = u(i * dim2 + j)
} // constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a matrix and assign values from matrix 'b'.
* @param b the matrix of values to assign
*/
def this (b: MatrixC)
{
this (b.d1, b.d2)
for (i <- range1; j <- range2) v(i)(j) = b.v(i)(j)
} // constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get 'this' matrix's element at the 'i,j'-th index position.
* @param i the row index
* @param j the column index
*/
def apply (i: Int, j: Int): Complex = v(i)(j)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get 'this' matrix's vector at the 'i'-th index position ('i'-th row).
* @param i the row index
*/
def apply (i: Int): VectorC = new VectorC (v(i))
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get a slice 'this' matrix row-wise on range 'ir' and column-wise on range 'jr'.
* Ex: b = a(2..4, 3..5)
* @param ir the row range
* @param jr the column range
*/
def apply (ir: Range, jr: Range): MatrixC = slice (ir.start, ir.end, jr.start, jr.end)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' matrix's element at the 'i,j'-th index position to the scalar 'x'.
* @param i the row index
* @param j the column index
* @param x the scalar value to assign
*/
def update (i: Int, j: Int, x: Complex) { v(i)(j) = x }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' matrix's row at the 'i'-th index position to the vector 'u'.
* @param i the row index
* @param u the vector value to assign
*/
def update (i: Int, u: VectorC) { v(i) = u() }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set a slice 'this' matrix row-wise on range ir and column-wise on range 'jr'.
* Ex: a(2..4, 3..5) = b
* @param ir the row range
* @param jr the column range
* @param b the matrix to assign
*/
def update (ir: Range, jr: Range, b: MatriC)
{
if (b.isInstanceOf [MatrixC]) {
val bb = b.asInstanceOf [MatrixC]
for (i <- ir; j <- jr) v(i)(j) = bb.v(i - ir.start)(j - jr.start)
} else {
flaw ("update", "must convert b to a MatrixC first")
} // if
} // update
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set all the elements in 'this' matrix to the scalar 'x'.
* @param x the scalar value to assign
*/
def set (x: Complex) { for (i <- range1; j <- range2) v(i)(j) = x }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set all the values in 'this' matrix as copies of the values in 2D array 'u'.
* @param u the 2D array of values to assign
*/
def set (u: Array [Array [Complex]]) { for (i <- range1; j <- range2) v(i)(j) = u(i)(j) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' matrix's 'i'-th row starting at column 'j' to the vector 'u'.
* @param i the row index
* @param u the vector value to assign
* @param j the starting column index
*/
def set (i: Int, u: VectorC, j: Int = 0) { for (k <- 0 until u.dim) v(i)(k+j) = u(k) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' matrix row-wise 'from' to 'end'.
* @param from the start row of the slice (inclusive)
* @param end the end row of the slice (exclusive)
*/
def slice (from: Int, end: Int): MatrixC =
{
new MatrixC (end - from, dim2, v.slice (from, end))
} // slice
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' matrix column-wise 'from' to 'end'.
* @param from the start column of the slice (inclusive)
* @param end the end column of the slice (exclusive)
*/
def sliceCol (from: Int, end: Int): MatrixC =
{
val c = new MatrixC (dim1, end - from)
for (i <- c.range1; j <- c.range2) c.v(i)(j) = v(i)(j + from)
c
} // sliceCol
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' matrix row-wise 'r_from' to 'r_end' and column-wise 'c_from' to 'c_end'.
* @param r_from the start of the row slice
* @param r_end the end of the row slice
* @param c_from the start of the column slice
* @param c_end the end of the column slice
*/
def slice (r_from: Int, r_end: Int, c_from: Int, c_end: Int): MatrixC =
{
val c = new MatrixC (r_end - r_from, c_end - c_from)
for (i <- c.range1; j <- c.range2) c.v(i)(j) = v(i + r_from)(j + c_from)
c
} // slice
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' matrix excluding the given row and/or column.
* @param row the row to exclude (0 until dim1, set to dim1 to keep all rows)
* @param col the column to exclude (0 until dim2, set to dim2 to keep all columns)
*/
def sliceExclude (row: Int, col: Int): MatrixC =
{
val c = new MatrixC (dim1 - oneIf (row < dim1), dim2 - oneIf (col < dim2))
for (i <- range1 if i != row) for (j <- range2 if j != col) {
c.v(i - oneIf (i > row))(j - oneIf (j > col)) = v(i)(j)
} // for
c
} // sliceExclude
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Select rows from 'this' matrix according to the given index/basis.
* @param rowIndex the row index positions (e.g., (0, 2, 5))
*/
def selectRows (rowIndex: Array [Int]): MatrixC =
{
val c = new MatrixC (rowIndex.length, dim2)
for (i <- c.range1) c.v(i) = v(rowIndex(i))
c
} // selectRows
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get column 'col' from the matrix, returning it as a vector.
* @param col the column to extract from the matrix
* @param from the position to start extracting from
*/
def col (col: Int, from: Int = 0): VectorC =
{
val u = new VectorC (dim1 - from)
for (i <- from until dim1) u(i-from) = v(i)(col)
u
} // col
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set column 'col' of the matrix to a vector.
* @param col the column to set
* @param u the vector to assign to the column
*/
def setCol (col: Int, u: VectorC) { for (i <- range1) v(i)(col) = u(i) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Select columns from 'this' matrix according to the given index/basis.
* Ex: Can be used to divide a matrix into a basis and a non-basis.
* @param colIndex the column index positions (e.g., (0, 2, 5))
*/
def selectCols (colIndex: Array [Int]): MatrixC =
{
val c = new MatrixC (dim1, colIndex.length)
for (j <- c.range2) c.setCol (j, col(colIndex(j)))
c
} // selectCols
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Transpose 'this' matrix (rows => columns).
*/
def t: MatrixC =
{
val b = new MatrixC (dim2, dim1)
for (i <- b.range1; j <- b.range2) b.v(i)(j) = v(j)(i)
b
} // t
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (row) vector 'u' and 'this' matrix, i.e., prepend 'u' to 'this'.
* @param u the vector to be prepended as the new first row in new matrix
*/
def +: (u: VectorC): MatrixC =
{
if (u.dim != dim2) flaw ("+:", "vector does not match row dimension")
val c = new MatrixC (dim1 + 1, dim2)
for (i <- c.range1) c(i) = if (i == 0) u else this(i - 1)
c
} // +:
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (column) vector 'u' and 'this' matrix, i.e., prepend 'u' to 'this'.
* @param u the vector to be prepended as the new first column in new matrix
*/
def +^: (u: VectorC): MatrixC =
{
if (u.dim != dim1) flaw ("+^:", "vector does not match column dimension")
val c = new MatrixC (dim1, dim2 + 1)
for (j <- c.range2) c.setCol (j, if (j == 0) u else col (j - 1))
c
} // +^:
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate 'this' matrix and (row) vector 'u', i.e., append 'u' to 'this'.
* @param u the vector to be appended as the new last row in new matrix
*/
def :+ (u: VectorC): MatrixC =
{
if (u.dim != dim2) flaw (":+", "vector does not match row dimension")
val c = new MatrixC (dim1 + 1, dim2)
for (i <- c.range1) c(i) = if (i < dim1) this(i) else u
c
} // :+
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate 'this' matrix and (column) vector 'u', i.e., append 'u' to 'this'.
* @param u the vector to be appended as the new last column in new matrix
*/
def :^+ (u: VectorC): MatrixC =
{
if (u.dim != dim1) flaw (":^+", "vector does not match column dimension")
val c = new MatrixC (dim1, dim2 + 1)
for (j <- c.range2) c.setCol (j, if (j < dim2) col (j) else u)
c
} // :^+
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (row-wise) 'this' matrix and matrix 'b'.
* @param b the matrix to be concatenated as the new last rows in new matrix
*/
def ++ (b: MatriC): MatrixC =
{
if (b.dim2 != dim2) flaw ("++", "matrix b does not match row dimension")
val c = new MatrixC (dim1 + b.dim1, dim2)
for (i <- c.range1) c(i) = if (i < dim1) this(i) else b(i - dim1)
c
} // ++
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (column-wise) 'this' matrix and matrix 'b'.
* @param b the matrix to be concatenated as the new last columns in new matrix
*/
def ++^ (b: MatriC): MatrixC =
{
if (b.dim1 != dim1) flaw ("++^", "matrix b does not match column dimension")
val c = new MatrixC (dim1, dim2 + b.dim2)
for (j <- c.range2) c.setCol (j, if (j < dim2) col (j) else b.col (j - dim2))
c
} // ++^
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' matrix and matrix 'b'.
* @param b the matrix to add (requires leDimensions)
*/
def + (b: MatrixC): MatrixC =
{
val c = new MatrixC (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) + b.v(i)(j)
c
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' matrix and matrix 'b' for any type extending MatriC.
* @param b the matrix to add (requires leDimensions)
*/
def + (b: MatriC): MatrixC =
{
val c = new MatrixC (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) + b(i, j)
c
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' matrix and (row) vector 'u'.
* @param u the vector to add
*/
def + (u: VectorC): MatrixC =
{
val c = new MatrixC (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) + u(j)
c
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' matrix and scalar 'x'.
* @param x the scalar to add
*/
def + (x: Complex): MatrixC =
{
val c = new MatrixC (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) + x
c
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' matrix and matrix 'b'.
* @param b the matrix to add (requires leDimensions)
*/
def += (b: MatrixC): MatrixC =
{
for (i <- range1; j <- range2) v(i)(j) += b.v(i)(j)
this
} // +=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' matrix and matrix 'b' for any type extending MatriC.
* @param b the matrix to add (requires leDimensions)
*/
def += (b: MatriC): MatrixC =
{
for (i <- range1; j <- range2) v(i)(j) += b(i, j)
this
} // +=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' matrix and (row) vector 'u'.
* @param u the vector to add
*/
def += (u: VectorC): MatrixC =
{
for (i <- range1; j <- range2) v(i)(j) += u(j)
this
} // +=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' matrix and scalar 'x'.
* @param x the scalar to add
*/
def += (x: Complex): MatrixC =
{
for (i <- range1; j <- range2) v(i)(j) += x
this
} // +=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract matrix 'b'.
* @param b the matrix to subtract (requires leDimensions)
*/
def - (b: MatrixC): MatrixC =
{
val c = new MatrixC (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) - b.v(i)(j)
c
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract matrix 'b' for any type extending MatriC.
* @param b the matrix to subtract (requires leDimensions)
*/
def - (b: MatriC): MatrixC =
{
val c = new MatrixC (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) - b(i, j)
c
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract (row) vector 'u'.
* @param b the vector to subtract
*/
def - (u: VectorC): MatrixC =
{
val c = new MatrixC (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) - u(j)
c
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract scalar 'x'.
* @param x the scalar to subtract
*/
def - (x: Complex): MatrixC =
{
val c = new MatrixC (dim1, dim2)
for (i <- c.range1; j <- c.range2) c.v(i)(j) = v(i)(j) - x
c
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract in-place matrix 'b'.
* @param b the matrix to subtract (requires leDimensions)
*/
def -= (b: MatrixC): MatrixC =
{
for (i <- range1; j <- range2) v(i)(j) -= b.v(i)(j)
this
} // -=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract in-place matrix 'b'.
* @param b the matrix to subtract (requires leDimensions)
*/
def -= (b: MatriC): MatrixC =
{
for (i <- range1; j <- range2) v(i)(j) -= b(i, j)
this
} // -=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract in-place (row) vector 'u'.
* @param b the vector to subtract
*/
def -= (u: VectorC): MatrixC =
{
for (i <- range1; j <- range2) v(i)(j) -= u(j)
this
} // -=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' matrix subtract in-place scalar 'x'.
* @param x the scalar to subtract
*/
def -= (x: Complex): MatrixC =
{
for (i <- range1; j <- range2) v(i)(j) -= x
this
} // -=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by matrix 'b', transposing 'b' to improve efficiency.
* Use 'times' method to skip the transpose step.
* @param b the matrix to multiply by (requires sameCrossDimensions)
*/
def * (b: MatrixC): MatrixC =
{
if (dim2 != b.dim1) flaw ("*", "matrix * matrix - incompatible cross dimensions")
val c = new MatrixC (dim1, b.dim2)
val bt = b.t // transpose the b matrix
for (i <- range1; j <- c.range2) {
val va = v(i); val vb = bt.v(j)
var sum = _0
for (k <- range2) sum += va(k) * vb(k)
c.v(i)(j) = sum
} // for
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by matrix 'b', transposing 'b' to improve efficiency.
* Use 'times' method to skip the transpose step.
* @param b the matrix to multiply by (requires sameCrossDimensions)
*/
def * (b: MatriC): MatrixC =
{
if (dim2 != b.dim1) flaw ("*", "matrix * matrix - incompatible cross dimensions")
val c = new MatrixC (dim1, b.dim2)
val bt = b.t // transpose the b matrix
for (i <- range1; j <- c.range2) {
val va = v(i); val vb = bt(j)
var sum = _0
for (k <- range2) sum += va(k) * vb(k)
c.v(i)(j) = sum
} // for
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by vector 'u' (vector elements beyond 'dim2' ignored).
* @param u the vector to multiply by
*/
def * (u: VectorC): VectorC =
{
if (dim2 > u.dim) flaw ("*", "matrix * vector - vector dimension too small")
val c = new VectorC (dim1)
for (i <- range1) {
var sum = _0
for (k <- range2) sum += v(i)(k) * u(k)
c(i) = sum
} // for
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by scalar 'x'.
* @param x the scalar to multiply by
*/
def * (x: Complex): MatrixC =
{
val c = new MatrixC (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) * x
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' matrix by matrix 'b', transposing 'b' to improve
* efficiency. Use 'times_ip' method to skip the transpose step.
* @param b the matrix to multiply by (requires square and sameCrossDimensions)
*/
def *= (b: MatrixC): MatrixC =
{
if (! b.isSquare) flaw ("*=", "matrix 'b' must be square")
if (dim2 != b.dim1) flaw ("*=", "matrix *= matrix - incompatible cross dimensions")
val bt = b.t // use the transpose of b
for (i <- range1) {
val row_i = new VectorC (dim2) // save ith row so not overwritten
for (j <- range2) row_i(j) = v(i)(j) // copy values from ith row of 'this' matrix
for (j <- range2) {
val vb = bt.v(j)
var sum = _0
for (k <- range2) sum += row_i(k) * vb(k)
v(i)(j) = sum
} // for
} // for
this
} // *=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' matrix by matrix 'b', transposing 'b' to improve
* efficiency. Use 'times_ip' method to skip the transpose step.
* @param b the matrix to multiply by (requires square and sameCrossDimensions)
*/
def *= (b: MatriC): MatrixC =
{
if (! b.isSquare) flaw ("*=", "matrix 'b' must be square")
if (dim2 != b.dim1) flaw ("*=", "matrix *= matrix - incompatible cross dimensions")
val bt = b.t // use the transpose of b
for (i <- range1) {
val row_i = new VectorC (dim2) // save ith row so not overwritten
for (j <- range2) row_i(j) = v(i)(j) // copy values from ith row of 'this' matrix
for (j <- range2) {
val vb = bt(j)
var sum = _0
for (k <- range2) sum += row_i(k) * vb(k)
v(i)(j) = sum
} // for
} // for
this
} // *=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' matrix by scalar 'x'.
* @param x the scalar to multiply by
*/
def *= (x: Complex): MatrixC =
{
for (i <- range1; j <- range2) v(i)(j) *= x
this
} // *=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the dot product of 'this' matrix and vector 'u', by first transposing
* 'this' matrix and then multiplying by 'u' (ie., 'a dot u = a.t * u').
* @param u the vector to multiply by (requires same first dimensions)
*/
def dot (u: VectorC): VectorC =
{
if (dim1 != u.dim) flaw ("dot", "matrix dot vector - incompatible first dimensions")
val c = new VectorC (dim2)
val at = this.t // transpose the 'this' matrix
for (i <- range2) {
var sum = _0
for (k <- range1) sum += at.v(i)(k) * u(k)
c(i) = sum
} // for
c
} // dot
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by matrix 'b' without first transposing 'b'.
* @param b the matrix to multiply by (requires sameCrossDimensions)
*/
def times (b: MatrixC): MatrixC =
{
if (dim2 != b.dim1) flaw ("times", "matrix * matrix - incompatible cross dimensions")
val c = new MatrixC (dim1, b.dim2)
for (i <- range1; j <- c.range2) {
var sum = _0
for (k <- range2) sum += v(i)(k) * b.v(k)(j)
c.v(i)(j) = sum
} // for
c
} // times
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' matrix by matrix 'b' without first transposing 'b'.
* If b and this reference the same matrix (b == this), a copy of the this
* matrix is made.
* @param b the matrix to multiply by (requires square and sameCrossDimensions)
*/
def times_ip (b: MatrixC)
{
if (! b.isSquare) flaw ("times_ip", "matrix 'b' must be square")
if (dim2 != b.dim1) flaw ("times_ip", "matrix * matrix - incompatible cross dimensions")
val bb = if (b == this) new MatrixC (this) else b
for (i <- range1) {
val row_i = new VectorC (dim2) // save ith row so not overwritten
for (j <- range2) row_i(j) = v(i)(j) // copy values from ith row of 'this' matrix
for (j <- range2) {
var sum = _0
for (k <- range2) sum += row_i(k) * bb.v(k)(j)
v(i)(j) = sum
} // for
} // for
} // times_ip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by matrix 'b' using 'dot' product (concise solution).
* @param b the matrix to multiply by (requires sameCrossDimensions)
*/
def times_d (b: MatriC): MatrixC =
{
if (dim2 != b.dim1) flaw ("*", "matrix * matrix - incompatible cross dimensions")
val c = new MatrixC (dim1, b.dim2)
for (i <- range1; j <- c.range2) c.v(i)(j) = this(i) dot b.col(j)
c
} // times_d
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by matrix b using the Strassen matrix multiplication
* algorithm. Both matrices ('this' and 'b') must be square. Although the
* algorithm is faster than the traditional cubic algorithm, its requires
* more memory and is often less stable (due to round-off errors).
* FIX: could be make more efficient using a virtual slice (vslice) method.
* @see http://en.wikipedia.org/wiki/Strassen_algorithm
* @param b the matrix to multiply by (it has to be a square matrix)
*/
def times_s (b: MatrixC): MatrixC =
{
if (dim2 != b.dim1) flaw ("*", "matrix * matrix - incompatible cross dimensions")
val c = new MatrixC (dim1, dim1) // allocate result matrix
var d = dim1 / 2 // half dim1
if (d + d < dim1) d += 1 // if not even, increment by 1
val evenDim = d + d // equals dim1 if even, else dim1 + 1
// decompose to blocks (use vslice method if available)
val a11 = slice (0, d, 0, d)
val a12 = slice (0, d, d, evenDim)
val a21 = slice (d, evenDim, 0, d)
val a22 = slice (d, evenDim, d, evenDim)
val b11 = b.slice (0, d, 0, d)
val b12 = b.slice (0, d, d, evenDim)
val b21 = b.slice (d, evenDim, 0, d)
val b22 = b.slice (d, evenDim, d, evenDim)
// compute intermediate sub-matrices
val p1 = (a11 + a22) * (b11 + b22)
val p2 = (a21 + a22) * b11
val p3 = a11 * (b12 - b22)
val p4 = a22 * (b21 - b11)
val p5 = (a11 + a12) * b22
val p6 = (a21 - a11) * (b11 + b12)
val p7 = (a12 - a22) * (b21 + b22)
for (i <- c.range1; j <- c.range2) {
c.v(i)(j) = if (i < d && j < d) p1.v(i)(j) + p4.v(i)(j)- p5.v(i)(j) + p7.v(i)(j)
else if (i < d) p3.v(i)(j-d) + p5.v(i)(j-d)
else if (i >= d && j < d) p2.v(i-d)(j) + p4.v(i-d)(j)
else p1.v(i-d)(j-d) - p2.v(i-d)(j-d) + p3.v(i-d)(j-d) + p6.v(i-d)(j-d)
} // for
c // return result matrix
} // times_s
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' matrix by vector 'u' to produce another matrix '(a_ij * u_j)'.
* E.g., multiply a matrix by a diagonal matrix represented as a vector.
* @param u the vector to multiply by
*/
def ** (u: VectorC): MatrixC =
{
val c = new MatrixC (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) * u(j)
c
} // **
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' matrix by vector 'u' to produce another matrix '(a_ij * u_j)'.
* @param u the vector to multiply by
*/
def **= (u: VectorC): MatrixC =
{
for (i <- range1; j <- range2) v(i)(j) = v(i)(j) * u(j)
this
} // **=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide 'this' matrix by scalar 'x'.
* @param x the scalar to divide by
*/
def / (x: Complex): MatrixC =
{
val c = new MatrixC (dim1, dim2)
for (i <- range1; j <- range2) c.v(i)(j) = v(i)(j) / x
c
} // /
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide in-place 'this' matrix by scalar 'x'.
* @param x the scalar to divide by
*/
def /= (x: Complex): MatrixC =
{
for (i <- range1; j <- range2) v(i)(j) = v(i)(j) / x
this
} // /=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Raise 'this' matrix to the 'p'th power (for some integer 'p' >= 2).
* Caveat: should be replace by a divide and conquer algorithm.
* @param p the power to raise 'this' matrix to
*/
def ~^ (p: Int): MatrixC =
{
if (p < 2) flaw ("~^", "p must be an integer >= 2")
if (! isSquare) flaw ("~^", "only defined on square matrices")
val c = new MatrixC (dim1, dim1)
for (i <- range1; j <- range1) {
var sum = _0
for (k <- range1) sum += v(i)(k) * v(k)(j)
c.v(i)(j) = sum
} // for
if (p > 2) c ~^ (p-1) else c
} // ~^
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the maximum element in 'this' matrix.
* @param e the ending row index (exclusive) for the search
*/
def max (e: Int = dim1): Complex =
{
var x = v(0)(0)
for (i <- 1 until e; j <- range2 if v(i)(j) > x) x = v(i)(j)
x
} // max
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the minimum element in 'this' matrix.
* @param e the ending row index (exclusive) for the search
*/
def min (e: Int = dim1): Complex =
{
var x = v(0)(0)
for (i <- 1 until e; j <- range2 if v(i)(j) < x) x = v(i)(j)
x
} // min
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Factor 'this' matrix into the product of upper and lower triangular
* matrices '(l, u)' using the LU Factorization algorithm. This version uses
* no partial pivoting.
*/
def lud_npp: Tuple2 [MatrixC, MatrixC] =
{
val l = new MatrixC (dim1, dim2) // lower triangular matrix
val u = new MatrixC (this) // upper triangular matrix (a copy of this)
for (i <- u.range1) {
val pivot = u.v(i)(i)
if (pivot =~ _0) flaw ("lud_npp", "use lud since you have a zero pivot")
l.v(i)(i) = _1
for (j <- i + 1 until u.dim2) l.v(i)(j) = _0
for (k <- i + 1 until u.dim1) {
val mul = u.v(k)(i) / pivot
l.v(k)(i) = mul
for (j <- u.range2) u.v(k)(j) = u.v(k)(j) - mul * u.v(i)(j)
} // for
} // for
Tuple2 (l, u)
} // lud_npp
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Factor 'this' matrix into the product of lower and upper triangular
* matrices '(l, u)' using the LU Factorization algorithm. This version uses
* partial pivoting.
*/
def lud: Tuple2 [MatrixC, MatrixC] =
{
val l = new MatrixC (dim1, dim2) // lower triangular matrix
val u = new MatrixC (this) // upper triangular matrix (a copy of this)
for (i <- u.range1) {
var pivot = u.v(i)(i)
if (pivot =~ _0) {
val k = partialPivoting (u, i) // find the maxiumum element below pivot
u.swap (i, k, i) // swap rows i and k from column k
pivot = u.v(i)(i) // reset the pivot
} // if
l.v(i)(i) = _1
for (j <- i + 1 until u.dim2) l.v(i)(j) = _0
for (k <- i + 1 until u.dim1) {
val mul = u.v(k)(i) / pivot
l.v(k)(i) = mul
for (j <- u.range2) u.v(k)(j) = u.v(k)(j) - mul * u.v(i)(j)
} // for
} // for
Tuple2 (l, u)
} // lud
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Factor in-place 'this' matrix into the product of lower and upper triangular
* matrices '(l, u)' using the LU Factorization algorithm. This version uses
* partial pivoting.
*/
def lud_ip: Tuple2 [MatrixC, MatrixC] =
{
val l = new MatrixC (dim1, dim2) // lower triangular matrix
val u = this // upper triangular matrix (this)
for (i <- u.range1) {
var pivot = u.v(i)(i)
if (pivot =~ _0) {
val k = partialPivoting (u, i) // find the maxiumum element below pivot
u.swap (i, k, i) // swap rows i and k from column k
pivot = u.v(i)(i) // reset the pivot
} // if
l.v(i)(i) = _1
for (j <- i + 1 until u.dim2) l.v(i)(j) = _0
for (k <- i + 1 until u.dim1) {
val mul = u.v(k)(i) / pivot
l.v(k)(i) = mul
for (j <- u.range2) u.v(k)(j) = u.v(k)(j) - mul * u.v(i)(j)
} // for
} // for
Tuple2 (l, u)
} // lud_ip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Use partial pivoting to find a maximal non-zero pivot and return its row
* index, i.e., find the maximum element '(k, i)' below the pivot '(i, i)'.
* @param a the matrix to perform partial pivoting on
* @param i the row and column index for the current pivot
*/
private def partialPivoting (a: MatrixC, i: Int): Int =
{
var max = a.v(i)(i) // initially set to the pivot
var kMax = i // initially the pivot row
for (k <- i + 1 until a.dim1 if ABS (a.v(k)(i)) > max) {
max = ABS (a.v(k)(i))
kMax = k
} // for
if (kMax == i) {
flaw ("partialPivoting", "unable to find a non-zero pivot for row " + i)
} // if
kMax
} // partialPivoting
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Solve for 'x' in the equation 'l*u*x = b' (see lud above).
* @param l the lower triangular matrix
* @param u the upper triangular matrix
* @param b the constant vector
*/
def solve (l: MatriC, u: MatriC, b: VectorC): VectorC =
{
val y = new VectorC (l.dim2)
for (k <- 0 until y.dim) { // solve for y in l*y = b
y(k) = b(k) - (l(k) dot y)
} // for
val x = new VectorC (u.dim2)
for (k <- x.dim - 1 to 0 by -1) { // solve for x in u*x = y
x(k) = (y(k) - (u(k) dot x)) / u(k, k)
} // for
x
} // solve
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Solve for 'x' in the equation 'l*u*x = b' where 'l = this'. Requires
* 'l' to be lower triangular.
* @param u the upper triangular matrix
* @param b the constant vector
*/
def solve (u: MatriC, b: VectorC): VectorC = solve (this, u, b)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Solve for 'x' in the equation 'a*x = b' where 'a' is 'this' matrix.
* @param b the constant vector.
*/
def solve (b: VectorC): VectorC = solve (lud, b)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Combine 'this' matrix with matrix 'b', placing them along the diagonal and
* filling in the bottom left and top right regions with zeros; '[this, b]'.
* @param b the matrix to combine with 'this' matrix
*/
def diag (b: MatriC): MatrixC =
{
val m = dim1 + b.dim1
val n = dim2 + b.dim2
val c = new MatrixC (m, n)
for (i <- 0 until m; j <- 0 until n) {
c.v(i)(j) = if (i < dim1 && j < dim2) v(i)(j)
else if (i >= dim1 && j >= dim2) b(i-dim1, j-dim2)
else _0
} // for
c
} // diag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix '[Ip, this, Iq]' where Ir is a r-by-r identity matrix, by
* positioning the three matrices 'Ip', 'this' and 'Iq' along the diagonal.
* Fill the rest of matrix with zeros.
* @param p the size of identity matrix Ip
* @param q the size of identity matrix Iq
*/
def diag (p: Int, q: Int = 0): MatrixC =
{
if (! isSquare) flaw ("diag", "'this' matrix must be square")
val n = dim1 + p + q
val c = new MatrixC (n, n)
for (i <- 0 until p) c.v(i)(i) = _1 // Ip
for (i <- 0 until dim1; j <- 0 until dim1) c.v(i+p)(j+p) = v(i)(j) // this
for (i <- p + dim1 until n) c.v(i)(i) = _1 // Iq
c
} // diag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get the kth diagonal of 'this' matrix. Assumes 'dim2 >= dim1'.
* @param k how far above the main diagonal, e.g., (-1, 0, 1) for (sub, main, super)
*/
def getDiag (k: Int = 0): VectorC =
{
val c = new VectorC (dim1 - math.abs (k))
val (j, l) = (math.max (-k, 0), math.min (dim1-k, dim1))
for (i <- j until l) c(i-j) = v(i)(i+k)
c
} // getDiag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set the kth diagonal of 'this' matrix to the vector 'u'. Assumes 'dim2 >= dim1'.
* @param u the vector to set the diagonal to
* @param k how far above the main diagonal, e.g., (-1, 0, 1) for (sub, main, super)
*/
def setDiag (u: VectorC, k: Int = 0)
{
val (j, l) = (math.max (-k, 0), math.min (dim1-k, dim1))
for (i <- j until l) v(i)(i+k) = u(i-j)
} // setDiag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set the main diagonal of 'this' matrix to the scalar 'x'. Assumes 'dim2 >= dim1'.
* @param x the scalar to set the diagonal to
*/
def setDiag (x: Complex) { for (i <- range1) v(i)(i) = x }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Invert 'this' matrix (requires a square matrix) and does not use partial pivoting.
*/
def inverse_npp: MatrixC =
{
val b = new MatrixC (this) // copy 'this' matrix into b
val c = eye (dim1) // let c represent the augmentation
for (i <- b.range1) {
val pivot = b.v(i)(i)
if (pivot =~ _0) flaw ("inverse_npp", "use inverse since you have a zero pivot")
for (j <- b.range2) {
b.v(i)(j) /= pivot
c.v(i)(j) /= pivot
} // for
for (k <- 0 until b.dim1 if k != i) {
val mul = b.v(k)(i)
for (j <- b.range2) {
b.v(k)(j) -= mul * b.v(i)(j)
c.v(k)(j) -= mul * c.v(i)(j)
} // for
} // for
} // for
c
} // inverse_npp
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Invert 'this' matrix (requires a square matrix) and use partial pivoting.
*/
def inverse: MatrixC =
{
val b = new MatrixC (this) // copy 'this' matrix into b
val c = eye (dim1) // let c represent the augmentation
for (i <- b.range1) {
var pivot = b.v(i)(i)
if (pivot =~ _0) {
val k = partialPivoting (b, i) // find the maxiumum element below pivot
b.swap (i, k, i) // in b, swap rows i and k from column i
c.swap (i, k, 0) // in c, swap rows i and k from column 0
pivot = b.v(i)(i) // reset the pivot
} // if
for (j <- b.range2) {
b.v(i)(j) /= pivot
c.v(i)(j) /= pivot
} // for
for (k <- 0 until dim1 if k != i) {
val mul = b.v(k)(i)
for (j <- b.range2) {
b.v(k)(j) -= mul * b.v(i)(j)
c.v(k)(j) -= mul * c.v(i)(j)
} // for
} // for
} // for
c
} // inverse
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Invert in-place 'this' matrix (requires a square matrix) and uses partial pivoting.
* Note: this method turns the orginal matrix into the identity matrix.
* The inverse is returned and is captured by assignment.
*/
def inverse_ip: MatrixC =
{
var b = this // use 'this' matrix for b
val c = eye (dim1) // let c represent the augmentation
for (i <- b.range1) {
var pivot = b.v(i)(i)
if (pivot =~ _0) {
val k = partialPivoting (b, i) // find the maxiumum element below pivot
b.swap (i, k, i) // in b, swap rows i and k from column i
c.swap (i, k, 0) // in c, swap rows i and k from column 0
pivot = b.v(i)(i) // reset the pivot
} // if
for (j <- b.range2) {
b.v(i)(j) /= pivot
c.v(i)(j) /= pivot
} // for
for (k <- 0 until dim1 if k != i) {
val mul = b.v(k)(i)
for (j <- b.range2) {
b.v(k)(j) -= mul * b.v(i)(j)
c.v(k)(j) -= mul * c.v(i)(j)
} // for
} // for
} // for
c // return the solution
} // inverse_ip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Use Gauss-Jordan reduction on 'this' matrix to make the left part embed an
* identity matrix. A constraint on this 'm-by-n' matrix is that 'n >= m'.
* It can be used to solve 'a * x = b': augment 'a' with 'b' and call reduce.
* Takes '[a | b]' to '[I | x]'.
*/
def reduce: MatrixC =
{
if (dim2 < dim1) flaw ("reduce", "requires n (columns) >= m (rows)")
val b = new MatrixC (this) // copy 'this' matrix into b
for (i <- b.range1) {
var pivot = b.v(i)(i)
if (pivot =~ _0) {
val k = partialPivoting (b, i) // find the maxiumum element below pivot
b.swap (i, k, i) // in b, swap rows i and k from column i
pivot = b.v(i)(i) // reset the pivot
} // if
for (j <- b.range2) b.v(i)(j) /= pivot
for (k <- 0 until dim1 if k != i) {
val mul = b.v(k)(i)
for (j <- b.range2) b.v(k)(j) -= mul * b.v(i)(j)
} // for
} // for
b
} // reduce
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Use Gauss-Jordan reduction in-place on 'this' matrix to make the left part
* embed an identity matrix. A constraint on this 'm-by-n' matrix is that 'n >= m'.
* It can be used to solve 'a * x = b': augment 'a' with 'b' and call reduce.
* Takes '[a | b]' to '[I | x]'.
*/
def reduce_ip ()
{
if (dim2 < dim1) flaw ("reduce", "requires n (columns) >= m (rows)")
val b = this // use 'this' matrix for b
for (i <- b.range1) {
var pivot = b.v(i)(i)
if (pivot =~ _0) {
val k = partialPivoting (b, i) // find the maxiumum element below pivot
b.swap (i, k, i) // in b, swap rows i and k from column i
pivot = b.v(i)(i) // reset the pivot
} // if
for (j <- b.range2) b.v(i)(j) /= pivot
for (k <- 0 until dim1 if k != i) {
val mul = b.v(k)(i)
for (j <- b.range2) b.v(k)(j) -= mul * b.v(i)(j)
} // for
} // for
} // reduce_ip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Clean values in 'this' matrix at or below the threshold 'thres' by setting
* them to zero. Iterative algorithms give approximate values and if very close
* to zero, may throw off other calculations, e.g., in computing eigenvectors.
* @param thres the cutoff threshold (a small value)
* @param relative whether to use relative or absolute cutoff
*/
def clean (thres: Double, relative: Boolean = true): MatrixC =
{
val s = if (relative) mag else _1 // use matrix magnitude or 1
for (i <- range1; j <- range2) if (ABS (v(i)(j)) <= thres * s) v(i)(j) = _0
this
} // clean
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the (right) nullspace of 'this' 'm-by-n' matrix (requires 'n = m+1')
* by performing Gauss-Jordan reduction and extracting the negation of the
* last column augmented by 1.
* <p>
* nullspace (a) = set of orthogonal vectors v s.t. a * v = 0
* <p>
* The left nullspace of matrix 'a' is the same as the right nullspace of 'a.t'.
* FIX: need a more robust algorithm for computing nullspace (@see Fac_QR.scala).
* FIX: remove the 'n = m+1' restriction.
* @see http://ocw.mit.edu/courses/mathematics/18-06sc-linear-algebra-fall-2011/ax-b-and-the-four-subspaces
* /solving-ax-0-pivot-variables-special-solutions/MIT18_06SCF11_Ses1.7sum.pdf
*/
def nullspace: VectorC =
{
if (dim2 != dim1 + 1) flaw ("nullspace", "requires n (columns) = m (rows) + 1")
reduce.col(dim2 - 1) * -_1 ++ _1
} // nullspace
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute in-place the (right) nullspace of 'this' 'm-by-n' matrix (requires 'n = m+1')
* by performing Gauss-Jordan reduction and extracting the negation of the
* last column augmented by 1.
* <p>
* nullspace (a) = set of orthogonal vectors v s.t. a * v = 0
* <p>
* The left nullspace of matrix 'a' is the same as the right nullspace of 'a.t'.
* FIX: need a more robust algorithm for computing nullspace (@see Fac_QR.scala).
* FIX: remove the 'n = m+1' restriction.
* @see http://ocw.mit.edu/courses/mathematics/18-06sc-linear-algebra-fall-2011/ax-b-and-the-four-subspaces
* /solving-ax-0-pivot-variables-special-solutions/MIT18_06SCF11_Ses1.7sum.pdf
*/
def nullspace_ip: VectorC =
{
if (dim2 != dim1 + 1) flaw ("nullspace", "requires n (columns) = m (rows) + 1")
reduce_ip
col(dim2 - 1) * -_1 ++ _1
} // nullspace_ip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the trace of 'this' matrix, i.e., the sum of the elements on the
* main diagonal. Should also equal the sum of the eigenvalues.
* @see Eigen.scala
*/
def trace: Complex =
{
if ( ! isSquare) flaw ("trace", "trace only works on square matrices")
var sum = _0
for (i <- range1) sum += v(i)(i)
sum
} // trace
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the sum of 'this' matrix, i.e., the sum of its elements.
*/
def sum: Complex =
{
var sum = _0
for (i <- range1; j <- range2) sum += v(i)(j)
sum
} // sum
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the abs sum of 'this' matrix, i.e., the sum of the absolute value
* of its elements. This is useful for comparing matrices '(a - b).sumAbs'.
*/
def sumAbs: Complex =
{
var sum = _0
for (i <- range1; j <- range2) sum += ABS (v(i)(j))
sum
} // sumAbs
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the sum of the lower triangular region of 'this' matrix.
*/
def sumLower: Complex =
{
var sum = _0
for (i <- range1; j <- 0 until i) sum += v(i)(j)
sum
} // sumLower
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the determinant of 'this' matrix. The value of the determinant
* indicates, among other things, whether there is a unique solution to a
* system of linear equations (a nonzero determinant).
*/
def det: Complex =
{
if ( ! isSquare) flaw ("det", "determinant only works on square matrices")
var sum = _0
var b: MatrixC = null
for (j <- range2) {
b = sliceExclude (0, j) // the submatrix that excludes row 0 and column j
sum += (if (j % 2 == 0) v(0)(j) * (if (b.dim1 == 1) b.v(0)(0) else b.det)
else -v(0)(j) * (if (b.dim1 == 1) b.v(0)(0) else b.det))
} // for
sum
} // det
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Check whether 'this' matrix is rectangular (all rows have the same number
* of columns).
*/
def isRectangular: Boolean =
{
for (i <- range1 if v(i).length != dim2) return false
true
} // isRectangular
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' real (double precision) matrix to a string.
*/
override def toString: String =
{
var sb = new StringBuilder ("\nMatrixC(")
if (dim1 == 0) return sb.append (")").mkString
for (i <- range1) {
for (j <- range2) {
sb.append (fString.format (v(i)(j)))
if (j == dim2-1) sb.replace (sb.length-1, sb.length, "\n\t")
} // for
} // for
sb.replace (sb.length-3, sb.length, ")").mkString
} // toString
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Write 'this' matrix to a CSV-formatted text file with name 'fileName'.
* @param fileName the name of file to hold the data
*/
def write (fileName: String)
{
val out = new PrintWriter (fileName)
for (i <- range1) {
for (j <- range2) { out.print (v(i)(j)); if (j < dim2-1) out.print (",") }
out.println ()
} // for
out.close
} // write
} // MatrixC class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MatrixC` companion object provides operations for `MatrixC` that don't require
* 'this' (like static methods in Java). It provides factory methods for building
* matrices from files or vectors.
*/
object MatrixC extends Error
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a matrix and assign values from the array of vectors 'u'.
* @param u the array of vectors to assign
* @param columnwise whether the vectors are treated as column or row vectors
*/
def apply (u: Array [VectorC], columnwise: Boolean = true): MatrixC =
{
var x: MatrixC = null
val u_dim = u(0).dim
if (columnwise) {
x = new MatrixC (u_dim, u.length)
for (j <- 0 until u.length) x.setCol (j, u(j)) // assign column vectors
} else {
x = new MatrixC (u.length, u_dim)
for (i <- 0 until u_dim) x(i) = u(i) // assign row vectors
} // if
x
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a matrix and assign values from the Scala `Vector` of vectors 'u'.
* Assumes vectors are columwise.
* @param u the Vector of vectors to assign
*/
def apply (u: Vector [VectorC]): MatrixC =
{
val u_dim = u(0).dim
val x = new MatrixC (u_dim, u.length)
for (j <- 0 until u.length) x.setCol (j, u(j)) // assign column vectors
x
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a matrix by reading from a text file, e.g., a CSV file.
* @param fileName the name of file holding the data
*/
def apply (fileName: String): MatrixC =
{
val sp = ',' // character separating the values
val lines = fromFile (fileName).getLines.toArray // get the lines from file
val (m, n) = (lines.length, lines(0).split (sp).length)
val x = new MatrixC (m, n)
for (i <- 0 until m) x(i) = VectorC (lines(i).split (sp))
x
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create an 'm-by-n' identity matrix I (ones on main diagonal, zeros elsewhere).
* If 'n' is <= 0, set it to 'm' for a square identity matrix.
* @param m the row dimension of the matrix
* @param n the column dimension of the matrix (defaults to 0 => square matrix)
*/
def eye (m: Int, n: Int = 0): MatrixC =
{
val nn = if (n <= 0) m else n // square matrix, if n <= 0
val mn = if (m <= nn) m else nn // length of main diagonal
val c = new MatrixC (m, nn)
for (i <- 0 until mn) c.v(i)(i) = _1
c
} // eye
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (row) vectors 'u' and 'w' to form a matrix with 2 rows.
* @param u the vector to be concatenated as the new first row in matrix
* @param w the vector to be concatenated as the new second row in matrix
*/
def ++ (u: VectorC, w: VectorC): MatrixC =
{
if (u.dim != w.dim) flaw ("++", "vector dimensions do not match")
val c = new MatrixC (2, u.dim)
c(0) = u
c(1) = w
c
} // ++
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (column) vectors 'u' and 'w' to form a matrix with 2 columns.
* @param u the vector to be concatenated as the new first column in matrix
* @param w the vector to be concatenated as the new second column in matrix
*/
def ++^ (u: VectorC, w: VectorC): MatrixC =
{
if (u.dim != w.dim) flaw ("++^", "vector dimensions do not match")
val c = new MatrixC (u.dim, 2)
c.setCol (0, u)
c.setCol (1, w)
c
} // ++^
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply vector 'u' by matrix 'a'. Treat 'u' as a row vector.
* @param u the vector to multiply by
* @param a the matrix to multiply by (requires sameCrossDimensions)
*/
def times (u: VectorC, a: MatrixC): VectorC =
{
if (u.dim != a.dim1) flaw ("times", "vector * matrix - incompatible cross dimensions")
val c = new VectorC (a.dim2)
for (j <- a.range2) {
var sum = _0
for (k <- a.range1) sum += u(k) * a.v(k)(j)
c(j) = sum
} // for
c
} // times
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the outer product of vector 'x' and vector 'y'. The result of the
* outer product is a matrix where 'c(i, j)' is the product of 'i'-th element
* of 'x' with the 'j'-th element of 'y'.
* @param x the first vector
* @param y the second vector
*/
def outer (x: VectorC, y: VectorC): MatrixC =
{
val c = new MatrixC (x.dim, y.dim)
for (i <- 0 until x.dim; j <- 0 until y.dim) c(i, j) = x(i) * y(j)
c
} // outer
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix from two vectors 'x' and 'y', row-wise.
* @param x the first vector -> row 0
* @param y the second vector -> row 1
*/
def form_rw (x: VectorC, y: VectorC): MatrixC =
{
if (x.dim != y.dim) flaw ("form_rw", "dimensions of x and y must be the same")
val cols = x.dim
val c = new MatrixC (2, cols)
c(0) = x
c(1) = y
c
} // form_rw
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix from scalar 'x' and a vector 'y', row-wise.
* @param x the first scalar -> row 0 (repeat scalar)
* @param y the second vector -> row 1
*/
def form_rw (x: Complex, y: VectorC): MatrixC =
{
val cols = y.dim
val c = new MatrixC (2, cols)
for (j <- 0 until cols) c(0, j) = x
c(1) = y
c
} // form_rw
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix from a vector 'x' and a scalar 'y', row-wise.
* @param x the first vector -> row 0
* @param y the second scalar -> row 1 (repeat scalar)
*/
def form_rw (x: VectorC, y: Complex): MatrixC =
{
val cols = x.dim
val c = new MatrixC (2, cols)
c(0) = x
for (j <- 0 until cols) c(1, j) = y
c
} // form_rw
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix from two vectors 'x' and 'y', column-wise.
* @param x the first vector -> column 0
* @param y the second vector -> column 1
*/
def form_cw (x: VectorC, y: VectorC): MatrixC =
{
if (x.dim != y.dim) flaw ("form_cw", "dimensions of x and y must be the same")
val rows = x.dim
val c = new MatrixC (rows, 2)
c.setCol(0, x)
c.setCol(1, y)
c
} // form_cw
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix from a scalar 'x' and a vector 'y', column-wise.
* @param x the first scalar -> column 0 (repeat scalar)
* @param y the second vector -> column 1
*/
def form_cw (x: Complex, y: VectorC): MatrixC =
{
val rows = y.dim
val c = new MatrixC (rows, 2)
for (i <- 0 until rows) c(i, 0) = x
c.setCol(1, y)
c
} // form_cw
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix from a vector 'x' and a scalar 'y', column-wise.
* @param x the first vector -> column 0
* @param y the second scalar -> column 1 (repeat scalar)
*/
def form_cw (x: VectorC, y: Complex): MatrixC =
{
val rows = x.dim
val c = new MatrixC (rows, 2)
c.setCol(0, x)
for (i <- 0 until rows) c(i, 1) = y
c
} // form_cw
} // MatrixC companion object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MatrixCTest` object tests the operations provided by `MatrixC` class.
* > run-main scalation.linalgebra.MatrixCTest
*/
object MatrixCTest extends App with PackageInfo
{
for (l <- 1 to 4) {
println ("\n\tTest MatrixC on real matrices of dim " + l)
val x = new MatrixC (l, l)
val y = new MatrixC (l, l)
x.set (2)
y.set (3)
println ("x + y = " + (x + y))
println ("x - y = " + (x - y))
println ("x * y = " + (x * y))
println ("x * 4 = " + (x * 4))
} // for
println ("\n\tTest MatrixC on additional operations")
val z = new MatrixC ((2, 2), 1, 2,
3, 2)
val t = new MatrixC ((3, 3), 1, 2, 3,
4, 3, 2,
1, 3, 1)
val zz = new MatrixC ((3, 3), 3, 1, 0,
1, 4, 2,
0, 2, 5)
val bz = VectorC (5, 3, 6)
val b = VectorC (8, 7)
val lu = z.lud
val lu2 = z.lud_npp
println ("z = " + z)
println ("z.t = " + z.t)
println ("z.lud = " + lu)
println ("z.lud_npp = " + lu2)
println ("z.solve = " + z.solve (lu._1, lu._2, b))
println ("zz.solve = " + zz.solve (zz.lud, bz))
println ("z.inverse = " + z.inverse)
println ("z.inverse_ip = " + z.inverse_ip)
println ("t.inverse = " + t.inverse)
println ("t.inverse_ip = " + t.inverse_ip)
println ("z.inv * b = " + z.inverse * b)
println ("z.det = " + z.det)
println ("z = " + z)
z *= z // in-place matrix multiplication
println ("z squared = " + z)
val w = new MatrixC ((2, 3), 2, 3, 5,
-4, 2, 3)
val v = new MatrixC ((3, 2), 2, -4,
3, 2,
5, 3)
println ("w = " + w)
println ("v = " + v)
println ("w.reduce = " + w.reduce)
println ("right: w.nullspace = " + w.nullspace)
println ("check right nullspace = " + w * w.nullspace)
println ("left: v.t.nullspace = " + v.t.nullspace)
println ("check left nullspace = " + MatrixC.times (v.t.nullspace, v))
for (row <- z) println ("row = " + row.deep)
val aa = new MatrixC ((3, 2), 1, 2,
3, 4,
5, 6)
val bb = new MatrixC ((2, 2), 1, 2,
3, 4)
println ("aa = " + aa)
println ("bb = " + bb)
println ("aa * bb = " + aa * bb)
aa *= bb
println ("aa *= bb = " + aa)
println ("aa dot bz = " + (aa dot bz))
println ("aa.t * bz = " + aa.t * bz)
val filename = getDataPath + "bb_matrix.csv"
bb.write (filename)
println ("bb_csv = " + MatrixC (filename))
} // MatrixCTest object
| scalation/fda | scalation_1.2/src/main/scala/scalation/linalgebra/mem_mapped/MatrixC.scala | Scala | mit | 64,452 |
/*******************************************************************************
Copyright (c) 2013, S-Core, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.DOMEvent
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.typing.models.AbsBuiltinFunc
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
object EventListener extends DOM {
private val name = "EventListener"
/* predefined locatoins */
val loc_proto = newSystemRecentLoc(name + "Proto")
val loc_ins = newSystemRecentLoc(name + "Ins")
/* prorotype */
private val prop_proto: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
("addEventListener", AbsBuiltinFunc("EventListener.handleEvent", 1))
)
/* initial property list */
def getInitList(): List[(Loc, List[(String, AbsProperty)])] = List(
(loc_proto, prop_proto)
)
def getSemanticMap(): Map[String, SemanticFun] = {
Map(
//case "EventListener.handleEvent"
)
}
def getPreSemanticMap(): Map[String, SemanticFun] = {
Map(
//case "EventListener.handleEvent"
)
}
def getDefMap(): Map[String, AccessFun] = {
Map(
//case "EventListener.handleEvent"
)
}
def getUseMap(): Map[String, AccessFun] = {
Map(
//case "EventListener.handleEvent"
)
}
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/DOMEvent/EventListener.scala | Scala | bsd-3-clause | 1,907 |
package mesosphere.marathon
package state
import mesosphere.marathon.core.readiness.ReadinessCheck
import mesosphere.marathon.stream.Implicits._
import scala.concurrent.duration._
object ReadinessCheckSerializer {
def fromProto(proto: Protos.ReadinessCheckDefinition): ReadinessCheck = {
def opt[T](
hasValue: Protos.ReadinessCheckDefinition => Boolean,
getValue: Protos.ReadinessCheckDefinition => T): Option[T] = {
if (hasValue(proto)) Some(getValue(proto))
else None
}
ReadinessCheck(
name = opt(_.hasName, _.getName).getOrElse(ReadinessCheck.DefaultName),
protocol =
opt(_.hasProtocol, _.getProtocol).map(ProtocolSerializer.fromProto).getOrElse(ReadinessCheck.DefaultProtocol),
path = opt(_.hasPath, _.getPath).getOrElse(ReadinessCheck.DefaultPath),
portName = opt(_.hasPortName, _.getPortName).getOrElse(ReadinessCheck.DefaultPortName),
interval = opt(_.hasIntervalMillis, _.getIntervalMillis.millis).getOrElse(ReadinessCheck.DefaultInterval),
timeout = opt(_.hasTimeoutMillis, _.getTimeoutMillis.millis).getOrElse(ReadinessCheck.DefaultTimeout),
httpStatusCodesForReady =
opt(
_.getHttpStatusCodeForReadyCount > 0,
_.getHttpStatusCodeForReadyList.map(_.intValue()).to[Set]
).getOrElse(ReadinessCheck.DefaultHttpStatusCodesForReady),
preserveLastResponse =
opt(_.hasPreserveLastResponse, _.getPreserveLastResponse).getOrElse(ReadinessCheck.DefaultPreserveLastResponse)
)
}
def toProto(check: ReadinessCheck): Protos.ReadinessCheckDefinition = {
Protos.ReadinessCheckDefinition.newBuilder()
.setName(check.name)
.setProtocol(ProtocolSerializer.toProto(check.protocol))
.setPath(check.path)
.setPortName(check.portName)
.setIntervalMillis(check.interval.toMillis)
.setTimeoutMillis(check.timeout.toMillis)
.addAllHttpStatusCodeForReady(check.httpStatusCodesForReady.map(java.lang.Integer.valueOf).asJava)
.setPreserveLastResponse(check.preserveLastResponse)
.build()
}
object ProtocolSerializer {
def fromProto(proto: Protos.ReadinessCheckDefinition.Protocol): ReadinessCheck.Protocol = {
proto match {
case Protos.ReadinessCheckDefinition.Protocol.HTTP => ReadinessCheck.Protocol.HTTP
case Protos.ReadinessCheckDefinition.Protocol.HTTPS => ReadinessCheck.Protocol.HTTPS
}
}
def toProto(protocol: ReadinessCheck.Protocol): Protos.ReadinessCheckDefinition.Protocol = {
protocol match {
case ReadinessCheck.Protocol.HTTP => Protos.ReadinessCheckDefinition.Protocol.HTTP
case ReadinessCheck.Protocol.HTTPS => Protos.ReadinessCheckDefinition.Protocol.HTTPS
}
}
}
}
| guenter/marathon | src/main/scala/mesosphere/marathon/state/ReadinessCheckSerializer.scala | Scala | apache-2.0 | 2,753 |
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.termstructures
import org.quantintel.ql.time.daycounters.DayCounter
import org.quantintel.ql.time.{Calendar, Date}
import org.quantintel.ql.util.{Observability, Observer}
/**
* @author Paul Bernard
*/
abstract class TermStructure extends Observer with Observability {
def maxDate() : Date
def calendar() : Calendar
def settlementDays() : Int
def timeFromReference(date: Date) : Double
def dayCounter() : DayCounter
def maxTime() : Double
def referenceDate() : Date
}
| quantintel/spectrum | financial/src/main/scala/org/quantintel/ql/termstructures/TermStructure.scala | Scala | apache-2.0 | 1,209 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.jobs.accumulo.mapreduce
import com.typesafe.scalalogging.LazyLogging
import org.apache.accumulo.core.data.{Key, Value}
import org.apache.accumulo.hadoop.mapreduce.AccumuloFileOutputFormat
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{Text, Writable}
import org.apache.hadoop.mapreduce.lib.output.{LazyOutputFormat, MultipleOutputs}
import org.apache.hadoop.mapreduce.{Counter, Job, Mapper, Reducer}
import org.geotools.data.DataStoreFinder
import org.locationtech.geomesa.accumulo.data.AccumuloIndexAdapter.VisibilityCache
import org.locationtech.geomesa.accumulo.data.{AccumuloDataStore, AccumuloWritableFeature}
import org.locationtech.geomesa.index.api.WritableFeature.FeatureWrapper
import org.locationtech.geomesa.index.api._
import org.locationtech.geomesa.index.conf.partition.TablePartition
import org.locationtech.geomesa.jobs.GeoMesaConfigurator
import org.locationtech.geomesa.jobs.mapreduce.GeoMesaOutputFormat.OutputCounters
import org.locationtech.geomesa.utils.index.IndexMode
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.util.control.NonFatal
/**
* Output format for writing RFiles directly to hdfs instead of using batch writers
*/
object GeoMesaAccumuloFileOutputFormat extends LazyLogging {
import scala.collection.JavaConverters._
val FilesPath = "files"
val SplitsPath = "splits"
/**
* Sets mapper class, reducer class, output format and associated options
*
* @param job job
* @param ds data store for output data
* @param params data store parameters for output data
* @param sft feature type to write (schema must exist already)
* @param output output path for rFiles
* @param index optional index to write
* @param partitions if writing to a partitioned store, the partitions being written to
*/
def configure(
job: Job,
ds: AccumuloDataStore,
params: Map[String, String],
sft: SimpleFeatureType,
output: Path,
index: Option[String],
partitions: Option[Seq[String]]): Unit = {
val indices = index match {
case None => ds.manager.indices(sft, IndexMode.Write)
case Some(i) => Seq(ds.manager.index(sft, i, IndexMode.Write))
}
val tables = partitions match {
case None => indices.flatMap(_.getTableNames(None))
case Some(parts) =>
Configurator.setPartitions(job.getConfiguration, parts)
logger.debug(s"Creating index tables for ${parts.length} partitions")
parts.flatMap { p =>
// create the partitions up front so we know the number of splits and reducers - this call is idempotent
indices.par.foreach(index => ds.adapter.createTable(index, Some(p), index.getSplits(Some(p))))
indices.flatMap(_.getTableNames(Some(p)))
}
}
if (tables.isEmpty) {
throw new IllegalArgumentException("No tables found for output")
}
GeoMesaConfigurator.setDataStoreOutParams(job.getConfiguration, params)
GeoMesaConfigurator.setIndicesOut(job.getConfiguration, indices.map(_.identifier))
GeoMesaConfigurator.setSerialization(job.getConfiguration, sft)
Configurator.setTypeName(job.getConfiguration, sft.getTypeName)
// using LazyOutputFormat prevents creating empty output files for regions with no data
LazyOutputFormat.setOutputFormatClass(job, classOf[AccumuloFileOutputFormat])
// note: this is equivalent to FileOutputFormat.setOutputPath(job, output)
AccumuloFileOutputFormat.configure.outputPath(new Path(output, FilesPath)).store(job)
job.setPartitionerClass(classOf[TableRangePartitioner])
TableRangePartitioner.setSplitsPath(job.getConfiguration, new Path(output, SplitsPath).toString)
var numReducers = 0
tables.foreach { table =>
val splits = ds.connector.tableOperations.listSplits(table).asScala
TableRangePartitioner.setTableOffset(job.getConfiguration, table, numReducers)
TableRangePartitioner.setTableSplits(job, table, splits)
numReducers += (splits.size + 1) // add one for the region before the first split point
}
job.setMapperClass(classOf[AccumuloFileMapper])
job.setMapOutputKeyClass(classOf[TableAndKey])
job.setMapOutputValueClass(classOf[Value])
job.setReducerClass(classOf[AccumuloFileReducer])
job.setOutputKeyClass(classOf[Key])
job.setOutputValueClass(classOf[Value])
job.setNumReduceTasks(numReducers)
}
class AccumuloFileMapper extends Mapper[Writable, SimpleFeature, TableAndKey, Value] with LazyLogging {
type MapContext = Mapper[Writable, SimpleFeature, TableAndKey, Value]#Context
private var ds: AccumuloDataStore = _
private var sft: SimpleFeatureType = _
private var wrapper: FeatureWrapper[WritableFeature] = _
private var partitioner: Option[TablePartition] = _
private var writers: Seq[(GeoMesaFeatureIndex[_, _], WriteConverter[_])] = _
private val visCache = new VisibilityCache()
private val tableAndKey = new TableAndKey(new Text(), null)
private var features: Counter = _
private var entries: Counter = _
private var failed: Counter = _
override def setup(context: MapContext): Unit = {
val params = GeoMesaConfigurator.getDataStoreOutParams(context.getConfiguration).asJava
ds = DataStoreFinder.getDataStore(params).asInstanceOf[AccumuloDataStore]
require(ds != null, "Could not find data store - check your configuration and hbase-site.xml")
sft = ds.getSchema(Configurator.getTypeName(context.getConfiguration))
require(sft != null, "Could not find schema - check your configuration")
val indexIds = GeoMesaConfigurator.getIndicesOut(context.getConfiguration).orNull
require(indexIds != null, "Indices to write was not set in the job configuration")
val indices = indexIds.map(ds.manager.index(sft, _, IndexMode.Write))
wrapper = AccumuloWritableFeature.wrapper(sft, ds.adapter.groups, indices)
partitioner = TablePartition(ds, sft)
writers = indices.map(i => (i, i.createConverter()))
features = context.getCounter(OutputCounters.Group, OutputCounters.Written)
entries = context.getCounter(OutputCounters.Group, "entries")
failed = context.getCounter(OutputCounters.Group, OutputCounters.Failed)
}
override def cleanup(context: MapContext): Unit = if (ds != null) { ds.dispose() }
override def map(key: Writable, value: SimpleFeature, context: MapContext): Unit = {
try {
val feature = wrapper.wrap(value)
val partition = partitioner.map(_.partition(value))
writers.foreach { case (index, writer) =>
index.getTableNames(partition) match {
case Seq(table) => tableAndKey.getTable.set(table)
case tables =>
val msg = if (tables.isEmpty) { "No table found" } else { "Multiple tables found" }
throw new IllegalStateException(msg + partition.map(p => s" for partition $p").getOrElse(""))
}
writer.convert(feature) match {
case kv: SingleRowKeyValue[_] =>
kv.values.foreach { value =>
tableAndKey.setKey(new Key(kv.row, value.cf, value.cq, visCache(value.vis), Long.MaxValue))
context.write(tableAndKey, new Value(value.value))
entries.increment(1L)
}
case mkv: MultiRowKeyValue[_] =>
mkv.rows.foreach { row =>
mkv.values.foreach { value =>
tableAndKey.setKey(new Key(row, value.cf, value.cq, visCache(value.vis), Long.MaxValue))
context.write(tableAndKey, new Value(value.value))
entries.increment(1L)
}
}
}
}
features.increment(1L)
} catch {
case NonFatal(e) =>
logger.error(s"Error writing feature ${Option(value).orNull}", e)
failed.increment(1L)
}
}
}
class AccumuloFileReducer extends Reducer[TableAndKey, Value, Key, Value] {
type ReducerContext = Reducer[TableAndKey, Value, Key, Value]#Context
private var id: String = _
private var out: MultipleOutputs[Key, Value] = _
override def setup(context: ReducerContext): Unit = {
id = context.getJobID.appendTo(new java.lang.StringBuilder("gm")).toString
out = new MultipleOutputs(context)
}
override def cleanup(context: ReducerContext): Unit = if (out != null) { out.close() }
override def reduce(key: TableAndKey, values: java.lang.Iterable[Value], context: ReducerContext): Unit = {
val path = s"${key.getTable}/$id"
val iter = values.iterator()
while (iter.hasNext) {
out.write(key.getKey, iter.next, path)
}
}
}
}
| locationtech/geomesa | geomesa-accumulo/geomesa-accumulo-jobs/src/main/scala/org/locationtech/geomesa/jobs/accumulo/mapreduce/GeoMesaAccumuloFileOutputFormat.scala | Scala | apache-2.0 | 9,240 |
package com.theseventhsense.collections
import com.theseventhsense.utils.collections.spark._
import com.theseventhsense.utils.collections.{BulkCollection, KVBulkCollection}
import com.theseventhsense.utils.collections.stdlib._
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.scalatest.{MustMatchers, WordSpec}
/**
* Created by erik on 12/8/16.
*/
trait BulkCollectionSpec extends WordSpec with MustMatchers {
val input = Seq("b", "a")
val result = Seq("a", "b")
def inputCollection: BulkCollection[String]
"utils-collectinos" should {
"construct and collect a collection" in {
inputCollection.collect mustEqual input
}
lazy val withKey = inputCollection.mapWithKey(x β x.hashCode)
"map keys from a collection" in {
withKey mustBe a[KVBulkCollection[Int, String]]
}
lazy val inputSorted: KVBulkCollection[Int, String] = withKey.sorted
"sort a kv collection by key and get its value" in {
inputSorted.values.collect mustEqual result
}
lazy val amapped: KVBulkCollection[Int, String] = inputSorted.mapKV { case (k, v) β (k, "a" + v) }
"map over a collection's keys and values" in {
amapped.values.collect mustEqual List("aa", "ab")
}
lazy val bmapped: KVBulkCollection[Int, String] = inputSorted.mapValues("b" + _)
"map over a collection's values" in {
bmapped.values.collect mustEqual List("ba", "bb")
}
"inner join two collections sharing a key" in {
lazy val innerJoined: KVBulkCollection[Int, (String, String)] = inputSorted.innerJoin(amapped)
innerJoined.collect mustEqual Seq((97, ("a", "aa")), (98, ("b", "ab")))
}
"left join two collections sharing a key" in {
lazy val leftJoined: KVBulkCollection[Int, (String, Option[String])] = inputSorted.leftOuterJoin(amapped)
leftJoined.collect mustEqual Seq((97, ("a", Some("aa"))), (98, ("b", Some("ab"))))
}
lazy val unioned = amapped.unionKV(bmapped)
"union two identical collections" in {
unioned.collect mustEqual Seq((97, "aa"), (98, "ab"), (97, "ba"), (98, "bb"))
}
"fold a collection by key" in {
lazy val folded = unioned.foldByKey(0)((acc, item) β acc + 1, (i1, i2) β 2)
folded.values.collect mustEqual Seq(2, 2)
}
}
}
class StdLibBulkCollectionSpec extends BulkCollectionSpec {
override def inputCollection = StdLibBulkCollection(input)
}
class SparkBulkCollectionSpec extends BulkCollectionSpec {
private lazy val localConf = new SparkConf().setMaster("local[*]")
private implicit lazy val spark = SparkSession.builder.config(localConf).getOrCreate()
override def inputCollection = SparkBulkCollection(spark.sparkContext.parallelize(input))
}
| 7thsense/utils-collections | src/test/scala/com/theseventhsense/collections/BulkCollectionSpec.scala | Scala | mit | 2,773 |
package lila.hub
import scala.concurrent.duration._
import actorApi.map._
import akka.actor._
import akka.pattern.{ ask, pipe }
import makeTimeout.short
trait ActorMap extends Actor {
private val actors = scala.collection.mutable.Map.empty[String, ActorRef]
def mkActor(id: String): Actor
def actorMapReceive: Receive = {
case Get(id) => sender ! getOrMake(id)
case Tell(id, msg) => getOrMake(id) forward msg
case TellAll(msg) => actors.values foreach (_ forward msg)
case TellIds(ids, msg) => ids foreach { id =>
actors get id foreach (_ forward msg)
}
case Ask(id, msg) => getOrMake(id) forward msg
case Terminated(actor) =>
context unwatch actor
actors foreach {
case (id, a) => if (a == actor) actors -= id
}
}
protected def size = actors.size
private def getOrMake(id: String) = actors get id getOrElse {
context.actorOf(Props(mkActor(id)), name = id) ~ { actor =>
actors += (id -> actor)
context watch actor
}
}
}
object ActorMap {
def apply(make: String => Actor) = new ActorMap {
def mkActor(id: String) = make(id)
def receive = actorMapReceive
}
}
| Happy0/lila | modules/hub/src/main/ActorMap.scala | Scala | mit | 1,186 |
package com.github.blemale.scaffeine
import org.scalatest._
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class LoadingCacheSpec extends AnyWordSpec with Matchers with OptionValues {
"LoadingCache" should {
"be a cache" in {
val cache = Scaffeine().build[String, String]((_: String) => "computed")
cache shouldBe a[Cache[_, _]]
}
"get or load value" in {
val cache = Scaffeine().build[String, String]((_: String) => "computed")
cache.put("foo", "present")
val fooValue = cache.get("foo")
val barValue = cache.get("bar")
fooValue should be("present")
barValue should be("computed")
}
"get or load all given values" in {
val cache = Scaffeine().build[String, String]((_: String) => "computed")
cache.put("foo", "present")
val values = cache.getAll(List("foo", "bar"))
values should contain only ("foo" -> "present", "bar" -> "computed")
}
"get or bulk load all given values" in {
val cache =
Scaffeine()
.build[String, String](
loader = (_: String) => "computed",
allLoader =
Some((keys: Iterable[String]) => keys.map(_ -> "bulked").toMap)
)
cache.put("foo", "present")
val values = cache.getAll(List("foo", "bar"))
values should contain only ("foo" -> "present", "bar" -> "bulked")
}
"refresh value with loader when no refresh loader provided" in {
val cache =
Scaffeine()
.executor(DirectExecutor)
.build[String, String]((_: String) => "computed")
cache.put("foo", "present")
cache.refresh("foo")
val fooValue = cache.get("foo")
fooValue should be("computed")
}
"refresh value wih refresh loader when provided" in {
val cache =
Scaffeine()
.executor(DirectExecutor)
.build[String, String](
loader = (_: String) => "computed",
reloadLoader = Some((_: String, _: String) => "reload")
)
cache.put("foo", "present")
cache.refresh("foo")
val fooValue = cache.get("foo")
fooValue should be("reload")
}
}
}
| blemale/scaffeine | src/test/scala/com/github/blemale/scaffeine/LoadingCacheSpec.scala | Scala | apache-2.0 | 2,226 |
package interretis.utils
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
object SparkContextBuilder {
def buildContext(appName: String): SparkContext = {
val config = new SparkConf
config setAppName appName
new SparkContext(config)
}
def buildSqlContext(appName: String): SQLContext = {
val context = buildContext(appName)
new SQLContext(context)
}
}
| MarekDudek/spark-certification | src/main/scala/interretis/utils/SparkContextBuilder.scala | Scala | mit | 449 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogdebugger.ui.components
import scala.swing._
import javax.swing.JPopupMenu
import javax.swing.event.PopupMenuListener
/*
* Created with IntelliJ IDEA.
* User: gonztobi
* Date: 2/4/13
* Time: 3:46 PM
*
* An attempt to build a custom drop-down menu button. Swing already has
* dropdown menus, but they can only be added to a JMenuBar, which must be
* installed at the top of the window/frame. I'd rather make use of space on
* our existing toolbars, and allow for custom menu items to be installed.
*/
/** A button that, when clicked, pops up a menu anchored at this button (as
* opposed to the mouse's current position).
*
* Add scala.swing.Menu Menus and scala.swing.MenuItem MenuItems to
* it just like you would an ordinary menu. */
class PopupButton(label: String) extends Button(label) {
val menu = new PopupMenu
action = Action(label) {
menu.show(PopupButton.this, 0, PopupButton.this.bounds.height)
}
def contents = menu.contents
}
// The below classes were submitted for integration with the Scala Swing
// library, but hadn't been accepted by the time 2.9.2 was released. They'll
// probably show up in 2.10.
// Bizarrely, PopupMenu was part of 2.10.1-RC1, but not the final 2.10.1
object PopupMenu {
private[PopupMenu] trait JPopupMenuMixin { def popupMenuWrapper: PopupMenu }
}
/**
* A popup menu.
*
* Example usage:
*
* {{{
* val popupMenu = new PopupMenu {
* contents += new Menu("menu 1") {
* contents += new RadioMenuItem("radio 1.1")
* contents += new RadioMenuItem("radio 1.2")
* }
* contents += new Menu("menu 2") {
* contents += new RadioMenuItem("radio 2.1")
* contents += new RadioMenuItem("radio 2.2")
* }
* }
* val button = new Button("Show Popup Menu")
* reactions += {
* case e: ButtonClicked => popupMenu.show(button, 0, button.bounds.height)
* }
* listenTo(button)
* }}}
*
* @see javax.swing.JPopupMenu
*/
class PopupMenu extends Component with SequentialContainer.Wrapper with Publisher {
override lazy val peer: JPopupMenu = new JPopupMenu with PopupMenu.JPopupMenuMixin with SuperMixin {
def popupMenuWrapper = PopupMenu.this
}
peer.addPopupMenuListener(new PopupMenuListener {
def popupMenuWillBecomeVisible(e: javax.swing.event.PopupMenuEvent) {
publish(PopupMenuWillBecomeVisible(PopupMenu.this))
}
def popupMenuWillBecomeInvisible(e: javax.swing.event.PopupMenuEvent) {
publish(PopupMenuWillBecomeInvisible(PopupMenu.this))
}
def popupMenuCanceled(e: javax.swing.event.PopupMenuEvent) {
publish(PopupMenuCanceled(PopupMenu.this))
}
})
def show(invoker: Component, x: Int, y: Int) { peer.show(invoker.peer, x, y) }
def margin: Insets = peer.getMargin
def label: String = peer.getLabel
def label_=(s: String) { peer.setLabel(s) }
}
abstract class PopupMenuEvent extends scala.swing.event.ComponentEvent
case class PopupMenuWillBecomeVisible(source: PopupMenu) extends PopupMenuEvent
case class PopupMenuWillBecomeInvisible(source: PopupMenu) extends PopupMenuEvent
case class PopupMenuCanceled(source: PopupMenu) extends PopupMenuEvent
| hpe-cct/cct-core | src/main/scala/cogdebugger/ui/components/PopupButton.scala | Scala | apache-2.0 | 3,764 |
package temportalist.compression.main.client
import net.minecraft.client.gui.GuiScreen
import net.minecraftforge.fml.relauncher.{Side, SideOnly}
import temportalist.compression.main.common.Compression
import temportalist.origin.foundation.client.gui.GuiConfigBase
/**
*
* Created by TheTemportalist on 4/14/2016.
*
* @author TheTemportalist
*/
@SideOnly(Side.CLIENT)
class GuiConfig(guiScreen: GuiScreen) extends GuiConfigBase(guiScreen, Compression) {}
| TheTemportalist/Compression | src/main/scala/temportalist/compression/main/client/GuiConfig.scala | Scala | apache-2.0 | 466 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.internal
import java.util.concurrent.TimeUnit
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.metrics.GarbageCollectionMetrics
import org.apache.spark.network.shuffle.Constants
import org.apache.spark.network.util.ByteUnit
import org.apache.spark.scheduler.{EventLoggingListener, SchedulingMode}
import org.apache.spark.shuffle.sort.io.LocalDiskShuffleDataIO
import org.apache.spark.storage.{DefaultTopologyMapper, RandomBlockReplicationPolicy}
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.unsafe.sort.UnsafeSorterSpillReader.MAX_BUFFER_SIZE_BYTES
package object config {
private[spark] val SPARK_DRIVER_PREFIX = "spark.driver"
private[spark] val SPARK_EXECUTOR_PREFIX = "spark.executor"
private[spark] val SPARK_TASK_PREFIX = "spark.task"
private[spark] val SPARK_RESOURCES_COORDINATE =
ConfigBuilder("spark.resources.coordinate.enable")
.doc("Whether to coordinate resources automatically among workers/drivers(client only) " +
"in Standalone. If false, the user is responsible for configuring different resources " +
"for workers/drivers that run on the same host.")
.booleanConf
.createWithDefault(true)
private[spark] val SPARK_RESOURCES_DIR =
ConfigBuilder("spark.resources.dir")
.doc("Directory used to coordinate resources among workers/drivers(client only) in " +
"Standalone. Default is SPARK_HOME. Make sure to use the same directory for worker " +
"and drivers in client mode that run on the same host. Don't clean up this directory " +
"while workers/drivers are still alive to avoid the most likely resources conflict. ")
.stringConf
.createOptional
private[spark] val DRIVER_RESOURCES_FILE =
ConfigBuilder("spark.driver.resourcesFile")
.internal()
.doc("Path to a file containing the resources allocated to the driver. " +
"The file should be formatted as a JSON array of ResourceAllocation objects. " +
"Only used internally in standalone mode.")
.stringConf
.createOptional
private[spark] val DRIVER_CLASS_PATH =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_CLASSPATH).stringConf.createOptional
private[spark] val DRIVER_JAVA_OPTIONS =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS)
.withPrepended(SparkLauncher.DRIVER_DEFAULT_JAVA_OPTIONS)
.stringConf
.createOptional
private[spark] val DRIVER_LIBRARY_PATH =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH).stringConf.createOptional
private[spark] val DRIVER_USER_CLASS_PATH_FIRST =
ConfigBuilder("spark.driver.userClassPathFirst").booleanConf.createWithDefault(false)
private[spark] val DRIVER_CORES = ConfigBuilder("spark.driver.cores")
.doc("Number of cores to use for the driver process, only in cluster mode.")
.intConf
.createWithDefault(1)
private[spark] val DRIVER_MEMORY = ConfigBuilder(SparkLauncher.DRIVER_MEMORY)
.doc("Amount of memory to use for the driver process, in MiB unless otherwise specified.")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("1g")
private[spark] val DRIVER_MEMORY_OVERHEAD = ConfigBuilder("spark.driver.memoryOverhead")
.doc("The amount of non-heap memory to be allocated per driver in cluster mode, " +
"in MiB unless otherwise specified.")
.bytesConf(ByteUnit.MiB)
.createOptional
private[spark] val DRIVER_LOG_DFS_DIR =
ConfigBuilder("spark.driver.log.dfsDir").stringConf.createOptional
private[spark] val DRIVER_LOG_LAYOUT =
ConfigBuilder("spark.driver.log.layout")
.stringConf
.createOptional
private[spark] val DRIVER_LOG_PERSISTTODFS =
ConfigBuilder("spark.driver.log.persistToDfs.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val DRIVER_LOG_ALLOW_EC =
ConfigBuilder("spark.driver.log.allowErasureCoding")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_ENABLED = ConfigBuilder("spark.eventLog.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_DIR = ConfigBuilder("spark.eventLog.dir")
.stringConf
.createWithDefault(EventLoggingListener.DEFAULT_LOG_DIR)
private[spark] val EVENT_LOG_COMPRESS =
ConfigBuilder("spark.eventLog.compress")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_BLOCK_UPDATES =
ConfigBuilder("spark.eventLog.logBlockUpdates.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_ALLOW_EC =
ConfigBuilder("spark.eventLog.allowErasureCoding")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_TESTING =
ConfigBuilder("spark.eventLog.testing")
.internal()
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_OUTPUT_BUFFER_SIZE = ConfigBuilder("spark.eventLog.buffer.kb")
.doc("Buffer size to use when writing to output streams, in KiB unless otherwise specified.")
.bytesConf(ByteUnit.KiB)
.createWithDefaultString("100k")
private[spark] val EVENT_LOG_STAGE_EXECUTOR_METRICS =
ConfigBuilder("spark.eventLog.logStageExecutorMetrics.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_PROCESS_TREE_METRICS =
ConfigBuilder("spark.eventLog.logStageExecutorProcessTreeMetrics.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_GC_METRICS_YOUNG_GENERATION_GARBAGE_COLLECTORS =
ConfigBuilder("spark.eventLog.gcMetrics.youngGenerationGarbageCollectors")
.doc("Names of supported young generation garbage collector. A name usually is " +
" the return of GarbageCollectorMXBean.getName. The built-in young generation garbage " +
s"collectors are ${GarbageCollectionMetrics.YOUNG_GENERATION_BUILTIN_GARBAGE_COLLECTORS}")
.stringConf
.toSequence
.createWithDefault(GarbageCollectionMetrics.YOUNG_GENERATION_BUILTIN_GARBAGE_COLLECTORS)
private[spark] val EVENT_LOG_GC_METRICS_OLD_GENERATION_GARBAGE_COLLECTORS =
ConfigBuilder("spark.eventLog.gcMetrics.oldGenerationGarbageCollectors")
.doc("Names of supported old generation garbage collector. A name usually is " +
"the return of GarbageCollectorMXBean.getName. The built-in old generation garbage " +
s"collectors are ${GarbageCollectionMetrics.OLD_GENERATION_BUILTIN_GARBAGE_COLLECTORS}")
.stringConf
.toSequence
.createWithDefault(GarbageCollectionMetrics.OLD_GENERATION_BUILTIN_GARBAGE_COLLECTORS)
private[spark] val EVENT_LOG_OVERWRITE =
ConfigBuilder("spark.eventLog.overwrite").booleanConf.createWithDefault(false)
private[spark] val EVENT_LOG_CALLSITE_LONG_FORM =
ConfigBuilder("spark.eventLog.longForm.enabled").booleanConf.createWithDefault(false)
private[spark] val EXECUTOR_ID =
ConfigBuilder("spark.executor.id").stringConf.createOptional
private[spark] val EXECUTOR_CLASS_PATH =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_CLASSPATH).stringConf.createOptional
private[spark] val EXECUTOR_HEARTBEAT_DROP_ZERO_ACCUMULATOR_UPDATES =
ConfigBuilder("spark.executor.heartbeat.dropZeroAccumulatorUpdates")
.internal()
.booleanConf
.createWithDefault(true)
private[spark] val EXECUTOR_HEARTBEAT_INTERVAL =
ConfigBuilder("spark.executor.heartbeatInterval")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("10s")
private[spark] val EXECUTOR_HEARTBEAT_MAX_FAILURES =
ConfigBuilder("spark.executor.heartbeat.maxFailures").internal().intConf.createWithDefault(60)
private[spark] val EXECUTOR_METRICS_POLLING_INTERVAL =
ConfigBuilder("spark.executor.metrics.pollingInterval")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("0")
private[spark] val EXECUTOR_JAVA_OPTIONS =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_JAVA_OPTIONS)
.withPrepended(SparkLauncher.EXECUTOR_DEFAULT_JAVA_OPTIONS)
.stringConf
.createOptional
private[spark] val EXECUTOR_LIBRARY_PATH =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_LIBRARY_PATH).stringConf.createOptional
private[spark] val EXECUTOR_USER_CLASS_PATH_FIRST =
ConfigBuilder("spark.executor.userClassPathFirst").booleanConf.createWithDefault(false)
private[spark] val EXECUTOR_CORES = ConfigBuilder(SparkLauncher.EXECUTOR_CORES)
.intConf
.createWithDefault(1)
private[spark] val EXECUTOR_MEMORY = ConfigBuilder(SparkLauncher.EXECUTOR_MEMORY)
.doc("Amount of memory to use per executor process, in MiB unless otherwise specified.")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("1g")
private[spark] val EXECUTOR_MEMORY_OVERHEAD = ConfigBuilder("spark.executor.memoryOverhead")
.doc("The amount of non-heap memory to be allocated per executor in cluster mode, " +
"in MiB unless otherwise specified.")
.bytesConf(ByteUnit.MiB)
.createOptional
private[spark] val CORES_MAX = ConfigBuilder("spark.cores.max")
.doc("When running on a standalone deploy cluster or a Mesos cluster in coarse-grained " +
"sharing mode, the maximum amount of CPU cores to request for the application from across " +
"the cluster (not from each machine). If not set, the default will be " +
"`spark.deploy.defaultCores` on Spark's standalone cluster manager, or infinite " +
"(all available cores) on Mesos.")
.intConf
.createOptional
private[spark] val MEMORY_OFFHEAP_ENABLED = ConfigBuilder("spark.memory.offHeap.enabled")
.doc("If true, Spark will attempt to use off-heap memory for certain operations. " +
"If off-heap memory use is enabled, then spark.memory.offHeap.size must be positive.")
.withAlternative("spark.unsafe.offHeap")
.booleanConf
.createWithDefault(false)
private[spark] val MEMORY_OFFHEAP_SIZE = ConfigBuilder("spark.memory.offHeap.size")
.doc("The absolute amount of memory which can be used for off-heap allocation, " +
" in bytes unless otherwise specified. " +
"This setting has no impact on heap memory usage, so if your executors' total memory " +
"consumption must fit within some hard limit then be sure to shrink your JVM heap size " +
"accordingly. This must be set to a positive value when spark.memory.offHeap.enabled=true.")
.bytesConf(ByteUnit.BYTE)
.checkValue(_ >= 0, "The off-heap memory size must not be negative")
.createWithDefault(0)
private[spark] val MEMORY_STORAGE_FRACTION = ConfigBuilder("spark.memory.storageFraction")
.doc("Amount of storage memory immune to eviction, expressed as a fraction of the " +
"size of the region set aside by spark.memory.fraction. The higher this is, the " +
"less working memory may be available to execution and tasks may spill to disk more " +
"often. Leaving this at the default value is recommended. ")
.doubleConf
.checkValue(v => v >= 0.0 && v < 1.0, "Storage fraction must be in [0,1)")
.createWithDefault(0.5)
private[spark] val MEMORY_FRACTION = ConfigBuilder("spark.memory.fraction")
.doc("Fraction of (heap space - 300MB) used for execution and storage. The " +
"lower this is, the more frequently spills and cached data eviction occur. " +
"The purpose of this config is to set aside memory for internal metadata, " +
"user data structures, and imprecise size estimation in the case of sparse, " +
"unusually large records. Leaving this at the default value is recommended. ")
.doubleConf
.createWithDefault(0.6)
private[spark] val STORAGE_SAFETY_FRACTION = ConfigBuilder("spark.storage.safetyFraction")
.doubleConf
.createWithDefault(0.9)
private[spark] val STORAGE_UNROLL_MEMORY_THRESHOLD =
ConfigBuilder("spark.storage.unrollMemoryThreshold")
.doc("Initial memory to request before unrolling any block")
.longConf
.createWithDefault(1024 * 1024)
private[spark] val STORAGE_REPLICATION_PROACTIVE =
ConfigBuilder("spark.storage.replication.proactive")
.doc("Enables proactive block replication for RDD blocks. " +
"Cached RDD block replicas lost due to executor failures are replenished " +
"if there are any existing available replicas. This tries to " +
"get the replication level of the block to the initial number")
.booleanConf
.createWithDefault(false)
private[spark] val STORAGE_MEMORY_MAP_THRESHOLD =
ConfigBuilder("spark.storage.memoryMapThreshold")
.doc("Size in bytes of a block above which Spark memory maps when " +
"reading a block from disk. " +
"This prevents Spark from memory mapping very small blocks. " +
"In general, memory mapping has high overhead for blocks close to or below " +
"the page size of the operating system.")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("2m")
private[spark] val STORAGE_REPLICATION_POLICY =
ConfigBuilder("spark.storage.replication.policy")
.stringConf
.createWithDefaultString(classOf[RandomBlockReplicationPolicy].getName)
private[spark] val STORAGE_REPLICATION_TOPOLOGY_MAPPER =
ConfigBuilder("spark.storage.replication.topologyMapper")
.stringConf
.createWithDefaultString(classOf[DefaultTopologyMapper].getName)
private[spark] val STORAGE_CACHED_PEERS_TTL = ConfigBuilder("spark.storage.cachedPeersTtl")
.intConf.createWithDefault(60 * 1000)
private[spark] val STORAGE_MAX_REPLICATION_FAILURE =
ConfigBuilder("spark.storage.maxReplicationFailures")
.intConf.createWithDefault(1)
private[spark] val STORAGE_REPLICATION_TOPOLOGY_FILE =
ConfigBuilder("spark.storage.replication.topologyFile").stringConf.createOptional
private[spark] val STORAGE_EXCEPTION_PIN_LEAK =
ConfigBuilder("spark.storage.exceptionOnPinLeak")
.booleanConf
.createWithDefault(false)
private[spark] val STORAGE_BLOCKMANAGER_TIMEOUTINTERVAL =
ConfigBuilder("spark.storage.blockManagerTimeoutIntervalMs")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("60s")
private[spark] val STORAGE_BLOCKMANAGER_SLAVE_TIMEOUT =
ConfigBuilder("spark.storage.blockManagerSlaveTimeoutMs")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString(Network.NETWORK_TIMEOUT.defaultValueString)
private[spark] val STORAGE_CLEANUP_FILES_AFTER_EXECUTOR_EXIT =
ConfigBuilder("spark.storage.cleanupFilesAfterExecutorExit")
.doc("Whether or not cleanup the files not served by the external shuffle service " +
"on executor exits.")
.booleanConf
.createWithDefault(true)
private[spark] val DISKSTORE_SUB_DIRECTORIES =
ConfigBuilder("spark.diskStore.subDirectories")
.doc("Number of subdirectories inside each path listed in spark.local.dir for " +
"hashing Block files into.")
.intConf
.checkValue(_ > 0, "The number of subdirectories must be positive.")
.createWithDefault(64)
private[spark] val BLOCK_FAILURES_BEFORE_LOCATION_REFRESH =
ConfigBuilder("spark.block.failures.beforeLocationRefresh")
.doc("Max number of failures before this block manager refreshes " +
"the block locations from the driver.")
.intConf
.createWithDefault(5)
private[spark] val IS_PYTHON_APP = ConfigBuilder("spark.yarn.isPython").internal()
.booleanConf.createWithDefault(false)
private[spark] val CPUS_PER_TASK = ConfigBuilder("spark.task.cpus").intConf.createWithDefault(1)
private[spark] val DYN_ALLOCATION_ENABLED =
ConfigBuilder("spark.dynamicAllocation.enabled").booleanConf.createWithDefault(false)
private[spark] val DYN_ALLOCATION_TESTING =
ConfigBuilder("spark.dynamicAllocation.testing").booleanConf.createWithDefault(false)
private[spark] val DYN_ALLOCATION_MIN_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.minExecutors").intConf.createWithDefault(0)
private[spark] val DYN_ALLOCATION_INITIAL_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.initialExecutors")
.fallbackConf(DYN_ALLOCATION_MIN_EXECUTORS)
private[spark] val DYN_ALLOCATION_MAX_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.maxExecutors").intConf.createWithDefault(Int.MaxValue)
private[spark] val DYN_ALLOCATION_EXECUTOR_ALLOCATION_RATIO =
ConfigBuilder("spark.dynamicAllocation.executorAllocationRatio")
.doubleConf.createWithDefault(1.0)
private[spark] val DYN_ALLOCATION_CACHED_EXECUTOR_IDLE_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.cachedExecutorIdleTimeout")
.timeConf(TimeUnit.SECONDS)
.checkValue(_ >= 0L, "Timeout must be >= 0.")
.createWithDefault(Integer.MAX_VALUE)
private[spark] val DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.executorIdleTimeout")
.timeConf(TimeUnit.SECONDS)
.checkValue(_ >= 0L, "Timeout must be >= 0.")
.createWithDefault(60)
private[spark] val DYN_ALLOCATION_SHUFFLE_TRACKING =
ConfigBuilder("spark.dynamicAllocation.shuffleTracking.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val DYN_ALLOCATION_SHUFFLE_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.shuffleTimeout")
.timeConf(TimeUnit.MILLISECONDS)
.checkValue(_ >= 0L, "Timeout must be >= 0.")
.createWithDefault(Long.MaxValue)
private[spark] val DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.schedulerBacklogTimeout")
.timeConf(TimeUnit.SECONDS).createWithDefault(1)
private[spark] val DYN_ALLOCATION_SUSTAINED_SCHEDULER_BACKLOG_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.sustainedSchedulerBacklogTimeout")
.fallbackConf(DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT)
private[spark] val LOCALITY_WAIT = ConfigBuilder("spark.locality.wait")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("3s")
private[spark] val SHUFFLE_SERVICE_ENABLED =
ConfigBuilder("spark.shuffle.service.enabled").booleanConf.createWithDefault(false)
private[spark] val SHUFFLE_SERVICE_FETCH_RDD_ENABLED =
ConfigBuilder(Constants.SHUFFLE_SERVICE_FETCH_RDD_ENABLED)
.doc("Whether to use the ExternalShuffleService for fetching disk persisted RDD blocks. " +
"In case of dynamic allocation if this feature is enabled executors having only disk " +
"persisted blocks are considered idle after " +
"'spark.dynamicAllocation.executorIdleTimeout' and will be released accordingly.")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_SERVICE_DB_ENABLED =
ConfigBuilder("spark.shuffle.service.db.enabled")
.doc("Whether to use db in ExternalShuffleService. Note that this only affects " +
"standalone mode.")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_SERVICE_PORT =
ConfigBuilder("spark.shuffle.service.port").intConf.createWithDefault(7337)
private[spark] val KEYTAB = ConfigBuilder("spark.kerberos.keytab")
.doc("Location of user's keytab.")
.stringConf.createOptional
private[spark] val PRINCIPAL = ConfigBuilder("spark.kerberos.principal")
.doc("Name of the Kerberos principal.")
.stringConf.createOptional
private[spark] val KERBEROS_RELOGIN_PERIOD = ConfigBuilder("spark.kerberos.relogin.period")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("1m")
private[spark] val KERBEROS_RENEWAL_CREDENTIALS =
ConfigBuilder("spark.kerberos.renewal.credentials")
.doc(
"Which credentials to use when renewing delegation tokens for executors. Can be either " +
"'keytab', the default, which requires a keytab to be provided, or 'ccache', which uses " +
"the local credentials cache.")
.stringConf
.checkValues(Set("keytab", "ccache"))
.createWithDefault("keytab")
private[spark] val KERBEROS_FILESYSTEMS_TO_ACCESS =
ConfigBuilder("spark.kerberos.access.hadoopFileSystems")
.doc("Extra Hadoop filesystem URLs for which to request delegation tokens. The filesystem " +
"that hosts fs.defaultFS does not need to be listed here.")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val EXECUTOR_INSTANCES = ConfigBuilder("spark.executor.instances")
.intConf
.createOptional
private[spark] val PY_FILES = ConfigBuilder("spark.yarn.dist.pyFiles")
.internal()
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val TASK_MAX_DIRECT_RESULT_SIZE =
ConfigBuilder("spark.task.maxDirectResultSize")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(1L << 20)
private[spark] val TASK_MAX_FAILURES =
ConfigBuilder("spark.task.maxFailures")
.intConf
.createWithDefault(4)
private[spark] val TASK_REAPER_ENABLED =
ConfigBuilder("spark.task.reaper.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val TASK_REAPER_KILL_TIMEOUT =
ConfigBuilder("spark.task.reaper.killTimeout")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefault(-1)
private[spark] val TASK_REAPER_POLLING_INTERVAL =
ConfigBuilder("spark.task.reaper.pollingInterval")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("10s")
private[spark] val TASK_REAPER_THREAD_DUMP =
ConfigBuilder("spark.task.reaper.threadDump")
.booleanConf
.createWithDefault(true)
// Blacklist confs
private[spark] val BLACKLIST_ENABLED =
ConfigBuilder("spark.blacklist.enabled")
.booleanConf
.createOptional
private[spark] val MAX_TASK_ATTEMPTS_PER_EXECUTOR =
ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerExecutor")
.intConf
.createWithDefault(1)
private[spark] val MAX_TASK_ATTEMPTS_PER_NODE =
ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerNode")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILURES_PER_EXEC =
ConfigBuilder("spark.blacklist.application.maxFailedTasksPerExecutor")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILURES_PER_EXEC_STAGE =
ConfigBuilder("spark.blacklist.stage.maxFailedTasksPerExecutor")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILED_EXEC_PER_NODE =
ConfigBuilder("spark.blacklist.application.maxFailedExecutorsPerNode")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILED_EXEC_PER_NODE_STAGE =
ConfigBuilder("spark.blacklist.stage.maxFailedExecutorsPerNode")
.intConf
.createWithDefault(2)
private[spark] val BLACKLIST_TIMEOUT_CONF =
ConfigBuilder("spark.blacklist.timeout")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val BLACKLIST_KILL_ENABLED =
ConfigBuilder("spark.blacklist.killBlacklistedExecutors")
.booleanConf
.createWithDefault(false)
private[spark] val BLACKLIST_LEGACY_TIMEOUT_CONF =
ConfigBuilder("spark.scheduler.executorTaskBlacklistTime")
.internal()
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val BLACKLIST_FETCH_FAILURE_ENABLED =
ConfigBuilder("spark.blacklist.application.fetchFailure.enabled")
.booleanConf
.createWithDefault(false)
// End blacklist confs
private[spark] val UNREGISTER_OUTPUT_ON_HOST_ON_FETCH_FAILURE =
ConfigBuilder("spark.files.fetchFailure.unRegisterOutputOnHost")
.doc("Whether to un-register all the outputs on the host in condition that we receive " +
" a FetchFailure. This is set default to false, which means, we only un-register the " +
" outputs related to the exact executor(instead of the host) on a FetchFailure.")
.booleanConf
.createWithDefault(false)
private[spark] val LISTENER_BUS_EVENT_QUEUE_CAPACITY =
ConfigBuilder("spark.scheduler.listenerbus.eventqueue.capacity")
.intConf
.checkValue(_ > 0, "The capacity of listener bus event queue must be positive")
.createWithDefault(10000)
private[spark] val LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED =
ConfigBuilder("spark.scheduler.listenerbus.metrics.maxListenerClassesTimed")
.internal()
.intConf
.createWithDefault(128)
// This property sets the root namespace for metrics reporting
private[spark] val METRICS_NAMESPACE = ConfigBuilder("spark.metrics.namespace")
.stringConf
.createOptional
private[spark] val METRICS_CONF = ConfigBuilder("spark.metrics.conf")
.stringConf
.createOptional
private[spark] val PYSPARK_DRIVER_PYTHON = ConfigBuilder("spark.pyspark.driver.python")
.stringConf
.createOptional
private[spark] val PYSPARK_PYTHON = ConfigBuilder("spark.pyspark.python")
.stringConf
.createOptional
// To limit how many applications are shown in the History Server summary ui
private[spark] val HISTORY_UI_MAX_APPS =
ConfigBuilder("spark.history.ui.maxApplications").intConf.createWithDefault(Integer.MAX_VALUE)
private[spark] val IO_ENCRYPTION_ENABLED = ConfigBuilder("spark.io.encryption.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val IO_ENCRYPTION_KEYGEN_ALGORITHM =
ConfigBuilder("spark.io.encryption.keygen.algorithm")
.stringConf
.createWithDefault("HmacSHA1")
private[spark] val IO_ENCRYPTION_KEY_SIZE_BITS = ConfigBuilder("spark.io.encryption.keySizeBits")
.intConf
.checkValues(Set(128, 192, 256))
.createWithDefault(128)
private[spark] val IO_CRYPTO_CIPHER_TRANSFORMATION =
ConfigBuilder("spark.io.crypto.cipher.transformation")
.internal()
.stringConf
.createWithDefaultString("AES/CTR/NoPadding")
private[spark] val DRIVER_HOST_ADDRESS = ConfigBuilder("spark.driver.host")
.doc("Address of driver endpoints.")
.stringConf
.createWithDefault(Utils.localCanonicalHostName())
private[spark] val DRIVER_PORT = ConfigBuilder("spark.driver.port")
.doc("Port of driver endpoints.")
.intConf
.createWithDefault(0)
private[spark] val DRIVER_SUPERVISE = ConfigBuilder("spark.driver.supervise")
.doc("If true, restarts the driver automatically if it fails with a non-zero exit status. " +
"Only has effect in Spark standalone mode or Mesos cluster deploy mode.")
.booleanConf
.createWithDefault(false)
private[spark] val DRIVER_BIND_ADDRESS = ConfigBuilder("spark.driver.bindAddress")
.doc("Address where to bind network listen sockets on the driver.")
.fallbackConf(DRIVER_HOST_ADDRESS)
private[spark] val BLOCK_MANAGER_PORT = ConfigBuilder("spark.blockManager.port")
.doc("Port to use for the block manager when a more specific setting is not provided.")
.intConf
.createWithDefault(0)
private[spark] val DRIVER_BLOCK_MANAGER_PORT = ConfigBuilder("spark.driver.blockManager.port")
.doc("Port to use for the block manager on the driver.")
.fallbackConf(BLOCK_MANAGER_PORT)
private[spark] val IGNORE_CORRUPT_FILES = ConfigBuilder("spark.files.ignoreCorruptFiles")
.doc("Whether to ignore corrupt files. If true, the Spark jobs will continue to run when " +
"encountering corrupted or non-existing files and contents that have been read will still " +
"be returned.")
.booleanConf
.createWithDefault(false)
private[spark] val IGNORE_MISSING_FILES = ConfigBuilder("spark.files.ignoreMissingFiles")
.doc("Whether to ignore missing files. If true, the Spark jobs will continue to run when " +
"encountering missing files and the contents that have been read will still be returned.")
.booleanConf
.createWithDefault(false)
private[spark] val APP_CALLER_CONTEXT = ConfigBuilder("spark.log.callerContext")
.stringConf
.createOptional
private[spark] val FILES_MAX_PARTITION_BYTES = ConfigBuilder("spark.files.maxPartitionBytes")
.doc("The maximum number of bytes to pack into a single partition when reading files.")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(128 * 1024 * 1024)
private[spark] val FILES_OPEN_COST_IN_BYTES = ConfigBuilder("spark.files.openCostInBytes")
.doc("The estimated cost to open a file, measured by the number of bytes could be scanned in" +
" the same time. This is used when putting multiple files into a partition. It's better to" +
" over estimate, then the partitions with small files will be faster than partitions with" +
" bigger files.")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(4 * 1024 * 1024)
private[spark] val HADOOP_RDD_IGNORE_EMPTY_SPLITS =
ConfigBuilder("spark.hadoopRDD.ignoreEmptySplits")
.internal()
.doc("When true, HadoopRDD/NewHadoopRDD will not create partitions for empty input splits.")
.booleanConf
.createWithDefault(false)
private[spark] val SECRET_REDACTION_PATTERN =
ConfigBuilder("spark.redaction.regex")
.doc("Regex to decide which Spark configuration properties and environment variables in " +
"driver and executor environments contain sensitive information. When this regex matches " +
"a property key or value, the value is redacted from the environment UI and various logs " +
"like YARN and event logs.")
.regexConf
.createWithDefault("(?i)secret|password|token".r)
private[spark] val STRING_REDACTION_PATTERN =
ConfigBuilder("spark.redaction.string.regex")
.doc("Regex to decide which parts of strings produced by Spark contain sensitive " +
"information. When this regex matches a string part, that string part is replaced by a " +
"dummy value. This is currently used to redact the output of SQL explain commands.")
.regexConf
.createOptional
private[spark] val AUTH_SECRET =
ConfigBuilder("spark.authenticate.secret")
.stringConf
.createOptional
private[spark] val AUTH_SECRET_BIT_LENGTH =
ConfigBuilder("spark.authenticate.secretBitLength")
.intConf
.createWithDefault(256)
private[spark] val NETWORK_AUTH_ENABLED =
ConfigBuilder("spark.authenticate")
.booleanConf
.createWithDefault(false)
private[spark] val SASL_ENCRYPTION_ENABLED =
ConfigBuilder("spark.authenticate.enableSaslEncryption")
.booleanConf
.createWithDefault(false)
private[spark] val AUTH_SECRET_FILE =
ConfigBuilder("spark.authenticate.secret.file")
.doc("Path to a file that contains the authentication secret to use. The secret key is " +
"loaded from this path on both the driver and the executors if overrides are not set for " +
"either entity (see below). File-based secret keys are only allowed when using " +
"Kubernetes.")
.stringConf
.createOptional
private[spark] val AUTH_SECRET_FILE_DRIVER =
ConfigBuilder("spark.authenticate.secret.driver.file")
.doc("Path to a file that contains the authentication secret to use. Loaded by the " +
"driver. In Kubernetes client mode it is often useful to set a different secret " +
"path for the driver vs. the executors, since the driver may not be running in " +
"a pod unlike the executors. If this is set, an accompanying secret file must " +
"be specified for the executors. The fallback configuration allows the same path to be " +
"used for both the driver and the executors when running in cluster mode. File-based " +
"secret keys are only allowed when using Kubernetes.")
.fallbackConf(AUTH_SECRET_FILE)
private[spark] val AUTH_SECRET_FILE_EXECUTOR =
ConfigBuilder("spark.authenticate.secret.executor.file")
.doc("Path to a file that contains the authentication secret to use. Loaded by the " +
"executors only. In Kubernetes client mode it is often useful to set a different " +
"secret path for the driver vs. the executors, since the driver may not be running " +
"in a pod unlike the executors. If this is set, an accompanying secret file must be " +
"specified for the executors. The fallback configuration allows the same path to be " +
"used for both the driver and the executors when running in cluster mode. File-based " +
"secret keys are only allowed when using Kubernetes.")
.fallbackConf(AUTH_SECRET_FILE)
private[spark] val BUFFER_WRITE_CHUNK_SIZE =
ConfigBuilder("spark.buffer.write.chunkSize")
.internal()
.doc("The chunk size in bytes during writing out the bytes of ChunkedByteBuffer.")
.bytesConf(ByteUnit.BYTE)
.checkValue(_ <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH,
"The chunk size during writing out the bytes of ChunkedByteBuffer should" +
s" be less than or equal to ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
.createWithDefault(64 * 1024 * 1024)
private[spark] val CHECKPOINT_COMPRESS =
ConfigBuilder("spark.checkpoint.compress")
.doc("Whether to compress RDD checkpoints. Generally a good idea. Compression will use " +
"spark.io.compression.codec.")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_ACCURATE_BLOCK_THRESHOLD =
ConfigBuilder("spark.shuffle.accurateBlockThreshold")
.doc("Threshold in bytes above which the size of shuffle blocks in " +
"HighlyCompressedMapStatus is accurately recorded. This helps to prevent OOM " +
"by avoiding underestimating shuffle block size when fetch shuffle blocks.")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(100 * 1024 * 1024)
private[spark] val SHUFFLE_REGISTRATION_TIMEOUT =
ConfigBuilder("spark.shuffle.registration.timeout")
.doc("Timeout in milliseconds for registration to the external shuffle service.")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefault(5000)
private[spark] val SHUFFLE_REGISTRATION_MAX_ATTEMPTS =
ConfigBuilder("spark.shuffle.registration.maxAttempts")
.doc("When we fail to register to the external shuffle service, we will " +
"retry for maxAttempts times.")
.intConf
.createWithDefault(3)
private[spark] val REDUCER_MAX_BLOCKS_IN_FLIGHT_PER_ADDRESS =
ConfigBuilder("spark.reducer.maxBlocksInFlightPerAddress")
.doc("This configuration limits the number of remote blocks being fetched per reduce task " +
"from a given host port. When a large number of blocks are being requested from a given " +
"address in a single fetch or simultaneously, this could crash the serving executor or " +
"Node Manager. This is especially useful to reduce the load on the Node Manager when " +
"external shuffle is enabled. You can mitigate the issue by setting it to a lower value.")
.intConf
.checkValue(_ > 0, "The max no. of blocks in flight cannot be non-positive.")
.createWithDefault(Int.MaxValue)
private[spark] val MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM =
ConfigBuilder("spark.maxRemoteBlockSizeFetchToMem")
.doc("Remote block will be fetched to disk when size of the block is above this threshold " +
"in bytes. This is to avoid a giant request takes too much memory. Note this " +
"configuration will affect both shuffle fetch and block manager remote block fetch. " +
"For users who enabled external shuffle service, this feature can only work when " +
"external shuffle service is at least 2.3.0.")
.bytesConf(ByteUnit.BYTE)
// fetch-to-mem is guaranteed to fail if the message is bigger than 2 GB, so we might
// as well use fetch-to-disk in that case. The message includes some metadata in addition
// to the block data itself (in particular UploadBlock has a lot of metadata), so we leave
// extra room.
.checkValue(
_ <= Int.MaxValue - 512,
"maxRemoteBlockSizeFetchToMem cannot be larger than (Int.MaxValue - 512) bytes.")
.createWithDefaultString("200m")
private[spark] val TASK_METRICS_TRACK_UPDATED_BLOCK_STATUSES =
ConfigBuilder("spark.taskMetrics.trackUpdatedBlockStatuses")
.doc("Enable tracking of updatedBlockStatuses in the TaskMetrics. Off by default since " +
"tracking the block statuses can use a lot of memory and its not used anywhere within " +
"spark.")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_IO_PLUGIN_CLASS =
ConfigBuilder("spark.shuffle.sort.io.plugin.class")
.doc("Name of the class to use for shuffle IO.")
.stringConf
.createWithDefault(classOf[LocalDiskShuffleDataIO].getName)
private[spark] val SHUFFLE_FILE_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.file.buffer")
.doc("Size of the in-memory buffer for each shuffle file output stream, in KiB unless " +
"otherwise specified. These buffers reduce the number of disk seeks and system calls " +
"made in creating intermediate shuffle files.")
.bytesConf(ByteUnit.KiB)
.checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
s"The file buffer size must be positive and less than or equal to" +
s" ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024}.")
.createWithDefaultString("32k")
private[spark] val SHUFFLE_UNSAFE_FILE_OUTPUT_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.unsafe.file.output.buffer")
.doc("The file system for this buffer size after each partition " +
"is written in unsafe shuffle writer. In KiB unless otherwise specified.")
.bytesConf(ByteUnit.KiB)
.checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
s"The buffer size must be positive and less than or equal to" +
s" ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024}.")
.createWithDefaultString("32k")
private[spark] val SHUFFLE_DISK_WRITE_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.spill.diskWriteBufferSize")
.doc("The buffer size, in bytes, to use when writing the sorted records to an on-disk file.")
.bytesConf(ByteUnit.BYTE)
.checkValue(v => v > 12 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH,
s"The buffer size must be greater than 12 and less than or equal to " +
s"${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
.createWithDefault(1024 * 1024)
private[spark] val UNROLL_MEMORY_CHECK_PERIOD =
ConfigBuilder("spark.storage.unrollMemoryCheckPeriod")
.internal()
.doc("The memory check period is used to determine how often we should check whether "
+ "there is a need to request more memory when we try to unroll the given block in memory.")
.longConf
.createWithDefault(16)
private[spark] val UNROLL_MEMORY_GROWTH_FACTOR =
ConfigBuilder("spark.storage.unrollMemoryGrowthFactor")
.internal()
.doc("Memory to request as a multiple of the size that used to unroll the block.")
.doubleConf
.createWithDefault(1.5)
private[spark] val FORCE_DOWNLOAD_SCHEMES =
ConfigBuilder("spark.yarn.dist.forceDownloadSchemes")
.doc("Comma-separated list of schemes for which resources will be downloaded to the " +
"local disk prior to being added to YARN's distributed cache. For use in cases " +
"where the YARN service does not support schemes that are supported by Spark, like http, " +
"https and ftp, or jars required to be in the local YARN client's classpath. Wildcard " +
"'*' is denoted to download resources for all the schemes.")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val EXTRA_LISTENERS = ConfigBuilder("spark.extraListeners")
.doc("Class names of listeners to add to SparkContext during initialization.")
.stringConf
.toSequence
.createOptional
private[spark] val SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD =
ConfigBuilder("spark.shuffle.spill.numElementsForceSpillThreshold")
.internal()
.doc("The maximum number of elements in memory before forcing the shuffle sorter to spill. " +
"By default it's Integer.MAX_VALUE, which means we never force the sorter to spill, " +
"until we reach some limitations, like the max page size limitation for the pointer " +
"array in the sorter.")
.intConf
.createWithDefault(Integer.MAX_VALUE)
private[spark] val SHUFFLE_MAP_OUTPUT_PARALLEL_AGGREGATION_THRESHOLD =
ConfigBuilder("spark.shuffle.mapOutput.parallelAggregationThreshold")
.internal()
.doc("Multi-thread is used when the number of mappers * shuffle partitions is greater than " +
"or equal to this threshold. Note that the actual parallelism is calculated by number of " +
"mappers * shuffle partitions / this threshold + 1, so this threshold should be positive.")
.intConf
.checkValue(v => v > 0, "The threshold should be positive.")
.createWithDefault(10000000)
private[spark] val MAX_RESULT_SIZE = ConfigBuilder("spark.driver.maxResultSize")
.doc("Size limit for results.")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("1g")
private[spark] val CREDENTIALS_RENEWAL_INTERVAL_RATIO =
ConfigBuilder("spark.security.credentials.renewalRatio")
.doc("Ratio of the credential's expiration time when Spark should fetch new credentials.")
.doubleConf
.createWithDefault(0.75d)
private[spark] val CREDENTIALS_RENEWAL_RETRY_WAIT =
ConfigBuilder("spark.security.credentials.retryWait")
.doc("How long to wait before retrying to fetch new credentials after a failure.")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("1h")
private[spark] val SHUFFLE_SORT_INIT_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.sort.initialBufferSize")
.internal()
.bytesConf(ByteUnit.BYTE)
.checkValue(v => v > 0 && v <= Int.MaxValue,
s"The buffer size must be greater than 0 and less than or equal to ${Int.MaxValue}.")
.createWithDefault(4096)
private[spark] val SHUFFLE_COMPRESS =
ConfigBuilder("spark.shuffle.compress")
.doc("Whether to compress shuffle output. Compression will use " +
"spark.io.compression.codec.")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_SPILL_COMPRESS =
ConfigBuilder("spark.shuffle.spill.compress")
.doc("Whether to compress data spilled during shuffles. Compression will use " +
"spark.io.compression.codec.")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_SPILL_INITIAL_MEM_THRESHOLD =
ConfigBuilder("spark.shuffle.spill.initialMemoryThreshold")
.internal()
.doc("Initial threshold for the size of a collection before we start tracking its " +
"memory usage.")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(5 * 1024 * 1024)
private[spark] val SHUFFLE_SPILL_BATCH_SIZE =
ConfigBuilder("spark.shuffle.spill.batchSize")
.internal()
.doc("Size of object batches when reading/writing from serializers.")
.longConf
.createWithDefault(10000)
private[spark] val SHUFFLE_SORT_BYPASS_MERGE_THRESHOLD =
ConfigBuilder("spark.shuffle.sort.bypassMergeThreshold")
.doc("In the sort-based shuffle manager, avoid merge-sorting data if there is no " +
"map-side aggregation and there are at most this many reduce partitions")
.intConf
.createWithDefault(200)
private[spark] val SHUFFLE_MANAGER =
ConfigBuilder("spark.shuffle.manager")
.stringConf
.createWithDefault("sort")
private[spark] val SHUFFLE_REDUCE_LOCALITY_ENABLE =
ConfigBuilder("spark.shuffle.reduceLocality.enabled")
.doc("Whether to compute locality preferences for reduce tasks")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_MAPOUTPUT_MIN_SIZE_FOR_BROADCAST =
ConfigBuilder("spark.shuffle.mapOutput.minSizeForBroadcast")
.doc("The size at which we use Broadcast to send the map output statuses to the executors.")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("512k")
private[spark] val SHUFFLE_MAPOUTPUT_DISPATCHER_NUM_THREADS =
ConfigBuilder("spark.shuffle.mapOutput.dispatcher.numThreads")
.intConf
.createWithDefault(8)
private[spark] val SHUFFLE_DETECT_CORRUPT =
ConfigBuilder("spark.shuffle.detectCorrupt")
.doc("Whether to detect any corruption in fetched blocks.")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_DETECT_CORRUPT_MEMORY =
ConfigBuilder("spark.shuffle.detectCorrupt.useExtraMemory")
.doc("If enabled, part of a compressed/encrypted stream will be de-compressed/de-crypted " +
"by using extra memory to detect early corruption. Any IOException thrown will cause " +
"the task to be retried once and if it fails again with same exception, then " +
"FetchFailedException will be thrown to retry previous stage")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_SYNC =
ConfigBuilder("spark.shuffle.sync")
.doc("Whether to force outstanding writes to disk.")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_UNSAFE_FAST_MERGE_ENABLE =
ConfigBuilder("spark.shuffle.unsafe.fastMergeEnabled")
.doc("Whether to perform a fast spill merge.")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_SORT_USE_RADIXSORT =
ConfigBuilder("spark.shuffle.sort.useRadixSort")
.doc("Whether to use radix sort for sorting in-memory partition ids. Radix sort is much " +
"faster, but requires additional memory to be reserved memory as pointers are added.")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_MIN_NUM_PARTS_TO_HIGHLY_COMPRESS =
ConfigBuilder("spark.shuffle.minNumPartitionsToHighlyCompress")
.internal()
.doc("Number of partitions to determine if MapStatus should use HighlyCompressedMapStatus")
.intConf
.checkValue(v => v > 0, "The value should be a positive integer.")
.createWithDefault(2000)
private[spark] val SHUFFLE_USE_OLD_FETCH_PROTOCOL =
ConfigBuilder("spark.shuffle.useOldFetchProtocol")
.doc("Whether to use the old protocol while doing the shuffle block fetching. " +
"It is only enabled while we need the compatibility in the scenario of new Spark " +
"version job fetching shuffle blocks from old version external shuffle service.")
.booleanConf
.createWithDefault(false)
private[spark] val MEMORY_MAP_LIMIT_FOR_TESTS =
ConfigBuilder("spark.storage.memoryMapLimitForTests")
.internal()
.doc("For testing only, controls the size of chunks when memory mapping a file")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH)
private[spark] val BARRIER_SYNC_TIMEOUT =
ConfigBuilder("spark.barrier.sync.timeout")
.doc("The timeout in seconds for each barrier() call from a barrier task. If the " +
"coordinator didn't receive all the sync messages from barrier tasks within the " +
"configured time, throw a SparkException to fail all the tasks. The default value is set " +
"to 31536000(3600 * 24 * 365) so the barrier() call shall wait for one year.")
.timeConf(TimeUnit.SECONDS)
.checkValue(v => v > 0, "The value should be a positive time value.")
.createWithDefaultString("365d")
private[spark] val UNSCHEDULABLE_TASKSET_TIMEOUT =
ConfigBuilder("spark.scheduler.blacklist.unschedulableTaskSetTimeout")
.doc("The timeout in seconds to wait to acquire a new executor and schedule a task " +
"before aborting a TaskSet which is unschedulable because of being completely blacklisted.")
.timeConf(TimeUnit.SECONDS)
.checkValue(v => v >= 0, "The value should be a non negative time value.")
.createWithDefault(120)
private[spark] val BARRIER_MAX_CONCURRENT_TASKS_CHECK_INTERVAL =
ConfigBuilder("spark.scheduler.barrier.maxConcurrentTasksCheck.interval")
.doc("Time in seconds to wait between a max concurrent tasks check failure and the next " +
"check. A max concurrent tasks check ensures the cluster can launch more concurrent " +
"tasks than required by a barrier stage on job submitted. The check can fail in case " +
"a cluster has just started and not enough executors have registered, so we wait for a " +
"little while and try to perform the check again. If the check fails more than a " +
"configured max failure times for a job then fail current job submission. Note this " +
"config only applies to jobs that contain one or more barrier stages, we won't perform " +
"the check on non-barrier jobs.")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("15s")
private[spark] val BARRIER_MAX_CONCURRENT_TASKS_CHECK_MAX_FAILURES =
ConfigBuilder("spark.scheduler.barrier.maxConcurrentTasksCheck.maxFailures")
.doc("Number of max concurrent tasks check failures allowed before fail a job submission. " +
"A max concurrent tasks check ensures the cluster can launch more concurrent tasks than " +
"required by a barrier stage on job submitted. The check can fail in case a cluster " +
"has just started and not enough executors have registered, so we wait for a little " +
"while and try to perform the check again. If the check fails more than a configured " +
"max failure times for a job then fail current job submission. Note this config only " +
"applies to jobs that contain one or more barrier stages, we won't perform the check on " +
"non-barrier jobs.")
.intConf
.checkValue(v => v > 0, "The max failures should be a positive value.")
.createWithDefault(40)
private[spark] val UNSAFE_EXCEPTION_ON_MEMORY_LEAK =
ConfigBuilder("spark.unsafe.exceptionOnMemoryLeak")
.internal()
.booleanConf
.createWithDefault(false)
private[spark] val UNSAFE_SORTER_SPILL_READ_AHEAD_ENABLED =
ConfigBuilder("spark.unsafe.sorter.spill.read.ahead.enabled")
.internal()
.booleanConf
.createWithDefault(true)
private[spark] val UNSAFE_SORTER_SPILL_READER_BUFFER_SIZE =
ConfigBuilder("spark.unsafe.sorter.spill.reader.buffer.size")
.internal()
.bytesConf(ByteUnit.BYTE)
.checkValue(v => 1024 * 1024 <= v && v <= MAX_BUFFER_SIZE_BYTES,
s"The value must be in allowed range [1,048,576, ${MAX_BUFFER_SIZE_BYTES}].")
.createWithDefault(1024 * 1024)
private[spark] val EXECUTOR_PLUGINS =
ConfigBuilder("spark.executor.plugins")
.doc("Comma-separated list of class names for \\"plugins\\" implementing " +
"org.apache.spark.ExecutorPlugin. Plugins have the same privileges as any task " +
"in a Spark executor. They can also interfere with task execution and fail in " +
"unexpected ways. So be sure to only use this for trusted plugins.")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val CLEANER_PERIODIC_GC_INTERVAL =
ConfigBuilder("spark.cleaner.periodicGC.interval")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("30min")
private[spark] val CLEANER_REFERENCE_TRACKING =
ConfigBuilder("spark.cleaner.referenceTracking")
.booleanConf
.createWithDefault(true)
private[spark] val CLEANER_REFERENCE_TRACKING_BLOCKING =
ConfigBuilder("spark.cleaner.referenceTracking.blocking")
.booleanConf
.createWithDefault(true)
private[spark] val CLEANER_REFERENCE_TRACKING_BLOCKING_SHUFFLE =
ConfigBuilder("spark.cleaner.referenceTracking.blocking.shuffle")
.booleanConf
.createWithDefault(false)
private[spark] val CLEANER_REFERENCE_TRACKING_CLEAN_CHECKPOINTS =
ConfigBuilder("spark.cleaner.referenceTracking.cleanCheckpoints")
.booleanConf
.createWithDefault(false)
private[spark] val EXECUTOR_LOGS_ROLLING_STRATEGY =
ConfigBuilder("spark.executor.logs.rolling.strategy").stringConf.createWithDefault("")
private[spark] val EXECUTOR_LOGS_ROLLING_TIME_INTERVAL =
ConfigBuilder("spark.executor.logs.rolling.time.interval").stringConf.createWithDefault("daily")
private[spark] val EXECUTOR_LOGS_ROLLING_MAX_SIZE =
ConfigBuilder("spark.executor.logs.rolling.maxSize")
.stringConf
.createWithDefault((1024 * 1024).toString)
private[spark] val EXECUTOR_LOGS_ROLLING_MAX_RETAINED_FILES =
ConfigBuilder("spark.executor.logs.rolling.maxRetainedFiles").intConf.createWithDefault(-1)
private[spark] val EXECUTOR_LOGS_ROLLING_ENABLE_COMPRESSION =
ConfigBuilder("spark.executor.logs.rolling.enableCompression")
.booleanConf
.createWithDefault(false)
private[spark] val MASTER_REST_SERVER_ENABLED = ConfigBuilder("spark.master.rest.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val MASTER_REST_SERVER_PORT = ConfigBuilder("spark.master.rest.port")
.intConf
.createWithDefault(6066)
private[spark] val MASTER_UI_PORT = ConfigBuilder("spark.master.ui.port")
.intConf
.createWithDefault(8080)
private[spark] val IO_COMPRESSION_SNAPPY_BLOCKSIZE =
ConfigBuilder("spark.io.compression.snappy.blockSize")
.doc("Block size in bytes used in Snappy compression, in the case when " +
"Snappy compression codec is used. Lowering this block size " +
"will also lower shuffle memory usage when Snappy is used")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("32k")
private[spark] val IO_COMPRESSION_LZ4_BLOCKSIZE =
ConfigBuilder("spark.io.compression.lz4.blockSize")
.doc("Block size in bytes used in LZ4 compression, in the case when LZ4 compression" +
"codec is used. Lowering this block size will also lower shuffle memory " +
"usage when LZ4 is used.")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("32k")
private[spark] val IO_COMPRESSION_CODEC =
ConfigBuilder("spark.io.compression.codec")
.doc("The codec used to compress internal data such as RDD partitions, event log, " +
"broadcast variables and shuffle outputs. By default, Spark provides four codecs: " +
"lz4, lzf, snappy, and zstd. You can also use fully qualified class names to specify " +
"the codec")
.stringConf
.createWithDefaultString("lz4")
private[spark] val IO_COMPRESSION_ZSTD_BUFFERSIZE =
ConfigBuilder("spark.io.compression.zstd.bufferSize")
.doc("Buffer size in bytes used in Zstd compression, in the case when Zstd " +
"compression codec is used. Lowering this size will lower the shuffle " +
"memory usage when Zstd is used, but it might increase the compression " +
"cost because of excessive JNI call overhead")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("32k")
private[spark] val IO_COMPRESSION_ZSTD_LEVEL =
ConfigBuilder("spark.io.compression.zstd.level")
.doc("Compression level for Zstd compression codec. Increasing the compression " +
"level will result in better compression at the expense of more CPU and memory")
.intConf
.createWithDefault(1)
private[spark] val IO_WARNING_LARGEFILETHRESHOLD =
ConfigBuilder("spark.io.warning.largeFileThreshold")
.internal()
.doc("When spark loading one single large file, if file size exceed this " +
"threshold, then log warning with possible reasons.")
.longConf
.createWithDefault(1024 * 1024 * 1024)
private[spark] val EVENT_LOG_COMPRESSION_CODEC =
ConfigBuilder("spark.eventLog.compression.codec")
.doc("The codec used to compress event log. By default, Spark provides four codecs: " +
"lz4, lzf, snappy, and zstd. You can also use fully qualified class names to specify " +
"the codec. If this is not given, spark.io.compression.codec will be used.")
.fallbackConf(IO_COMPRESSION_CODEC)
private[spark] val BUFFER_SIZE =
ConfigBuilder("spark.buffer.size")
.intConf
.checkValue(_ >= 0, "The buffer size must not be negative")
.createWithDefault(65536)
private[spark] val LOCALITY_WAIT_PROCESS = ConfigBuilder("spark.locality.wait.process")
.fallbackConf(LOCALITY_WAIT)
private[spark] val LOCALITY_WAIT_NODE = ConfigBuilder("spark.locality.wait.node")
.fallbackConf(LOCALITY_WAIT)
private[spark] val LOCALITY_WAIT_RACK = ConfigBuilder("spark.locality.wait.rack")
.fallbackConf(LOCALITY_WAIT)
private[spark] val REDUCER_MAX_SIZE_IN_FLIGHT = ConfigBuilder("spark.reducer.maxSizeInFlight")
.doc("Maximum size of map outputs to fetch simultaneously from each reduce task, " +
"in MiB unless otherwise specified. Since each output requires us to create a " +
"buffer to receive it, this represents a fixed memory overhead per reduce task, " +
"so keep it small unless you have a large amount of memory")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("48m")
private[spark] val REDUCER_MAX_REQS_IN_FLIGHT = ConfigBuilder("spark.reducer.maxReqsInFlight")
.doc("This configuration limits the number of remote requests to fetch blocks at " +
"any given point. When the number of hosts in the cluster increase, " +
"it might lead to very large number of inbound connections to one or more nodes, " +
"causing the workers to fail under load. By allowing it to limit the number of " +
"fetch requests, this scenario can be mitigated")
.intConf
.createWithDefault(Int.MaxValue)
private[spark] val BROADCAST_COMPRESS = ConfigBuilder("spark.broadcast.compress")
.doc("Whether to compress broadcast variables before sending them. " +
"Generally a good idea. Compression will use spark.io.compression.codec")
.booleanConf.createWithDefault(true)
private[spark] val BROADCAST_BLOCKSIZE = ConfigBuilder("spark.broadcast.blockSize")
.doc("Size of each piece of a block for TorrentBroadcastFactory, in " +
"KiB unless otherwise specified. Too large a value decreases " +
"parallelism during broadcast (makes it slower); however, " +
"if it is too small, BlockManager might take a performance hit")
.bytesConf(ByteUnit.KiB)
.createWithDefaultString("4m")
private[spark] val BROADCAST_CHECKSUM = ConfigBuilder("spark.broadcast.checksum")
.doc("Whether to enable checksum for broadcast. If enabled, " +
"broadcasts will include a checksum, which can help detect " +
"corrupted blocks, at the cost of computing and sending a little " +
"more data. It's possible to disable it if the network has other " +
"mechanisms to guarantee data won't be corrupted during broadcast")
.booleanConf.createWithDefault(true)
private[spark] val BROADCAST_FOR_UDF_COMPRESSION_THRESHOLD =
ConfigBuilder("spark.broadcast.UDFCompressionThreshold")
.doc("The threshold at which user-defined functions (UDFs) and Python RDD commands " +
"are compressed by broadcast in bytes unless otherwise specified")
.bytesConf(ByteUnit.BYTE)
.checkValue(v => v >= 0, "The threshold should be non-negative.")
.createWithDefault(1L * 1024 * 1024)
private[spark] val RDD_COMPRESS = ConfigBuilder("spark.rdd.compress")
.doc("Whether to compress serialized RDD partitions " +
"(e.g. for StorageLevel.MEMORY_ONLY_SER in Scala " +
"or StorageLevel.MEMORY_ONLY in Python). Can save substantial " +
"space at the cost of some extra CPU time. " +
"Compression will use spark.io.compression.codec")
.booleanConf.createWithDefault(false)
private[spark] val RDD_PARALLEL_LISTING_THRESHOLD =
ConfigBuilder("spark.rdd.parallelListingThreshold")
.intConf
.createWithDefault(10)
private[spark] val RDD_LIMIT_SCALE_UP_FACTOR =
ConfigBuilder("spark.rdd.limit.scaleUpFactor")
.intConf
.createWithDefault(4)
private[spark] val SERIALIZER = ConfigBuilder("spark.serializer")
.stringConf
.createWithDefault("org.apache.spark.serializer.JavaSerializer")
private[spark] val SERIALIZER_OBJECT_STREAM_RESET =
ConfigBuilder("spark.serializer.objectStreamReset")
.intConf
.createWithDefault(100)
private[spark] val SERIALIZER_EXTRA_DEBUG_INFO = ConfigBuilder("spark.serializer.extraDebugInfo")
.booleanConf
.createWithDefault(true)
private[spark] val JARS = ConfigBuilder("spark.jars")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val FILES = ConfigBuilder("spark.files")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val SUBMIT_DEPLOY_MODE = ConfigBuilder("spark.submit.deployMode")
.stringConf
.createWithDefault("client")
private[spark] val SUBMIT_PYTHON_FILES = ConfigBuilder("spark.submit.pyFiles")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val SCHEDULER_ALLOCATION_FILE =
ConfigBuilder("spark.scheduler.allocation.file")
.stringConf
.createOptional
private[spark] val SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO =
ConfigBuilder("spark.scheduler.minRegisteredResourcesRatio")
.doubleConf
.createOptional
private[spark] val SCHEDULER_MAX_REGISTERED_RESOURCE_WAITING_TIME =
ConfigBuilder("spark.scheduler.maxRegisteredResourcesWaitingTime")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("30s")
private[spark] val SCHEDULER_MODE =
ConfigBuilder("spark.scheduler.mode")
.stringConf
.createWithDefault(SchedulingMode.FIFO.toString)
private[spark] val SCHEDULER_REVIVE_INTERVAL =
ConfigBuilder("spark.scheduler.revive.interval")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val SPECULATION_ENABLED =
ConfigBuilder("spark.speculation")
.booleanConf
.createWithDefault(false)
private[spark] val SPECULATION_INTERVAL =
ConfigBuilder("spark.speculation.interval")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefault(100)
private[spark] val SPECULATION_MULTIPLIER =
ConfigBuilder("spark.speculation.multiplier")
.doubleConf
.createWithDefault(1.5)
private[spark] val SPECULATION_QUANTILE =
ConfigBuilder("spark.speculation.quantile")
.doubleConf
.createWithDefault(0.75)
private[spark] val STAGING_DIR = ConfigBuilder("spark.yarn.stagingDir")
.doc("Staging directory used while submitting applications.")
.stringConf
.createOptional
private[spark] val BUFFER_PAGESIZE = ConfigBuilder("spark.buffer.pageSize")
.doc("The amount of memory used per page in bytes")
.bytesConf(ByteUnit.BYTE)
.createOptional
}
| bdrillard/spark | core/src/main/scala/org/apache/spark/internal/config/package.scala | Scala | apache-2.0 | 63,569 |
package easeml.text.preprocess
import easeml.text.StopWords
import org.apache.spark.rdd.RDD
import easeml.util.IO.{readMap, writeMap}
/**
* Transform raw documents to TFIDF representations
* @author YanXiaohui on 2016-09-30.
*/
object TFIDFProc {
/**
* Transform documents into word id sequences
*
* @param raw_docs raw document collection
* @param language "cn": Chinese, "en:" English (default)
* @param mid_dir middle file paths, default is "/tmp"
* @return document list transformed from raw_docs one by one
*/
def fitAndTransform(raw_docs: RDD[String],
language: String = "en",
mid_dir: String = "/tmp",
stop_word_pt: String = ""): RDD[Map[Int, Double]] = {
val stop_words: Set[String] = if (!stop_word_pt.isEmpty)
StopWords.getFromFile(stop_word_pt)
else
StopWords.getEnglish
// main process
val seg_docs = DocWordSeg.transform(raw_docs, language)
val filter_docs = WordFilter.transform(seg_docs, stop_words)
val (dwids, w2ids) = WordIndex.fitAndTransform(filter_docs)
val (new_docs, idfs) = TFIDF.fitAndTransform(dwids)
// write middle files
val dir = if (mid_dir.last=='/') mid_dir else mid_dir + "/"
writeMap(dir + "w2id.txt", w2ids)
writeMap(dir + "idf.txt", idfs)
new_docs
}
/**
* Transform documents with existing vocabulary
* @param raw_docs documents to be transformed
* @param mid_dir The directory contains vocabulary file, i.e., w2id.txt
*/
def transform(raw_docs: RDD[String],
language: String = "en",
mid_dir: String = "/tmp"): RDD[Map[Int, Double]] = {
val dir = if (mid_dir.last=='/') mid_dir else mid_dir + "/"
val seg_docs = DocWordSeg.transform(raw_docs, language)
val w2ids: Map[String, Int] = readMap(dir + "w2id.txt").mapValues(_.toInt).map(identity)
val idfs: Map[Int, Double] = readMap(dir + "idf.txt").map {
case (k, v) => (k.toInt, v.toDouble)
}
val dwids = WordIndex.transform(seg_docs, w2ids)
TFIDF.transform(dwids, idfs)
}
}
| xiaohuiyan/scalaml | src/main/scala/easeml/text/preprocess/TFIDFProc.scala | Scala | apache-2.0 | 2,123 |
package com.twitter.finagle.mysql.transport
import com.twitter.io.{Buf, BufByteWriter, ProxyByteWriter}
import java.nio.charset.{StandardCharsets, Charset => JCharset}
/**
* A `ByteWriter` specialized for dealing with MySQL protocol messages.
*/
class MysqlBufWriter(underlying: BufByteWriter)
extends ProxyByteWriter(underlying)
with BufByteWriter {
/**
* Writes `b` to the buffer `num` times
*/
def fill(num: Int, b: Byte): MysqlBufWriter = {
var i = 0
while (i < num) {
writeByte(b)
i += 1
}
this
}
/**
* Writes a variable length integer according the the MySQL
* Client/Server protocol. Refer to MySQL documentation for
* more information.
*/
def writeVariableLong(length: Long): MysqlBufWriter = {
if (length < 0) throw new IllegalStateException(s"Negative length-encoded integer: $length")
if (length < 251) {
writeByte(length.toInt)
} else if (length < 65536) {
writeByte(252)
writeShortLE(length.toInt)
} else if (length < 16777216) {
writeByte(253)
writeMediumLE(length.toInt)
} else {
writeByte(254)
writeLongLE(length)
}
this
}
/**
* Writes a null terminated string onto the buffer encoded as UTF-8
*
* @param s String to write.
*/
def writeNullTerminatedString(s: String): MysqlBufWriter = {
writeBytes(s.getBytes(StandardCharsets.UTF_8))
writeByte(0x00)
this
}
/**
* Writes a length coded string using the MySQL Client/Server
* protocol in the given charset.
*
* @param s String to write to buffer.
*/
def writeLengthCodedString(s: String, charset: JCharset): MysqlBufWriter = {
writeLengthCodedBytes(s.getBytes(charset))
}
/**
* Writes a length coded set of bytes according to the MySQL
* client/server protocol.
*/
def writeLengthCodedBytes(bytes: Array[Byte]): MysqlBufWriter = {
writeVariableLong(bytes.length)
writeBytes(bytes)
this
}
def owned(): Buf = underlying.owned()
}
object MysqlBufWriter {
/**
* Create a new [[MysqlBufWriter]] from an array of bytes.
*/
def apply(bytes: Array[Byte]): MysqlBufWriter =
new MysqlBufWriter(BufByteWriter(bytes))
}
| twitter/finagle | finagle-mysql/src/main/scala/com/twitter/finagle/mysql/transport/MysqlBufWriter.scala | Scala | apache-2.0 | 2,220 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.utils
import org.geotools.data.Query
import org.geotools.feature.FeatureTypes
import org.geotools.geometry.jts.GeometryCoordinateSequenceTransformer
import org.geotools.referencing.CRS
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.jts.geom.Geometry
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.referencing.crs.CoordinateReferenceSystem
/**
* Reproject the geometries in a simple feature to a different CRS
*/
trait Reprojection {
def apply(feature: SimpleFeature): SimpleFeature
}
object Reprojection {
import org.locationtech.geomesa.index.conf.QueryHints.RichHints
/**
* Create a reprojection function
*
* @param returnSft simple feature type being returned
* @param crs crs information from a query
* @return
*/
def apply(returnSft: SimpleFeatureType, crs: QueryReferenceSystems): Reprojection = {
if (crs.target != crs.user) {
val transformer = new GeometryCoordinateSequenceTransformer
transformer.setMathTransform(CRS.findMathTransform(crs.user, crs.target, true))
val transformed = FeatureTypes.transform(returnSft, crs.target) // note: drops user data
new TransformReprojection(SimpleFeatureTypes.immutable(transformed, returnSft.getUserData), transformer)
} else if (crs.user != crs.native) {
val transformed = FeatureTypes.transform(returnSft, crs.user) // note: drops user data
new UserReprojection(SimpleFeatureTypes.immutable(transformed, returnSft.getUserData))
} else {
throw new IllegalArgumentException(s"Trying to reproject to the same CRS: $crs")
}
}
/**
* Holds query projection info
*
* @param native native crs of the data
* @param user user crs for the query (data will be treated as this crs but without any transform)
* @param target target crs for the query (data will be transformed to this crs)
*/
case class QueryReferenceSystems(
native: CoordinateReferenceSystem,
user: CoordinateReferenceSystem,
target: CoordinateReferenceSystem)
object QueryReferenceSystems {
def apply(query: Query): Option[QueryReferenceSystems] = {
Option(query.getHints.getReturnSft.getGeometryDescriptor).flatMap { descriptor =>
val native = descriptor.getCoordinateReferenceSystem
val source = Option(query.getCoordinateSystem).getOrElse(native)
val target = Option(query.getCoordinateSystemReproject).getOrElse(native)
if (target == source && source == native) { None } else {
Some(QueryReferenceSystems(native, source, target))
}
}
}
}
/**
* Applies a geometric transform to any geometry attributes
*
* @param sft simple feature type being projected to
* @param transformer transformer
*/
private class TransformReprojection(sft: SimpleFeatureType, transformer: GeometryCoordinateSequenceTransformer)
extends Reprojection {
override def apply(feature: SimpleFeature): SimpleFeature = {
val values = Array.tabulate(sft.getAttributeCount) { i =>
feature.getAttribute(i) match {
case g: Geometry => transformer.transform(g)
case a => a
}
}
new ScalaSimpleFeature(sft, feature.getID, values, feature.getUserData)
}
}
/**
* Changes the defined crs but does not do any actual geometric transforms
*
* @param sft simple feature type being projected to
*/
private class UserReprojection(sft: SimpleFeatureType) extends Reprojection {
override def apply(feature: SimpleFeature): SimpleFeature = ScalaSimpleFeature.copy(sft, feature)
}
}
| aheyne/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/utils/Reprojection.scala | Scala | apache-2.0 | 4,233 |
package hooktest
/*
* Copyright (c) 2016 Yuki Ono
* Licensed under the MIT License.
*/
import java.util.concurrent.SynchronousQueue
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean
import scala.annotation.tailrec
object EventWaiter {
// http://hsmemo.github.io/articles/no3059sSJ.html
private val THREAD_PRIORITY = 7 // THREAD_PRIORITY_ABOVE_NORMAL
private val ctx = Context
private val logger = Logger.getLogger()
private val sync = new SynchronousQueue[MouseEvent](true)
private val waiting = new AtomicBoolean(false)
private var waitingEvent: MouseEvent = NonEvent(null)
private def setFlagsOffer(me: MouseEvent) {
//logger.debug("setFlagsOffer")
me match {
case Move(_) => {
logger.debug(s"setFlagsOffer - setResent (Move): ${waitingEvent.name}")
ctx.LastFlags.setResent(waitingEvent)
//Thread.sleep(1)
}
case LeftUp(_) | RightUp(_) => {
logger.debug(s"setFlagsOffer - setResent (Up): ${waitingEvent.name}")
ctx.LastFlags.setResent(waitingEvent)
}
case LeftDown(_) | RightDown(_) => {
logger.debug(s"setFlagsOffer - setSuppressed: ${waitingEvent.name}")
ctx.LastFlags.setSuppressed(waitingEvent)
ctx.LastFlags.setSuppressed(me)
ctx.setStartingScrollMode
}
case Cancel(_) => logger.debug("setFlagsOffer: cancel")
case _ => throw new IllegalStateException(me.name)
}
}
def offer(me: MouseEvent): Boolean = {
if (waiting.get) {
@tailrec
def loop(): Boolean = {
if (sync.offer(me)) {
setFlagsOffer(me)
true
}
else {
if (waiting.get) {
Thread.sleep(0)
loop()
}
else
false
}
}
loop()
}
else
false
}
private def poll(timeout: Long): Option[MouseEvent] = {
try {
Option(sync.poll(timeout, TimeUnit.MILLISECONDS))
}
finally {
waiting.set(false)
}
}
private def fromTimeout(down: MouseEvent) {
ctx.LastFlags.setResent(down)
logger.debug(s"wait Trigger (${down.name} -->> Timeout): resend ${down.name}")
Windows.resendDown(down)
}
private def fromMove(down: MouseEvent) {
//ctx.LastFlags.setResent(down)
logger.debug(s"wait Trigger (${down.name} -->> Move): resend ${down.name}")
Windows.resendDown(down)
}
private def fromUp(down: MouseEvent, up: MouseEvent) {
//ctx.LastFlags.setResent(down)
def resendC(mc: MouseClick) = {
logger.debug(s"wait Trigger (${down.name} -->> ${up.name}): resend ${mc.name}")
Windows.resendClick(mc)
}
def resendUD = {
logger.debug(s"wait Trigger (${down.name} -->> ${up.name}): resend ${down.name}, ${up.name}")
Windows.resendDown(down)
Windows.resendUp(up)
}
(down, up) match {
case (LeftDown(_), LeftUp(_)) => {
if (Mouse.samePoint(down, up))
resendC(LeftClick(down.info))
else
resendUD
}
case (LeftDown(_), RightUp(_)) => resendUD
case (RightDown(_), RightUp(_)) => {
if (Mouse.samePoint(down, up))
resendC(RightClick(down.info))
else
resendUD
}
case (RightDown(_), LeftUp(_)) => resendUD
case x => {
throw new IllegalStateException("Not matched: " + x);
}
}
}
private def fromDown(d1: MouseEvent, d2: MouseEvent) {
//ctx.LastFlags.setSuppressed(d1)
//ctx.LastFlags.setSuppressed(d2)
logger.debug(s"wait Trigger (${d1.name} -->> ${d2.name}): start scroll mode")
Context.startScrollMode(d2.info)
}
private def dispatchEvent(down: MouseEvent, res: MouseEvent) = res match {
case Move(_) => fromMove(down)
case LeftUp(_) | RightUp(_) => fromUp(down, res)
case LeftDown(_) | RightDown(_) => fromDown(down, res)
case Cancel(_) => logger.debug("dispatchEvent: cancel")
case _ => throw new IllegalStateException()
}
private val waiterQueue = new SynchronousQueue[MouseEvent](true)
private val waiterThread = new Thread(() =>
while (true) {
val down = waiterQueue.take
poll(Context.getPollTimeout) match {
case Some(res) => dispatchEvent(down, res)
case None => fromTimeout(down)
}
}
)
waiterThread.setDaemon(true)
waiterThread.setPriority(THREAD_PRIORITY)
waiterThread.start
// RightDown or LeftDown
def start(down: MouseEvent) {
if (!down.isDown)
throw new IllegalArgumentException
waitingEvent = down
waiterQueue.put(down)
waiting.set(true)
}
} | ykon/w10wheel | src/main/scala/hooktest/EventWaiter.scala | Scala | mit | 5,321 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.net.InetAddress
import java.nio.charset.StandardCharsets
import java.util
import java.util.{Collections, Optional}
import java.util.Arrays.asList
import kafka.api.{ApiVersion, KAFKA_0_10_2_IV0}
import kafka.controller.KafkaController
import kafka.coordinator.group.GroupCoordinator
import kafka.coordinator.transaction.TransactionCoordinator
import kafka.network.RequestChannel
import kafka.network.RequestChannel.SendResponse
import kafka.security.auth.Authorizer
import kafka.server.QuotaFactory.QuotaManagers
import kafka.utils.{MockTime, TestUtils}
import kafka.zk.KafkaZkClient
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.UnsupportedVersionException
import org.apache.kafka.common.memory.MemoryPool
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.FileRecords.TimestampAndOffset
import org.apache.kafka.common.record._
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.requests.UpdateMetadataRequest.{Broker, EndPoint}
import org.apache.kafka.common.requests.WriteTxnMarkersRequest.TxnMarkerEntry
import org.apache.kafka.common.requests.{FetchMetadata => JFetchMetadata, _}
import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
import org.easymock.{Capture, EasyMock, IAnswer}
import EasyMock._
import org.junit.Assert.{assertEquals, assertNull, assertTrue}
import org.junit.{After, Test}
import scala.collection.JavaConverters._
import scala.collection.Map
class KafkaApisTest {
private val requestChannel: RequestChannel = EasyMock.createNiceMock(classOf[RequestChannel])
private val requestChannelMetrics: RequestChannel.Metrics = EasyMock.createNiceMock(classOf[RequestChannel.Metrics])
private val replicaManager: ReplicaManager = EasyMock.createNiceMock(classOf[ReplicaManager])
private val groupCoordinator: GroupCoordinator = EasyMock.createNiceMock(classOf[GroupCoordinator])
private val adminManager: AdminManager = EasyMock.createNiceMock(classOf[AdminManager])
private val txnCoordinator: TransactionCoordinator = EasyMock.createNiceMock(classOf[TransactionCoordinator])
private val controller: KafkaController = EasyMock.createNiceMock(classOf[KafkaController])
private val zkClient: KafkaZkClient = EasyMock.createNiceMock(classOf[KafkaZkClient])
private val metrics = new Metrics()
private val brokerId = 1
private val metadataCache = new MetadataCache(brokerId)
private val authorizer: Option[Authorizer] = None
private val clientQuotaManager: ClientQuotaManager = EasyMock.createNiceMock(classOf[ClientQuotaManager])
private val clientRequestQuotaManager: ClientRequestQuotaManager = EasyMock.createNiceMock(classOf[ClientRequestQuotaManager])
private val replicaQuotaManager: ReplicationQuotaManager = EasyMock.createNiceMock(classOf[ReplicationQuotaManager])
private val quotas = QuotaManagers(clientQuotaManager, clientQuotaManager, clientRequestQuotaManager,
replicaQuotaManager, replicaQuotaManager, replicaQuotaManager, None)
private val fetchManager: FetchManager = EasyMock.createNiceMock(classOf[FetchManager])
private val brokerTopicStats = new BrokerTopicStats
private val clusterId = "clusterId"
private val time = new MockTime
@After
def tearDown() {
quotas.shutdown()
metrics.close()
}
def createKafkaApis(interBrokerProtocolVersion: ApiVersion = ApiVersion.latestVersion): KafkaApis = {
val properties = TestUtils.createBrokerConfig(brokerId, "zk")
properties.put(KafkaConfig.InterBrokerProtocolVersionProp, interBrokerProtocolVersion.toString)
properties.put(KafkaConfig.LogMessageFormatVersionProp, interBrokerProtocolVersion.toString)
new KafkaApis(requestChannel,
replicaManager,
adminManager,
groupCoordinator,
txnCoordinator,
controller,
zkClient,
brokerId,
new KafkaConfig(properties),
metadataCache,
metrics,
authorizer,
quotas,
fetchManager,
brokerTopicStats,
clusterId,
time,
null
)
}
@Test
def testOffsetCommitWithInvalidPartition(): Unit = {
val topic = "topic"
setupBasicMetadataCache(topic, numPartitions = 1)
def checkInvalidPartition(invalidPartitionId: Int): Unit = {
EasyMock.reset(replicaManager, clientRequestQuotaManager, requestChannel)
val invalidTopicPartition = new TopicPartition(topic, invalidPartitionId)
val partitionOffsetCommitData = new OffsetCommitRequest.PartitionData(15L, Optional.empty[Integer](), "")
val (offsetCommitRequest, request) = buildRequest(new OffsetCommitRequest.Builder("groupId",
Map(invalidTopicPartition -> partitionOffsetCommitData).asJava))
val capturedResponse = expectNoThrottling()
EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
createKafkaApis().handleOffsetCommitRequest(request)
val response = readResponse(ApiKeys.OFFSET_COMMIT, offsetCommitRequest, capturedResponse)
.asInstanceOf[OffsetCommitResponse]
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response.responseData().get(invalidTopicPartition))
}
checkInvalidPartition(-1)
checkInvalidPartition(1) // topic has only one partition
}
@Test
def testTxnOffsetCommitWithInvalidPartition(): Unit = {
val topic = "topic"
setupBasicMetadataCache(topic, numPartitions = 1)
def checkInvalidPartition(invalidPartitionId: Int): Unit = {
EasyMock.reset(replicaManager, clientRequestQuotaManager, requestChannel)
val invalidTopicPartition = new TopicPartition(topic, invalidPartitionId)
val partitionOffsetCommitData = new TxnOffsetCommitRequest.CommittedOffset(15L, "", Optional.empty())
val (offsetCommitRequest, request) = buildRequest(new TxnOffsetCommitRequest.Builder("txnlId", "groupId",
15L, 0.toShort, Map(invalidTopicPartition -> partitionOffsetCommitData).asJava))
val capturedResponse = expectNoThrottling()
EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
createKafkaApis().handleTxnOffsetCommitRequest(request)
val response = readResponse(ApiKeys.TXN_OFFSET_COMMIT, offsetCommitRequest, capturedResponse)
.asInstanceOf[TxnOffsetCommitResponse]
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response.errors().get(invalidTopicPartition))
}
checkInvalidPartition(-1)
checkInvalidPartition(1) // topic has only one partition
}
@Test
def testAddPartitionsToTxnWithInvalidPartition(): Unit = {
val topic = "topic"
setupBasicMetadataCache(topic, numPartitions = 1)
def checkInvalidPartition(invalidPartitionId: Int): Unit = {
EasyMock.reset(replicaManager, clientRequestQuotaManager, requestChannel)
val invalidTopicPartition = new TopicPartition(topic, invalidPartitionId)
val (addPartitionsToTxnRequest, request) = buildRequest(new AddPartitionsToTxnRequest.Builder(
"txnlId", 15L, 0.toShort, List(invalidTopicPartition).asJava))
val capturedResponse = expectNoThrottling()
EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
createKafkaApis().handleAddPartitionToTxnRequest(request)
val response = readResponse(ApiKeys.ADD_PARTITIONS_TO_TXN, addPartitionsToTxnRequest, capturedResponse)
.asInstanceOf[AddPartitionsToTxnResponse]
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response.errors().get(invalidTopicPartition))
}
checkInvalidPartition(-1)
checkInvalidPartition(1) // topic has only one partition
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleAddOffsetToTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleAddOffsetsToTxnRequest(null)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleAddPartitionsToTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleAddPartitionToTxnRequest(null)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleTxnOffsetCommitRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleAddPartitionToTxnRequest(null)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleEndTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleEndTxnRequest(null)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleWriteTxnMarkersRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleWriteTxnMarkersRequest(null)
}
@Test
def shouldRespondWithUnsupportedForMessageFormatOnHandleWriteTxnMarkersWhenMagicLowerThanRequired(): Unit = {
val topicPartition = new TopicPartition("t", 0)
val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(asList(topicPartition))
val expectedErrors = Map(topicPartition -> Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT).asJava
val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
EasyMock.expect(replicaManager.getMagic(topicPartition))
.andReturn(Some(RecordBatch.MAGIC_VALUE_V1))
EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)
createKafkaApis().handleWriteTxnMarkersRequest(request)
val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
.asInstanceOf[WriteTxnMarkersResponse]
assertEquals(expectedErrors, markersResponse.errors(1))
}
@Test
def shouldRespondWithUnknownTopicWhenPartitionIsNotHosted(): Unit = {
val topicPartition = new TopicPartition("t", 0)
val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(asList(topicPartition))
val expectedErrors = Map(topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION).asJava
val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
EasyMock.expect(replicaManager.getMagic(topicPartition))
.andReturn(None)
EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)
createKafkaApis().handleWriteTxnMarkersRequest(request)
val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
.asInstanceOf[WriteTxnMarkersResponse]
assertEquals(expectedErrors, markersResponse.errors(1))
}
@Test
def shouldRespondWithUnsupportedMessageFormatForBadPartitionAndNoErrorsForGoodPartition(): Unit = {
val tp1 = new TopicPartition("t", 0)
val tp2 = new TopicPartition("t1", 0)
val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(asList(tp1, tp2))
val expectedErrors = Map(tp1 -> Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, tp2 -> Errors.NONE).asJava
val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
val responseCallback: Capture[Map[TopicPartition, PartitionResponse] => Unit] = EasyMock.newCapture()
EasyMock.expect(replicaManager.getMagic(tp1))
.andReturn(Some(RecordBatch.MAGIC_VALUE_V1))
EasyMock.expect(replicaManager.getMagic(tp2))
.andReturn(Some(RecordBatch.MAGIC_VALUE_V2))
EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
EasyMock.anyShort(),
EasyMock.eq(true),
EasyMock.eq(false),
EasyMock.anyObject(),
EasyMock.capture(responseCallback),
EasyMock.anyObject(),
EasyMock.anyObject())).andAnswer(new IAnswer[Unit] {
override def answer(): Unit = {
responseCallback.getValue.apply(Map(tp2 -> new PartitionResponse(Errors.NONE)))
}
})
EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)
createKafkaApis().handleWriteTxnMarkersRequest(request)
val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
.asInstanceOf[WriteTxnMarkersResponse]
assertEquals(expectedErrors, markersResponse.errors(1))
EasyMock.verify(replicaManager)
}
@Test
def shouldRespondWithUnknownTopicOrPartitionForBadPartitionAndNoErrorsForGoodPartition(): Unit = {
val tp1 = new TopicPartition("t", 0)
val tp2 = new TopicPartition("t1", 0)
val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(asList(tp1, tp2))
val expectedErrors = Map(tp1 -> Errors.UNKNOWN_TOPIC_OR_PARTITION, tp2 -> Errors.NONE).asJava
val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
val responseCallback: Capture[Map[TopicPartition, PartitionResponse] => Unit] = EasyMock.newCapture()
EasyMock.expect(replicaManager.getMagic(tp1))
.andReturn(None)
EasyMock.expect(replicaManager.getMagic(tp2))
.andReturn(Some(RecordBatch.MAGIC_VALUE_V2))
EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
EasyMock.anyShort(),
EasyMock.eq(true),
EasyMock.eq(false),
EasyMock.anyObject(),
EasyMock.capture(responseCallback),
EasyMock.anyObject(),
EasyMock.anyObject())).andAnswer(new IAnswer[Unit] {
override def answer(): Unit = {
responseCallback.getValue.apply(Map(tp2 -> new PartitionResponse(Errors.NONE)))
}
})
EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)
createKafkaApis().handleWriteTxnMarkersRequest(request)
val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
.asInstanceOf[WriteTxnMarkersResponse]
assertEquals(expectedErrors, markersResponse.errors(1))
EasyMock.verify(replicaManager)
}
@Test
def shouldAppendToLogOnWriteTxnMarkersWhenCorrectMagicVersion(): Unit = {
val topicPartition = new TopicPartition("t", 0)
val request = createWriteTxnMarkersRequest(asList(topicPartition))._2
EasyMock.expect(replicaManager.getMagic(topicPartition))
.andReturn(Some(RecordBatch.MAGIC_VALUE_V2))
EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
EasyMock.anyShort(),
EasyMock.eq(true),
EasyMock.eq(false),
EasyMock.anyObject(),
EasyMock.anyObject(),
EasyMock.anyObject(),
EasyMock.anyObject()))
EasyMock.replay(replicaManager)
createKafkaApis().handleWriteTxnMarkersRequest(request)
EasyMock.verify(replicaManager)
}
@Test
def testLeaderReplicaIfLocalRaisesFencedLeaderEpoch(): Unit = {
testListOffsetFailedGetLeaderReplica(Errors.FENCED_LEADER_EPOCH)
}
@Test
def testLeaderReplicaIfLocalRaisesUnknownLeaderEpoch(): Unit = {
testListOffsetFailedGetLeaderReplica(Errors.UNKNOWN_LEADER_EPOCH)
}
@Test
def testLeaderReplicaIfLocalRaisesNotLeaderForPartition(): Unit = {
testListOffsetFailedGetLeaderReplica(Errors.NOT_LEADER_FOR_PARTITION)
}
@Test
def testLeaderReplicaIfLocalRaisesUnknownTopicOrPartition(): Unit = {
testListOffsetFailedGetLeaderReplica(Errors.UNKNOWN_TOPIC_OR_PARTITION)
}
private def testListOffsetFailedGetLeaderReplica(error: Errors): Unit = {
val tp = new TopicPartition("foo", 0)
val isolationLevel = IsolationLevel.READ_UNCOMMITTED
val currentLeaderEpoch = Optional.of[Integer](15)
EasyMock.expect(replicaManager.fetchOffsetForTimestamp(
EasyMock.eq(tp),
EasyMock.eq(ListOffsetRequest.EARLIEST_TIMESTAMP),
EasyMock.eq(Some(isolationLevel)),
EasyMock.eq(currentLeaderEpoch),
fetchOnlyFromLeader = EasyMock.eq(true))
).andThrow(error.exception)
val capturedResponse = expectNoThrottling()
EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
val targetTimes = Map(tp -> new ListOffsetRequest.PartitionData(ListOffsetRequest.EARLIEST_TIMESTAMP,
currentLeaderEpoch))
val builder = ListOffsetRequest.Builder.forConsumer(true, isolationLevel)
.setTargetTimes(targetTimes.asJava)
val (listOffsetRequest, request) = buildRequest(builder)
createKafkaApis().handleListOffsetRequest(request)
val response = readResponse(ApiKeys.LIST_OFFSETS, listOffsetRequest, capturedResponse)
.asInstanceOf[ListOffsetResponse]
assertTrue(response.responseData.containsKey(tp))
val partitionData = response.responseData.get(tp)
assertEquals(error, partitionData.error)
assertEquals(ListOffsetResponse.UNKNOWN_OFFSET, partitionData.offset)
assertEquals(ListOffsetResponse.UNKNOWN_TIMESTAMP, partitionData.timestamp)
}
@Test
def testReadUncommittedConsumerListOffsetLatest(): Unit = {
testConsumerListOffsetLatest(IsolationLevel.READ_UNCOMMITTED)
}
@Test
def testReadCommittedConsumerListOffsetLatest(): Unit = {
testConsumerListOffsetLatest(IsolationLevel.READ_COMMITTED)
}
/**
* Verifies that the metadata response is correct if the broker listeners are inconsistent (i.e. one broker has
* more listeners than another) and the request is sent on the listener that exists in both brokers.
*/
@Test
def testMetadataRequestOnSharedListenerWithInconsistentListenersAcrossBrokers(): Unit = {
val (plaintextListener, _) = updateMetadataCacheWithInconsistentListeners()
val response = sendMetadataRequestWithInconsistentListeners(plaintextListener)
assertEquals(Set(0, 1), response.brokers.asScala.map(_.id).toSet)
}
/*
* Verifies that the metadata response is correct if the broker listeners are inconsistent (i.e. one broker has
* more listeners than another) and the request is sent on the listener that exists in one broker.
*/
@Test
def testMetadataRequestOnDistinctListenerWithInconsistentListenersAcrossBrokers(): Unit = {
val (_, anotherListener) = updateMetadataCacheWithInconsistentListeners()
val response = sendMetadataRequestWithInconsistentListeners(anotherListener)
assertEquals(Set(0), response.brokers.asScala.map(_.id).toSet)
}
/**
* Verifies that sending a fetch request with version 9 works correctly when
* ReplicaManager.getLogConfig returns None.
*/
@Test
def testFetchRequestV9WithNoLogConfig(): Unit = {
val tp = new TopicPartition("foo", 0)
setupBasicMetadataCache(tp.topic, numPartitions = 1)
val hw = 3
val timestamp = 1000
expect(replicaManager.getLogConfig(EasyMock.eq(tp))).andReturn(None)
replicaManager.fetchMessages(anyLong, anyInt, anyInt, anyInt, anyBoolean,
anyObject[Seq[(TopicPartition, FetchRequest.PartitionData)]], anyObject[ReplicaQuota],
anyObject[Seq[(TopicPartition, FetchPartitionData)] => Unit](), anyObject[IsolationLevel])
expectLastCall[Unit].andAnswer(new IAnswer[Unit] {
def answer: Unit = {
val callback = getCurrentArguments.apply(7).asInstanceOf[(Seq[(TopicPartition, FetchPartitionData)] => Unit)]
val records = MemoryRecords.withRecords(CompressionType.NONE,
new SimpleRecord(timestamp, "foo".getBytes(StandardCharsets.UTF_8)))
callback(Seq(tp -> new FetchPartitionData(Errors.NONE, hw, 0, records,
None, None)))
}
})
val fetchData = Map(tp -> new FetchRequest.PartitionData(0, 0, 1000,
Optional.empty())).asJava
val fetchMetadata = new JFetchMetadata(0, 0)
val fetchContext = new FullFetchContext(time, new FetchSessionCache(1000, 100),
fetchMetadata, fetchData, false)
expect(fetchManager.newContext(anyObject[JFetchMetadata],
anyObject[util.Map[TopicPartition, FetchRequest.PartitionData]],
anyObject[util.List[TopicPartition]],
anyBoolean)).andReturn(fetchContext)
val capturedResponse = expectNoThrottling()
EasyMock.expect(clientQuotaManager.maybeRecordAndGetThrottleTimeMs(
anyObject[RequestChannel.Request](), anyDouble, anyLong)).andReturn(0)
EasyMock.replay(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, fetchManager)
val builder = new FetchRequest.Builder(9, 9, -1, 100, 0, fetchData)
val (fetchRequest, request) = buildRequest(builder)
createKafkaApis().handleFetchRequest(request)
val response = readResponse(ApiKeys.FETCH, fetchRequest, capturedResponse)
.asInstanceOf[FetchResponse[BaseRecords]]
assertTrue(response.responseData.containsKey(tp))
val partitionData = response.responseData.get(tp)
assertEquals(Errors.NONE, partitionData.error)
assertEquals(hw, partitionData.highWatermark)
assertEquals(-1, partitionData.lastStableOffset)
assertEquals(0, partitionData.logStartOffset)
assertEquals(timestamp,
partitionData.records.asInstanceOf[MemoryRecords].batches.iterator.next.maxTimestamp)
assertNull(partitionData.abortedTransactions)
}
/**
* Return pair of listener names in the metadataCache: PLAINTEXT and LISTENER2 respectively.
*/
private def updateMetadataCacheWithInconsistentListeners(): (ListenerName, ListenerName) = {
val plaintextListener = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)
val anotherListener = new ListenerName("LISTENER2")
val brokers = Set(
new Broker(0, Seq(new EndPoint("broker0", 9092, SecurityProtocol.PLAINTEXT, plaintextListener),
new EndPoint("broker0", 9093, SecurityProtocol.PLAINTEXT, anotherListener)).asJava, "rack"),
new Broker(1, Seq(new EndPoint("broker1", 9092, SecurityProtocol.PLAINTEXT, plaintextListener)).asJava,
"rack")
)
val updateMetadataRequest = new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion, 0,
0, 0, Map.empty[TopicPartition, UpdateMetadataRequest.PartitionState].asJava, brokers.asJava).build()
metadataCache.updateMetadata(correlationId = 0, updateMetadataRequest)
(plaintextListener, anotherListener)
}
private def sendMetadataRequestWithInconsistentListeners(requestListener: ListenerName): MetadataResponse = {
val capturedResponse = expectNoThrottling()
EasyMock.replay(clientRequestQuotaManager, requestChannel)
val (metadataRequest, requestChannelRequest) = buildRequest(MetadataRequest.Builder.allTopics, requestListener)
createKafkaApis().handleTopicMetadataRequest(requestChannelRequest)
readResponse(ApiKeys.METADATA, metadataRequest, capturedResponse).asInstanceOf[MetadataResponse]
}
private def testConsumerListOffsetLatest(isolationLevel: IsolationLevel): Unit = {
val tp = new TopicPartition("foo", 0)
val latestOffset = 15L
val currentLeaderEpoch = Optional.empty[Integer]()
EasyMock.expect(replicaManager.fetchOffsetForTimestamp(
EasyMock.eq(tp),
EasyMock.eq(ListOffsetRequest.LATEST_TIMESTAMP),
EasyMock.eq(Some(isolationLevel)),
EasyMock.eq(currentLeaderEpoch),
fetchOnlyFromLeader = EasyMock.eq(true))
).andReturn(Some(new TimestampAndOffset(ListOffsetResponse.UNKNOWN_TIMESTAMP, latestOffset, currentLeaderEpoch)))
val capturedResponse = expectNoThrottling()
EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
val targetTimes = Map(tp -> new ListOffsetRequest.PartitionData(ListOffsetRequest.LATEST_TIMESTAMP,
currentLeaderEpoch))
val builder = ListOffsetRequest.Builder.forConsumer(true, isolationLevel)
.setTargetTimes(targetTimes.asJava)
val (listOffsetRequest, request) = buildRequest(builder)
createKafkaApis().handleListOffsetRequest(request)
val response = readResponse(ApiKeys.LIST_OFFSETS, listOffsetRequest, capturedResponse).asInstanceOf[ListOffsetResponse]
assertTrue(response.responseData.containsKey(tp))
val partitionData = response.responseData.get(tp)
assertEquals(Errors.NONE, partitionData.error)
assertEquals(latestOffset, partitionData.offset)
assertEquals(ListOffsetResponse.UNKNOWN_TIMESTAMP, partitionData.timestamp)
}
private def createWriteTxnMarkersRequest(partitions: util.List[TopicPartition]) = {
val requestBuilder = new WriteTxnMarkersRequest.Builder(asList(
new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, partitions)))
buildRequest(requestBuilder)
}
private def buildRequest[T <: AbstractRequest](builder: AbstractRequest.Builder[T],
listenerName: ListenerName = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)): (T, RequestChannel.Request) = {
val request = builder.build()
val buffer = request.serialize(new RequestHeader(builder.apiKey, request.version, "", 0))
// read the header from the buffer first so that the body can be read next from the Request constructor
val header = RequestHeader.parse(buffer)
val context = new RequestContext(header, "1", InetAddress.getLocalHost, KafkaPrincipal.ANONYMOUS,
listenerName, SecurityProtocol.PLAINTEXT)
(request, new RequestChannel.Request(processor = 1, context = context, startTimeNanos = 0, MemoryPool.NONE, buffer,
requestChannelMetrics))
}
private def readResponse(api: ApiKeys, request: AbstractRequest, capturedResponse: Capture[RequestChannel.Response]): AbstractResponse = {
val response = capturedResponse.getValue
assertTrue(s"Unexpected response type: ${response.getClass}", response.isInstanceOf[SendResponse])
val sendResponse = response.asInstanceOf[SendResponse]
val send = sendResponse.responseSend
val channel = new ByteBufferChannel(send.size)
send.writeTo(channel)
channel.close()
channel.buffer.getInt() // read the size
ResponseHeader.parse(channel.buffer)
val struct = api.responseSchema(request.version).read(channel.buffer)
AbstractResponse.parseResponse(api, struct, request.version)
}
private def expectNoThrottling(): Capture[RequestChannel.Response] = {
EasyMock.expect(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(EasyMock.anyObject[RequestChannel.Request]()))
.andReturn(0)
EasyMock.expect(clientRequestQuotaManager.throttle(EasyMock.anyObject[RequestChannel.Request](), EasyMock.eq(0),
EasyMock.anyObject[RequestChannel.Response => Unit]()))
val capturedResponse = EasyMock.newCapture[RequestChannel.Response]()
EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
capturedResponse
}
private def setupBasicMetadataCache(topic: String, numPartitions: Int): Unit = {
val replicas = List(0.asInstanceOf[Integer]).asJava
val partitionState = new UpdateMetadataRequest.PartitionState(1, 0, 1, replicas, 0, replicas, Collections.emptyList())
val plaintextListener = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)
val broker = new Broker(0, Seq(new EndPoint("broker0", 9092, SecurityProtocol.PLAINTEXT, plaintextListener)).asJava, "rack")
val partitions = (0 until numPartitions).map(new TopicPartition(topic, _) -> partitionState).toMap
val updateMetadataRequest = new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion, 0,
0, 0, partitions.asJava, Set(broker).asJava).build()
metadataCache.updateMetadata(correlationId = 0, updateMetadataRequest)
}
}
| gf53520/kafka | core/src/test/scala/unit/kafka/server/KafkaApisTest.scala | Scala | apache-2.0 | 28,359 |
/* ## Language map */
package laughedelic.literator.lib
case class Comment(start: String, end: String, line: String)
case class Language(syntax: String, ext: String, comment: Comment)
object LanguageMap {
def clike = Comment("/*", "*/", "//")
val langs = List(
Language("c", "c", clike)
, Language("cpp", "cpp", clike)
, Language("csharp", "cs", clike)
, Language("objc", "m", clike)
, Language("java", "java", clike)
, Language("javascript", "js", clike)
, Language("scala", "scala", clike)
, Language("php", "php", clike)
, Language("haskell", "hs", Comment("{-", "-}", "--"))
, Language("clojure", "clj", Comment("(comment ", ")", ";"))
, Language("applescript", "applescript", Comment("(*", "*)", "--"))
, Language("pascal", "pas", Comment("(*", "*)", "//"))
)
val langMap: Map[String, Language] = Map(langs map { l => (l.ext, l) }: _*)
}
| laughedelic/literator | src/main/scala/lib/LanguageMap.scala | Scala | agpl-3.0 | 937 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import events._
import SharedHelpers._
class TopLevelSuite extends Suite with OneInstancePerTest {
import TopLevelSuite.sideEffectWasNotSeen
var sideEffectWasIsolated = true
def testOne() {
sideEffectWasNotSeen = sideEffectWasNotSeen && sideEffectWasIsolated
sideEffectWasIsolated = false
}
def testTwo() {
sideEffectWasNotSeen = sideEffectWasNotSeen && sideEffectWasIsolated
sideEffectWasIsolated = false
}
def testThree() {
sideEffectWasNotSeen = sideEffectWasNotSeen && sideEffectWasIsolated
sideEffectWasIsolated = false
}
}
object TopLevelSuite {
var sideEffectWasNotSeen = true
}
class OneInstancePerTestSpec extends FunSpec {
describe("The OneInstancePerTest trait") {
it("should isolate side effects from one test to the next in a top level Suite class that does not override newInstance") {
var sideEffectWasNotSeen = true
class MySuite extends Suite with OneInstancePerTest {
var sideEffectWasIsolated = true
def testOne() {
sideEffectWasNotSeen = sideEffectWasNotSeen && sideEffectWasIsolated
sideEffectWasIsolated = false
}
def testTwo() {
sideEffectWasNotSeen = sideEffectWasNotSeen && sideEffectWasIsolated
sideEffectWasIsolated = false
}
def testThree() {
sideEffectWasNotSeen = sideEffectWasNotSeen && sideEffectWasIsolated
sideEffectWasIsolated = false
}
override def newInstance = new MySuite
}
val suite = new MySuite
suite.run(None, Args(SilentReporter))
assert(sideEffectWasNotSeen)
}
it("should isolate side effects from one test to the next in an inner Suite class that overrides newInstance") {
val suite = new TopLevelSuite
suite.run(None, Args(SilentReporter))
assert(TopLevelSuite.sideEffectWasNotSeen)
}
it("should send TestIgnored for an ignored test") {
var aTheTestThisCalled = false
var aTheTestThatCalled = false
class ASpec extends WordSpec with OneInstancePerTest {
"test this" in { aTheTestThisCalled = true }
"test that" in { aTheTestThatCalled = true }
override def newInstance = new ASpec
}
val a = new ASpec
val repA = new TestIgnoredTrackingReporter
a.run(None, Args(repA))
assert(!repA.testIgnoredReceived)
assert(aTheTestThisCalled)
assert(aTheTestThatCalled)
var bTheTestThisCalled = false
var bTheTestThatCalled = false
class BSpec extends WordSpec with OneInstancePerTest {
"test this" ignore { bTheTestThisCalled = true }
"test that" in { bTheTestThatCalled = true }
override def newInstance = new BSpec
}
val b = new BSpec
val repB = new TestIgnoredTrackingReporter
b.run(None, Args(repB))
assert(repB.testIgnoredReceived)
assert(repB.lastEvent.isDefined)
assert(repB.lastEvent.get.testName endsWith "test this")
assert(!bTheTestThisCalled)
assert(bTheTestThatCalled)
var cTheTestThisCalled = false
var cTheTestThatCalled = false
class CSpec extends WordSpec with OneInstancePerTest {
"test this" in { cTheTestThisCalled = true }
"test that" ignore { cTheTestThatCalled = true }
override def newInstance = new CSpec
}
val c = new CSpec
val repC = new TestIgnoredTrackingReporter
c.run(None, Args(repC))
assert(repC.testIgnoredReceived)
assert(repC.lastEvent.isDefined)
assert(repC.lastEvent.get.testName endsWith "test that", repC.lastEvent.get.testName)
assert(cTheTestThisCalled)
assert(!cTheTestThatCalled)
// The order I want is order of appearance in the file.
// Will try and implement that tomorrow. Subtypes will be able to change the order.
var dTheTestThisCalled = false
var dTheTestThatCalled = false
class DSpec extends WordSpec with OneInstancePerTest {
"test this" ignore { dTheTestThisCalled = true }
"test that" ignore { dTheTestThatCalled = true }
override def newInstance = new DSpec
}
val d = new DSpec
val repD = new TestIgnoredTrackingReporter
d.run(None, Args(repD))
assert(repD.testIgnoredReceived)
assert(repD.lastEvent.isDefined)
assert(repD.lastEvent.get.testName endsWith "test that") // last because should be in order of appearance
assert(!dTheTestThisCalled)
assert(!dTheTestThatCalled)
}
it("should ignore a test marked as ignored if it is passed in a Some as testName") {
var bTheTestThisCalled = false
var bTheTestThatCalled = false
class BSpec extends WordSpec with OneInstancePerTest {
"test this" ignore { bTheTestThisCalled = true }
"test that" in { bTheTestThatCalled = true }
override def newInstance = new BSpec
}
val b = new BSpec
val repB = new TestIgnoredTrackingReporter
b.run(Some("test this"), Args(repB))
assert(repB.testIgnoredReceived)
assert(!bTheTestThisCalled)
assert(!bTheTestThatCalled)
}
it("should throw IllegalArgumentException from runTests if runTestInNewInstance is set but testName is empty") {
class ASpec extends WordSpec with OneInstancePerTest {
"test this" ignore { }
"test that" in { }
override def newInstance = new ASpec
def invokeRunTests() {
this.runTests(None, Args(SilentReporter, runTestInNewInstance = true))
}
}
val aSpec = new ASpec
intercept[IllegalArgumentException] {
aSpec.invokeRunTests()
}
}
it("should only execute nested suites in outer instance") {
class InnerSuite extends FunSuite {
test("hi") { info("hi info") }
}
class OuterSuite extends FunSuite with OneInstancePerTest {
override def nestedSuites = Vector(new InnerSuite)
test("outer 1") { info("outer 1 info") }
test("outer 2") { info("outer 2 info") }
override def newInstance = new OuterSuite
}
val rep = new EventRecordingReporter
val outer = new OuterSuite
outer.run(None, Args(rep))
assert(rep.testStartingEventsReceived.size === 3)
val testSucceededEvents = rep.testSucceededEventsReceived
assert(testSucceededEvents.size === 3)
testSucceededEvents.foreach { e =>
e.testName match {
case "hi" =>
assert(e.recordedEvents.size === 1)
assert(e.recordedEvents(0).asInstanceOf[InfoProvided].message === "hi info")
case "outer 1" =>
assert(e.recordedEvents.size === 1)
assert(e.recordedEvents(0).asInstanceOf[InfoProvided].message === "outer 1 info")
case "outer 2" =>
assert(e.recordedEvents.size === 1)
assert(e.recordedEvents(0).asInstanceOf[InfoProvided].message === "outer 2 info")
case other =>
fail("Unexpected TestSucceeded event: " + other)
}
}
}
}
}
| travisbrown/scalatest | src/test/scala/org/scalatest/OneInstancePerTestSpec.scala | Scala | apache-2.0 | 7,717 |
package scala.reflect.quasiquotes
import org.scalacheck._, Prop._, Gen._, Arbitrary._
import scala.reflect.runtime.universe._, Flag._
object TermConstructionProps extends QuasiquoteProperties("term construction") {
property("unquote single tree return tree itself") = forAll { (t: Tree) =>
q"$t" β t
}
property("unquote trees into if expression") = forAll { (t1: Tree, t2: Tree, t3: Tree) =>
q"if($t1) $t2 else $t3" β If(t1, t2, t3)
}
property("unquote trees into ascriptiopn") = forAll { (t1: Tree, t2: Tree) =>
q"$t1 : $t2" β Typed(t1, t2)
}
property("unquote trees into apply") = forAll { (t1: Tree, t2: Tree, t3: Tree) =>
q"$t1($t2, $t3)" β Apply(t1, List(t2, t3))
}
property("unquote trees with .. rank into apply") = forAll { (ts: List[Tree]) =>
q"f(..$ts)" β Apply(q"f", ts)
}
property("unquote iterable into apply") = forAll { (trees: List[Tree]) =>
val itrees: Iterable[Tree] = trees
q"f(..$itrees)" β Apply(q"f", trees)
}
property("unquote trees with ... rank into apply") = forAll { (ts1: List[Tree], ts2: List[Tree]) =>
val argss = List(ts1, ts2)
q"f(...$argss)" β Apply(Apply(q"f", ts1), ts2)
}
property("unquote term name into assign") = forAll { (name: TermName, t: Tree) =>
q"$name = $t" β Assign(Ident(name), t)
}
property("unquote trees into block") = forAll { (t1: Tree, t2: Tree, t3: Tree) =>
blockInvariant(q"""{
$t1
$t2
$t3
}""", List(t1, t2, t3))
}
property("unquote tree into new") = forAll { (tree: Tree) =>
q"new $tree" β Apply(Select(New(tree), termNames.CONSTRUCTOR), List())
}
property("unquote tree into return") = forAll { (tree: Tree) =>
q"return $tree" β Return(tree)
}
property("unquote a list of arguments") = forAll { (fun: Tree, args: List[Tree]) =>
q"$fun(..$args)" β Apply(fun, args)
}
property("unquote list and non-list fun arguments") = forAll { (fun: Tree, arg1: Tree, arg2: Tree, args: List[Tree]) =>
q"$fun(..$args, $arg1, $arg2)" β Apply(fun, args ++ List(arg1) ++ List(arg2)) &&
q"$fun($arg1, ..$args, $arg2)" β Apply(fun, List(arg1) ++ args ++ List(arg2)) &&
q"$fun($arg1, $arg2, ..$args)" β Apply(fun, List(arg1) ++ List(arg2) ++ args)
}
property("unquote into new") = forAll { (name: TypeName, body: List[Tree]) =>
q"new $name { ..$body }" β
q"""{
final class $$anon extends $name {
..$body
}
new $$anon
}"""
}
property("unquote type name into this") = forAll { (T: TypeName) =>
q"$T.this" β This(T)
}
property("unquote tree into throw") = forAll { (t: Tree) =>
q"throw $t" β Throw(t)
}
property("unquote trees into type apply") = forAll { (fun: TreeIsTerm, types: List[Tree]) =>
q"$fun[..$types]" β (if (types.nonEmpty) TypeApply(fun, types) else fun)
}
property("unquote trees into while loop") = forAll { (cond: Tree, body: Tree) =>
val LabelDef(_, List(), If(cond1, Block(List(body1), Apply(_, List())), Literal(Constant(())))) = q"while($cond) $body"
body1 β body && cond1 β cond
}
property("unquote trees into do while loop") = forAll { (cond: Tree, body: Tree) =>
val LabelDef(_, List(), Block(List(body1), If(cond1, Apply(_, List()), Literal(Constant(()))))) = q"do $body while($cond)"
body1 β body && cond1 β cond
}
def blockInvariant(quote: Tree, trees: List[Tree]) =
quote β (trees match {
case Nil => q"{}"
case _ :+ last if !last.isTerm => Block(trees, q"()")
case head :: Nil => head
case init :+ last => Block(init, last)
})
property("unquote list of trees into block (1)") = forAll { (trees: List[Tree]) =>
blockInvariant(q"{ ..$trees }", trees)
}
property("unquote list of trees into block (2)") = forAll { (trees1: List[Tree], trees2: List[Tree]) =>
blockInvariant(q"{ ..$trees1 ; ..$trees2 }", trees1 ++ trees2)
}
property("unquote list of trees into block (3)") = forAll { (trees: List[Tree], tree: Tree) =>
blockInvariant(q"{ ..$trees; $tree }", trees :+ tree)
}
property("unquote term into brackets") = test {
val a = q"a"
assert(q"($a)" β a)
}
property("unquote terms into tuple") = test {
val a1 = q"a1"
val a2 = q"a2"
val as = List(a1, a2)
assert(q"(..$as)" β q"scala.Tuple2($a1, $a2)")
assert(q"(a0, ..$as)" β q"scala.Tuple3(a0, $a1, $a2)")
}
property("unquote empty list into tuple") = test {
val empty = List[Tree]()
assert(q"(..$empty)" β q"()")
}
property("unquote single element list into tuple") = test {
val xs = q"x" :: Nil
assert(q"(..$xs)" β xs.head)
}
property("function param flags are the same") = test {
val xy = q"val x: A" :: q"val y: B" :: Nil
assertEqAst(q"(..$xy) => x + y", "(x: A, y: B) => x + y")
}
property("anonymous functions don't support default values") = test {
val x = q"val x: Int = 1"
assertThrows[IllegalArgumentException] { q"($x) => x" }
}
property("assign variable") = test {
val v = q"v"
val value = q"foo"
assertEqAst(q"$v = $value", "v = foo")
}
property("assign update 1") = test {
val v = q"v"
val args = q"1" :: q"2" :: Nil
val value = q"foo"
assertEqAst(q"$v(..$args) = $value", "v(1, 2) = foo")
}
property("assign update 2") = test {
val a = q"v(0)"
val value = q"foo"
assertEqAst(q"$a = $value", "v(0) = foo")
}
property("assign or named arg") = test {
val assignx = q"x = 1"
assertEqAst(q"f($assignx)", "f(x = 1)")
}
property("fresh names are regenerated at each evaluation") = test {
def plusOne = q"{ _ + 1 }"
assert(!plusOne.equalsStructure(plusOne))
def whileTrue = q"while(true) false"
assert(!whileTrue.equalsStructure(whileTrue))
def withEvidence = q"def foo[T: X]"
assert(!withEvidence.equalsStructure(withEvidence))
}
property("make sure inference doesn't infer any") = test {
val l1 = List(q"foo")
val l2 = List(q"bar")
val baz = q"baz"
assert(q"f(..${l1 ++ l2})" β q"f(foo, bar)")
assert(q"f(..${l1 ++ l2}, $baz)" β q"f(foo, bar, baz)")
assert(q"f(${if (true) q"a" else q"b"})" β q"f(a)")
}
property("unquote iterable of non-parametric type") = test {
object O extends Iterable[Tree] { def iterator = List(q"foo").iterator }
q"f(..$O)"
}
property("SI-8016") = test {
val xs = q"1" :: q"2" :: Nil
assertEqAst(q"..$xs", "{1; 2}")
assertEqAst(q"{..$xs}", "{1; 2}")
}
property("SI-6842") = test {
val cases: List[Tree] = cq"a => b" :: cq"_ => c" :: Nil
assertEqAst(q"1 match { case ..$cases }", "1 match { case a => b case _ => c }")
assertEqAst(q"try 1 catch { case ..$cases }", "try 1 catch { case a => b case _ => c }")
}
property("SI-8009") = test {
q"`foo`".asInstanceOf[reflect.internal.SymbolTable#Ident].isBackquoted
}
property("SI-8148") = test {
val q"($a, $b) => $_" = q"_ + _"
assert(a.name != b.name)
}
property("SI-7275 a") = test {
val t = q"stat1; stat2"
assertEqAst(q"..$t", "{stat1; stat2}")
}
property("SI-7275 b") = test {
def f(t: Tree) = q"..$t"
assertEqAst(f(q"stat1; stat2"), "{stat1; stat2}")
}
property("SI-7275 c1") = test {
object O
implicit val liftO = Liftable[O.type] { _ => q"foo; bar" }
assertEqAst(q"f(..$O)", "f(foo, bar)")
}
property("SI-7275 c2") = test {
object O
implicit val liftO = Liftable[O.type] { _ => q"{ foo; bar }; { baz; bax }" }
assertEqAst(q"f(...$O)", "f(foo, bar)(baz, bax)")
}
property("SI-7275 d") = test {
val l = q"a; b" :: q"c; d" :: Nil
assertEqAst(q"f(...$l)", "f(a, b)(c, d)")
val l2: Iterable[Tree] = l
assertEqAst(q"f(...$l2)", "f(a, b)(c, d)")
}
property("SI-7275 e") = test {
val t = q"{ a; b }; { c; d }"
assertEqAst(q"f(...$t)", "f(a, b)(c, d)")
}
property("SI-7275 e2") = test {
val t = q"{ a; b }; c; d"
assertEqAst(q"f(...$t)", "f(a, b)(c)(d)")
}
property("remove synthetic unit") = test {
val q"{ ..$stats1 }" = q"{ def x = 2 }"
assert(stats1 β List(q"def x = 2"))
val q"{ ..$stats2 }" = q"{ class X }"
assert(stats2 β List(q"class X"))
val q"{ ..$stats3 }" = q"{ type X = Int }"
assert(stats3 β List(q"type X = Int"))
val q"{ ..$stats4 }" = q"{ val x = 2 }"
assert(stats4 β List(q"val x = 2"))
}
property("don't remove user-defined unit") = test {
val q"{ ..$stats }" = q"{ def x = 2; () }"
assert(stats β List(q"def x = 2", q"()"))
}
property("empty-tree is not a block") = test {
assertThrows[MatchError] {
val q"{ ..$stats1 }" = q" "
}
}
property("empty block is synthetic unit") = test {
val q"()" = q"{}"
val q"{..$stats}" = q"{}"
assert(stats.isEmpty)
assertEqAst(q"{..$stats}", "{}")
assertEqAst(q"{..$stats}", "()")
}
property("consistent variable order") = test {
val q"$a = $b = $c = $d = $e = $f = $g = $h = $k = $l" = q"a = b = c = d = e = f = g = h = k = l"
assert(a β q"a" && b β q"b" && c β q"c" && d β q"d" && e β q"e" && g β q"g" && h β q"h" && k β q"k" && l β q"l")
}
property("SI-8385 a") = test {
assertEqAst(q"(foo.x = 1)(2)", "(foo.x = 1)(2)")
}
property("SI-8385 b") = test {
assertEqAst(q"(() => ())()", "(() => ())()")
}
property("match scrutinee may not be empty") = test {
assertThrows[IllegalArgumentException] {
val scrutinee = q""
val cases = List(cq"_ =>")
q"$scrutinee match { case ..$cases }"
}
}
property("construct partial function") = test {
val cases = List(cq"a => b", cq"c => d")
assertEqAst(q"{ case ..$cases }", "{ case a => b case c => d }")
}
property("SI-8609 a") = test {
val q1 = q"val x = 1"
val q2 = q"..$q1; val y = 2"
assert(q2 β q"{ val x = 1; val y = 2 }")
}
property("SI-8609 b") = test {
val q1 = q"import foo.bar"
val q2 = q"..$q1; val y = 2"
assert(q2 β q"{ import foo.bar; val y = 2 }")
}
}
| shimib/scala | test/scalacheck/scala/reflect/quasiquotes/TermConstructionProps.scala | Scala | bsd-3-clause | 10,065 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.integration
import java.io.File
import java.util.Arrays
import kafka.common.KafkaException
import kafka.server._
import kafka.utils.TestUtils
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
import org.junit.{After, Before}
import scala.collection.mutable.{ArrayBuffer, Buffer}
import java.util.Properties
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.utils.Time
/**
* A test harness that brings up some number of broker nodes
*/
abstract class KafkaServerTestHarness extends ZooKeeperTestHarness {
var instanceConfigs: Seq[KafkaConfig] = null
var servers: Buffer[KafkaServer] = new ArrayBuffer
var brokerList: String = null
var alive: Array[Boolean] = null
val kafkaPrincipalType = KafkaPrincipal.USER_TYPE
/**
* Implementations must override this method to return a set of KafkaConfigs. This method will be invoked for every
* test and should not reuse previous configurations unless they select their ports randomly when servers are started.
*/
def generateConfigs: Seq[KafkaConfig]
/**
* Override this in case ACLs or security credentials must be set before `servers` are started.
*
* This is required in some cases because of the topic creation in the setup of `IntegrationTestHarness`. If the ACLs
* are only set later, tests may fail. The failure could manifest itself as a cluster action
* authorization exception when processing an update metadata request (controller -> broker) or in more obscure
* ways (e.g. __consumer_offsets topic replication fails because the metadata cache has no brokers as a previous
* update metadata request failed due to an authorization exception).
*
* The default implementation of this method is a no-op.
*/
def configureSecurityBeforeServersStart() {}
/**
* Override this in case Tokens or security credentials needs to be created after `servers` are started.
* The default implementation of this method is a no-op.
*/
def configureSecurityAfterServersStart() {}
def configs: Seq[KafkaConfig] = {
if (instanceConfigs == null)
instanceConfigs = generateConfigs
instanceConfigs
}
def serverForId(id: Int): Option[KafkaServer] = servers.find(s => s.config.brokerId == id)
def boundPort(server: KafkaServer): Int = server.boundPort(listenerName)
protected def securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT
protected def listenerName: ListenerName = ListenerName.forSecurityProtocol(securityProtocol)
protected def trustStoreFile: Option[File] = None
protected def serverSaslProperties: Option[Properties] = None
protected def clientSaslProperties: Option[Properties] = None
protected def brokerTime(brokerId: Int): Time = Time.SYSTEM
@Before
override def setUp() {
super.setUp()
if (configs.isEmpty)
throw new KafkaException("Must supply at least one server config.")
// default implementation is a no-op, it is overridden by subclasses if required
configureSecurityBeforeServersStart()
// Add each broker to `servers` buffer as soon as it is created to ensure that brokers
// are shutdown cleanly in tearDown even if a subsequent broker fails to start
for (config <- configs)
servers += TestUtils.createServer(config, time = brokerTime(config.brokerId))
brokerList = TestUtils.bootstrapServers(servers, listenerName)
alive = new Array[Boolean](servers.length)
Arrays.fill(alive, true)
// default implementation is a no-op, it is overridden by subclasses if required
configureSecurityAfterServersStart()
}
@After
override def tearDown() {
if (servers != null) {
TestUtils.shutdownServers(servers)
}
super.tearDown
}
/**
* Create a topic in ZooKeeper.
* Wait until the leader is elected and the metadata is propagated to all brokers.
* Return the leader for each partition.
*/
def createTopic(topic: String, numPartitions: Int = 1, replicationFactor: Int = 1,
topicConfig: Properties = new Properties): scala.collection.immutable.Map[Int, Int] =
TestUtils.createTopic(zkClient, topic, numPartitions, replicationFactor, servers, topicConfig)
/**
* Create a topic in ZooKeeper using a customized replica assignment.
* Wait until the leader is elected and the metadata is propagated to all brokers.
* Return the leader for each partition.
*/
def createTopic(topic: String, partitionReplicaAssignment: collection.Map[Int, Seq[Int]]): scala.collection.immutable.Map[Int, Int] =
TestUtils.createTopic(zkClient, topic, partitionReplicaAssignment, servers)
/**
* Pick a broker at random and kill it if it isn't already dead
* Return the id of the broker killed
*/
def killRandomBroker(): Int = {
val index = TestUtils.random.nextInt(servers.length)
killBroker(index)
index
}
def killBroker(index: Int) {
if(alive(index)) {
servers(index).shutdown()
servers(index).awaitShutdown()
alive(index) = false
}
}
/**
* Restart any dead brokers
*/
def restartDeadBrokers() {
for(i <- servers.indices if !alive(i)) {
servers(i).startup()
alive(i) = true
}
}
}
| Ishiihara/kafka | core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala | Scala | apache-2.0 | 6,085 |
package integration.models
import models.{MongoSugar, TVChannelProviderRepository, TVChannelProvider}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Span, Seconds}
import org.scalatest.{BeforeAndAfterAll, MustMatchers}
import org.scalatestplus.play.PlaySpec
import play.api.libs.iteratee.Enumerator
import reactivemongo.bson.BSONObjectID
class TVChannelProviderRepositoryIntSpec extends PlaySpec with MustMatchers with BeforeAndAfterAll with ScalaFutures with MongoSugar {
val tvChannelProvider1 = TVChannelProvider("FREEVIEW", Some(BSONObjectID.generate))
val tvChannelProvider2 = TVChannelProvider("SKY", Some(BSONObjectID.generate))
val tvChannelProvider3 = TVChannelProvider("TERRESTRIAL", Some(BSONObjectID.generate))
val tvChannelProvider4 = TVChannelProvider("PROVIDER", Some(BSONObjectID.generate))
implicit val defaultPatience =
PatienceConfig(timeout = Span(3, Seconds), interval = Span(5, Millis))
val tvChannelProviderRepository: TVChannelProviderRepository = new TVChannelProviderRepository(this.getClass.getCanonicalName)
override def beforeAll {
whenReady(tvChannelProviderRepository.removeAll()){
ok => println(s"Before - collection ${this.getClass.getCanonicalName} removed: $ok")
}
whenReady(tvChannelProviderRepository.insertBulk(
Enumerator(
tvChannelProvider1,
tvChannelProvider2,
tvChannelProvider3,
tvChannelProvider4))) {
response => response mustBe 4
}
}
override def afterAll {
whenReady(tvChannelProviderRepository.drop){
ok => println(s"After - collection ${this.getClass.getCanonicalName} dropped: $ok")
}
}
"findAll" should {
"return all the providers for the tv channels order alphabetically" in {
whenReady(tvChannelProviderRepository.findAll()) {
_ mustBe Seq(tvChannelProvider1, tvChannelProvider4, tvChannelProvider2, tvChannelProvider3)
}
}
}
}
| tvlive/tv-api | test/integration/models/TVChannelProviderRepositoryIntSpec.scala | Scala | apache-2.0 | 1,962 |
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.pgsql.transport.netty.sapi
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean
import io.netty.bootstrap.Bootstrap
import io.netty.channel._
import io.netty.channel.group.DefaultChannelGroup
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.handler.codec.LengthFieldBasedFrameDecoder
import io.netty.handler.timeout.WriteTimeoutHandler
import io.netty.util.concurrent.GlobalEventExecutor
import io.rdbc.ImmutSeq
import io.rdbc.implbase.ConnectionFactoryPartialImpl
import io.rdbc.pgsql.core.auth.Authenticator
import io.rdbc.pgsql.core.config.sapi.{PgConnFactoryConfig, StmtCacheConfig}
import io.rdbc.pgsql.core.exception.{PgDriverInternalErrorException, PgUncategorizedException}
import io.rdbc.pgsql.core.internal.protocol.codec.{MessageDecoderFactory, MessageEncoderFactory}
import io.rdbc.pgsql.core.internal.protocol.messages.backend.BackendKeyData
import io.rdbc.pgsql.core.internal.protocol.messages.frontend.{CancelRequest, Terminate}
import io.rdbc.pgsql.core.typeconv._
import io.rdbc.pgsql.core.types._
import io.rdbc.pgsql.core._
import io.rdbc.pgsql.transport.netty.sapi.internal._
import io.rdbc.pgsql.transport.netty.sapi.internal.Compat._
import io.rdbc.sapi.Timeout
import io.rdbc.sapi.exceptions.RdbcException
import io.rdbc.util.Logging
import io.rdbc.util.scheduler.JdkScheduler
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
class NettyPgConnectionFactory protected(val nettyConfig: NettyPgConnectionFactory.Config)
extends ConnectionFactoryPartialImpl
with PgConnectionFactory
with Logging {
val pgConfig: PgConnFactoryConfig = nettyConfig.pgConfig
protected implicit val ec: ExecutionContext = pgConfig.ec
private[this] val scheduler = {
new JdkScheduler(nettyConfig.eventLoopGroup)
}
private[this] val openChannels = {
new DefaultChannelGroup(GlobalEventExecutor.INSTANCE) //TODO really global?
}
private[this] val shutDown = new AtomicBoolean(false)
private class ConnChannelInitializer extends ChannelInitializer[Channel] {
@volatile private var _maybeConn = Option.empty[NettyPgConnection]
def maybeConn: Option[NettyPgConnection] = _maybeConn
def initChannel(ch: Channel): Unit = {
val decoderHandler = new PgMsgDecoderHandler(MessageDecoderFactory.default)
val encoderHandler = new PgMsgEncoderHandler(MessageEncoderFactory.default)
ch.pipeline().addLast(framingHandler)
ch.pipeline().addLast(decoderHandler)
ch.pipeline().addLast(encoderHandler)
if (pgConfig.writeTimeout.value.isFinite()) {
ch.pipeline().addLast(new WriteTimeoutHandler(pgConfig.writeTimeout.value.toSeconds.toInt))
}
val conn = pgConnection(ch, decoderHandler, encoderHandler)
ch.pipeline().addLast(conn.handler)
_maybeConn = Some(conn)
}
override def channelActive(ctx: ChannelHandlerContext): Unit = {
openChannels.add(ctx.channel())
super.channelActive(ctx)
}
}
def connection()(implicit timeout: Timeout): Future[AbstractPgConnection] = traced {
if (!shutDown.get()) {
val initializer = new ConnChannelInitializer
baseBootstrap(Some(timeout))
.handler(initializer)
.connect().scalaFut
.flatMap { _ =>
initializer.maybeConn.map(Future.successful).getOrElse(Future.failed(new PgDriverInternalErrorException(
"Channel initializer did not create a connection instance"
))).flatMap { conn =>
conn.init(pgConfig.dbName, pgConfig.authenticator).map(_ => conn)
}
}
.recoverWith {
case ex: RdbcException => Future.failed(ex)
case NonFatal(ex) =>
Future.failed(new PgUncategorizedException(ex.getMessage, ex))
}
} else {
Future.failed(new PgUncategorizedException("The factory is shut down"))
}
}
def shutdown(): Future[Unit] = traced {
if (shutDown.compareAndSet(false, true)) {
def warn(detail: String): PartialFunction[Throwable, Unit] = {
case NonFatal(ex) =>
logger.warn(s"Error occurred during connection factory shutdown: $detail", ex)
}
for {
_ <- openChannels.writeAndFlush(Terminate).scalaFut
.recover(warn("could not write 'Terminate' message"))
_ <- openChannels.close().scalaFut
.recover(warn("could not close open channels"))
_ <- nettyConfig.eventLoopGroup.shutdownGracefully(0L, 0L, TimeUnit.SECONDS).scalaFut
.recover(warn("could not shutdown event loop group"))
} yield ()
} else {
logger.warn("Shutdown request received for already shut down connection factory")
Future.unit
}
}
private def pgConnection(ch: Channel,
decoderHandler: PgMsgDecoderHandler,
encoderHandler: PgMsgEncoderHandler): NettyPgConnection = traced {
val connConfig = PgConnectionConfig(
subscriberMinDemandRequestSize = pgConfig.subscriberMinDemandRequestSize,
subscriberBufferCapacity = pgConfig.subscriberBufferCapacity,
stmtCacheConfig = pgConfig.stmtCacheConfig
)
val typeConv = TypeConverter.fromPartials(BuiltInTypeConverters ++ pgConfig.typeConverters)
val typeCodec = AnyPgValCodec.fromCodecs(BuiltInCodecs ++ pgConfig.typeCodecs)
new NettyPgConnection(
id = ConnId(ch.id().asShortText()),
config = connConfig,
out = new NettyChannelWriter(ch),
decoder = decoderHandler,
encoder = encoderHandler,
ec = ec,
scheduler = scheduler,
requestCanceler = abortRequest,
stmtArgsConverter = {
val typeMapping = TypeMappingRegistry.fromMappings(BuiltInTypeMappings ++ pgConfig.typeMappings)
new StmtArgsConverter(AnyArgToPgArgConverter.of(
typeConverter = typeConv,
typeMapping = typeMapping,
typeCodec = typeCodec)
)
},
colValueToObjConverter = ColValueToObjConverter.fromCodecAndTypeconv(
codec = typeCodec,
converter = typeConv
)
)
}
private def baseBootstrap(connectTimeout: Option[Timeout]): Bootstrap = traced {
val address = new InetSocketAddress(pgConfig.host, pgConfig.port)
val bootstrap = new Bootstrap()
.group(nettyConfig.eventLoopGroup)
.channelFactory(nettyConfig.channelFactory)
.remoteAddress(address)
nettyConfig.channelOptions.foreach { opt =>
bootstrap.option(opt.option.asInstanceOf[ChannelOption[Any]], opt.value)
}
connectTimeout.foreach { timeout =>
if (timeout.value.isFinite()) {
bootstrap.option[Integer](
ChannelOption.CONNECT_TIMEOUT_MILLIS,
timeout.value.toMillis.toInt
)
}
}
bootstrap
}
private def abortRequest(bkd: BackendKeyData): Future[Unit] = traced {
baseBootstrap(connectTimeout = None)
.handler {
channelInitializer { ch =>
ch.pipeline().addLast(new PgMsgEncoderHandler(MessageEncoderFactory.default))
()
}
}
.connect().scalaFut
.flatMap { channel =>
channel
.writeAndFlush(CancelRequest(bkd.pid, bkd.key)).scalaFut
.flatMap(_ => channel.close().scalaFut)
.map(_ => ())
}
.recoverWith {
case NonFatal(ex) => Future.failed(new PgUncategorizedException("Could not abort request", ex))
}
}
private def framingHandler: LengthFieldBasedFrameDecoder = {
// format: off
val lengthFieldLength = 4
new LengthFieldBasedFrameDecoder(
/* max frame length = */ Int.MaxValue,
/* length field offset = */ 1,
/* length field length = */ lengthFieldLength,
/* length adjustment = */ -1 * lengthFieldLength,
/* initial bytes to strip = */ 0
)
// format: on
}
/* Scala 2.11 compat */
private def channelInitializer(f: Channel => Unit): ChannelInitializer[Channel] = {
new ChannelInitializer[Channel] {
def initChannel(ch: Channel): Unit = f(ch)
}
}
}
object NettyPgConnectionFactory extends Logging {
object Config {
import PgConnFactoryConfig.{Defaults => PgDefaults}
object Defaults {
val channelFactory: ChannelFactory[_ <: Channel] = new NioChannelFactory
def eventLoopGroup: EventLoopGroup = new NioEventLoopGroup
val channelOptions: ImmutSeq[ChannelOptionValue[_]] = {
Vector(ChannelOptionValue(ChannelOption.SO_KEEPALIVE, java.lang.Boolean.TRUE))
}
}
def apply(host: String,
port: Int,
authenticator: Authenticator,
dbName: Option[String] = None,
subscriberBufferCapacity: Int = PgDefaults.subscriberBufferCapacity,
subscriberMinDemandRequestSize: Int = PgDefaults.subscriberMinDemandRequestSize,
stmtCacheConfig: StmtCacheConfig = PgDefaults.stmtCacheConfig,
writeTimeout: Timeout = PgDefaults.writeTimeout,
ec: ExecutionContext = PgDefaults.ec,
channelFactory: ChannelFactory[_ <: Channel] = Defaults.channelFactory,
eventLoopGroup: EventLoopGroup = Defaults.eventLoopGroup,
channelOptions: ImmutSeq[ChannelOptionValue[_]] = Defaults.channelOptions,
typeConverters: Vector[PartialTypeConverter[_]] = PgDefaults.typeConverters,
typeMappings: Vector[TypeMapping[_, _ <: PgType[_ <: PgVal[_]]]] = PgDefaults.typeMappings,
typeCodecs: Vector[PgValCodec[_ <: PgVal[_]]] = PgDefaults.typeCodecs
): Config = {
val pgConfig = PgConnFactoryConfig(
host = host,
port = port,
dbName = dbName,
authenticator = authenticator,
subscriberBufferCapacity = subscriberBufferCapacity,
subscriberMinDemandRequestSize = subscriberMinDemandRequestSize,
stmtCacheConfig = stmtCacheConfig,
writeTimeout = writeTimeout,
typeConverters = typeConverters,
typeMappings = typeMappings,
typeCodecs = typeCodecs,
ec = ec
)
Config(
pgConfig = pgConfig,
channelFactory = channelFactory,
eventLoopGroup = eventLoopGroup,
channelOptions = channelOptions
)
}
}
final case class Config(pgConfig: PgConnFactoryConfig,
channelFactory: ChannelFactory[_ <: Channel],
eventLoopGroup: EventLoopGroup,
channelOptions: ImmutSeq[ChannelOptionValue[_]])
def apply(config: Config): NettyPgConnectionFactory = {
new NettyPgConnectionFactory(config)
}
}
| rdbc-io/rdbc-pgsql | rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/sapi/NettyPgConnectionFactory.scala | Scala | apache-2.0 | 11,303 |
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.ml.crf
import com.johnsnowlabs.tags.FastTest
import org.scalatest.flatspec.AnyFlatSpec
import java.io._
class SerializationSpec extends AnyFlatSpec {
val dataset = TestDatasets.small
val metadata = dataset.metadata
val weights = (0 until 8).map(i => 1f / i).toArray
val model = new LinearChainCrfModel(weights, dataset.metadata)
"LinearChainCrfModel" should "serialize and deserialize correctly" taggedAs FastTest in {
val memory = new ByteArrayOutputStream()
val oos = new ObjectOutputStream(memory)
oos.writeObject(model)
oos.close
val input = new ObjectInputStream(new ByteArrayInputStream(memory.toByteArray))
val deserialized = input.readObject().asInstanceOf[LinearChainCrfModel]
assert(deserialized.weights.toSeq == model.weights.toSeq)
val newMeta = deserialized.metadata
assert(newMeta.labels.toSeq == metadata.labels.toSeq)
assert(newMeta.attrs.toSeq == metadata.attrs.toSeq)
assert(newMeta.attrFeatures.toSeq == metadata.attrFeatures.toSeq)
assert(newMeta.transitions.toSeq == metadata.transitions.toSeq)
assert(newMeta.featuresStat.toSeq == metadata.featuresStat.toSeq)
}
}
| JohnSnowLabs/spark-nlp | src/test/scala/com/johnsnowlabs/ml/crf/SerializationSpec.scala | Scala | apache-2.0 | 1,788 |
package io.finch
import cats.Eq
import cats.instances.AllInstances
import cats.laws._
import cats.laws.discipline._
import org.scalacheck.{Arbitrary, Prop}
import org.typelevel.discipline.Laws
trait DecodePathLaws[A] extends Laws with MissingInstances with AllInstances {
def capture: DecodePath[A]
def roundTrip(a: A): IsEq[Option[A]] =
capture(a.toString) <-> Some(a)
def all(implicit A: Arbitrary[A], eq: Eq[A]): RuleSet = new DefaultRuleSet(
name = "all",
parent = None,
"roundTrip" -> Prop.forAll((a: A) => roundTrip(a))
)
}
object DecodePathLaws {
def apply[A: DecodePath]: DecodePathLaws[A] = new DecodePathLaws[A] {
def capture: DecodePath[A] = DecodePath[A]
}
}
| finagle/finch | core/src/test/scala/io/finch/DecodePathLaws.scala | Scala | apache-2.0 | 709 |
Subsets and Splits