Columns:
  code        string   length 5 to 1M
  repo_name   string   length 5 to 109
  path        string   length 6 to 208
  language    string   1 class
  license     string   15 classes
  size        int64    5 to 1M
package org.scalera.tools

object TraverseFunctions {

  implicit class MapHelpers[K, V](m1: Map[K, V]) {

    // Joining

    def fullJoin[W](m2: Map[K, W]): Map[K, (Option[V], Option[W])] =
      (m1.keySet ++ m2.keySet).map { k =>
        k -> (m1.get(k), m2.get(k))
      }.toMap

    def <+>[W](m2: Map[K, W]): Map[K, (Option[V], Option[W])] = fullJoin(m2)

    def innerJoin[W](m2: Map[K, W]): Map[K, (V, W)] =
      (m1.keySet intersect m2.keySet).map { k =>
        k -> (m1(k), m2(k))
      }.toMap

    def >+<[W](m2: Map[K, W]): Map[K, (V, W)] = innerJoin(m2)

    def leftJoin[W](m2: Map[K, W]): Map[K, (V, Option[W])] =
      m1.map { case (k, v) => k -> (v, m2.get(k)) }

    def <+[W](m2: Map[K, W]): Map[K, (V, Option[W])] = leftJoin(m2)

    def rightJoin[W](m2: Map[K, W]): Map[K, (Option[V], W)] =
      m2.map { case (k, v) => k -> (m1.get(k), v) }

    def +>[W](m2: Map[K, W]): Map[K, (Option[V], W)] = rightJoin(m2)

    // Merging

    /**
     * Generic merge.
     * It uses auxiliary functions for converting both maps values
     * into some common type, before merging.
     */
    def gMerge[W, X](
      m2: Map[K, W],
      f1: V => X,
      f2: W => X,
      m: (X, X) => X): Map[K, X] =
      (m1.keySet ++ m2.keySet).map { k =>
        k -> ((m1.get(k), m2.get(k)) match {
          case (Some(v), None) => f1(v)
          case (None, Some(w)) => f2(w)
          case (Some(v), Some(w)) => m(f1(v), f2(w))
        })
      }.toMap

    def merge(m2: Map[K, V], f: (V, V) => V): Map[K, V] =
      gMerge[V, V](m2, f1 = identity, f2 = identity, m = f)
  }
}
Scalera/scalera-tools
src/main/scala/org/scalera/tools/TraverseFunctions.scala
Scala
apache-2.0
1,584
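A brief usage sketch for the join/merge helpers above, with hypothetical sample maps; it assumes the MapHelpers implicit class is brought into scope via the enclosing object (map printing order may differ):

import org.scalera.tools.TraverseFunctions._

object TraverseFunctionsDemo extends App {
  val stock = Map("apple" -> 3, "pear" -> 5)      // hypothetical sample data
  val price = Map("apple" -> 0.5, "plum" -> 0.8)

  // fullJoin (alias <+>) keeps every key, wrapping both sides in Option
  println(stock.fullJoin(price))
  // e.g. Map(apple -> (Some(3),Some(0.5)), pear -> (Some(5),None), plum -> (None,Some(0.8)))

  // innerJoin (alias >+<) keeps only the shared keys
  println(stock.innerJoin(price))                 // Map(apple -> (3,0.5))

  // merge combines same-typed values with a user-supplied function
  println(stock.merge(Map("apple" -> 1, "plum" -> 2), _ + _))
  // e.g. Map(apple -> 4, pear -> 5, plum -> 2)
}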
package ru.lester.spark.example.anomaly

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.linalg.{Vectors, Vector}
import org.apache.spark.mllib.linalg.{Matrices, Matrix}
import org.apache.spark.mllib.stat.distribution.MultivariateGaussian
import org.apache.spark.mllib.regression.LabeledPoint

object MultivariateGaussianDistribution {
  def main(args: Array[String]) {
    if (args.length != 4) {
      System.err.println(
        "Usage: MultivariateGaussianDistribution " +
          "<dataFile> <cvDataFile> <cvPredictionsFile> <outputDir>")
      System.exit(1)
    }

    val dataFile = args(0)
    val cvDataFile = args(1)
    val cvPredictions = args(2)
    val outputDir = args(3)

    val conf = new SparkConf().setAppName("MultivariateGaussianDistribution")
    val sc = new SparkContext(conf)

    val input = sc.textFile(dataFile)
    val examples = input.filter(s => s.length > 0 && !s.startsWith("#")).map { line =>
      Vectors.dense(line.trim.split(' ').map(_.toDouble))
    }.persist()

    val dataArray = examples.map(_.toArray)
    val m = dataArray.count()
    val n = dataArray.first().length
    val sums = dataArray.reduce((a, b) => a.zip(b).map(t => t._1 + t._2))
    val mu = sums.map(_ / m)
    val subMuSquares = dataArray.aggregate(new Array[Double](n))(
      (a, b) => a.zip(b).zipWithIndex.map { t =>
        val diffMu = t._1._2 - mu(t._2)
        t._1._1 + (diffMu * diffMu)
      },
      (acc1, acc2) => acc1.zip(acc2).map(a => a._1 + a._2))
    val sigma2 = subMuSquares.map(_ / m)

    println(s"mu: ${mu.mkString(" ")}")
    println(s"sigma2: ${sigma2.mkString(" ")}")

    val multivariateGaussian =
      new MultivariateGaussian(Vectors.dense(mu), Matrices.diag(Vectors.dense(sigma2)))

    // Vector of probability density for training set using learned parameters
    val ps = examples.map(multivariateGaussian.pdf)

    val inputCV_X = sc.textFile(cvDataFile)
    val inputCV_Y = sc.textFile(cvPredictions)
    val examplesCV_X = inputCV_X.filter(s => s.length > 0 && !s.startsWith("#"))
    val examplesCV_Y = inputCV_Y.filter(s => s.length > 0 && !s.startsWith("#"))

    // Examples for cross-validation set along with "ground truth" for each example,
    // i.e. explicitly marked as anomalous/non-anomalous
    val examplesCV = examplesCV_X.zip(examplesCV_Y).map { case (l1, l2) =>
      LabeledPoint(l2.trim.toDouble, Vectors.dense(l1.trim.split(' ').map(_.toDouble)))
    }.persist()

    // Vector of probability density for cross validation set using learned parameters
    val psLabCV = examplesCV.map(lp => (multivariateGaussian.pdf(lp.features), lp.label))

    val minPsCV = psLabCV.map(_._1).min()
    val maxPsCV = psLabCV.map(_._1).max()
    val step = (maxPsCV - minPsCV) / 1000

    val epsF1 = (minPsCV to maxPsCV by step).map { epsilon =>
      val predictions = psLabCV.map(t => (t._1 < epsilon, t._2 != 0.0))
      // True positives
      val tp = predictions.filter(p => p._1 && p._2).count()
      // False positives
      val fp = predictions.filter(p => p._1 && !p._2).count()
      // False Negatives
      val fn = predictions.filter(p => !p._1 && p._2).count()
      // Precision
      val prec = tp.toDouble / (tp + fp)
      // Recall
      val rec = tp.toDouble / (tp + fn)
      // F1 Score
      val f1 = (2 * prec * rec) / (prec + rec)
      (epsilon, f1)
    }

    val bestEpsF1 = epsF1.foldLeft((0.0, 0.0)) { (acc, a) => if (acc._2 > a._2) acc else a }
    val epsilon = bestEpsF1._1

    val outliers = ps.zipWithIndex.filter(_._1 < epsilon)

    println(f"Best epsilon found using cross-validation: $epsilon%e")
    println(f"Best F1 on Cross Validation Set: ${bestEpsF1._2}%f")
    println(f"Outliers found: ${outliers.count()}%d")

    ps.saveAsTextFile(s"${outputDir}/ps")
    examples.zipWithIndex().map(_.swap).join(outliers.map(_.swap)).saveAsTextFile(s"${outputDir}/outliers")
    sc.parallelize(epsF1).saveAsTextFile(s"${outputDir}/eps_f1")

    sc.stop()
  }
}
vivanov/anomaly-detection
example/src/main/scala/ru/lester/spark/example/anomaly/MultivariateGaussianDistribution.scala
Scala
apache-2.0
3,957
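The heart of the program above is the threshold sweep: pick the epsilon that maximises F1 on the labelled cross-validation set, then flag every point whose density falls below it. A minimal, Spark-free sketch of just that selection step follows; the data values are hypothetical, and it swaps the original foldLeft for a filterNot/maxBy that discards NaN scores:

object EpsilonSelectionSketch extends App {
  // (probability density, isAnomaly) pairs for a labelled CV set -- hypothetical values
  val psLabCV = Seq((0.80, false), (0.65, false), (0.02, true), (0.55, false), (0.01, true))

  val minP = psLabCV.map(_._1).min
  val maxP = psLabCV.map(_._1).max
  val step = (maxP - minP) / 1000

  val epsF1 = (minP to maxP by step).map { eps =>
    // predict "anomaly" when the density falls below the candidate threshold
    val preds = psLabCV.map { case (p, label) => (p < eps, label) }
    val tp = preds.count { case (pred, actual) => pred && actual }
    val fp = preds.count { case (pred, actual) => pred && !actual }
    val fn = preds.count { case (pred, actual) => !pred && actual }
    val prec = tp.toDouble / (tp + fp)
    val rec = tp.toDouble / (tp + fn)
    (eps, 2 * prec * rec / (prec + rec))
  }

  // keep the epsilon with the best F1, ignoring NaN scores from empty prediction sets
  val (bestEps, bestF1) = epsF1.filterNot(_._2.isNaN).maxBy(_._2)
  println(f"best epsilon: $bestEps%e, F1: $bestF1%f")
}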
/** Copyright 2015 TappingStone, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.prediction.controller import io.prediction.core.BaseDataSource import io.prediction.core.BasePreparator import io.prediction.core.BaseAlgorithm import io.prediction.core.BaseServing import io.prediction.core.Doer import io.prediction.core.BaseEngine import io.prediction.workflow.CreateWorkflow import io.prediction.workflow.WorkflowUtils import io.prediction.workflow.EngineLanguage import io.prediction.workflow.PersistentModelManifest import io.prediction.workflow.SparkWorkflowUtils import io.prediction.workflow.StopAfterReadInterruption import io.prediction.workflow.StopAfterPrepareInterruption import io.prediction.data.storage.EngineInstance import _root_.java.util.NoSuchElementException import io.prediction.data.storage.StorageClientException import org.apache.spark.SparkContext import org.apache.spark.SparkContext._ import org.apache.spark.rdd.RDD import scala.language.implicitConversions import org.json4s._ import org.json4s.native.JsonMethods._ import org.json4s.native.Serialization.read import io.prediction.workflow.NameParamsSerializer import grizzled.slf4j.Logger /** This class chains up the entire data process. PredictionIO uses this * information to create workflows and deployments. In Scala, you should * implement an object that extends the `IEngineFactory` trait similar to the * following example. * * {{{ * object ItemRankEngine extends IEngineFactory { * def apply() = { * new Engine( * classOf[ItemRankDataSource], * classOf[ItemRankPreparator], * Map( * "knn" -> classOf[KNNAlgorithm], * "rand" -> classOf[RandomAlgorithm], * "mahoutItemBased" -> classOf[MahoutItemBasedAlgorithm]), * classOf[ItemRankServing]) * } * } * }}} * * @see [[IEngineFactory]] * @tparam TD Training data class. * @tparam EI Evaluation info class. * @tparam PD Prepared data class. * @tparam Q Input query class. * @tparam P Output prediction class. * @tparam A Actual value class. * @param dataSourceClassMap Map of data source class. * @param preparatorClassMap Map of preparator class. * @param algorithmClassMap Map of algorithm names to classes. * @param servingClassMap Map of serving class. * @group Engine */ class Engine[TD, EI, PD, Q, P, A]( val dataSourceClassMap: Map[String, Class[_ <: BaseDataSource[TD, EI, Q, A]]], val preparatorClassMap: Map[String, Class[_ <: BasePreparator[TD, PD]]], val algorithmClassMap: Map[String, Class[_ <: BaseAlgorithm[PD, _, Q, P]]], val servingClassMap: Map[String, Class[_ <: BaseServing[Q, P]]]) extends BaseEngine[EI, Q, P, A] { implicit lazy val formats = Utils.json4sDefaultFormats + new NameParamsSerializer @transient lazy val logger = Logger[this.type] /** * @param dataSourceClass Data source class. * @param preparatorClass Preparator class. * @param algorithmClassMap Map of algorithm names to classes. * @param servingClass Serving class. 
*/ def this( dataSourceClass: Class[_ <: BaseDataSource[TD, EI, Q, A]], preparatorClass: Class[_ <: BasePreparator[TD, PD]], algorithmClassMap: Map[String, Class[_ <: BaseAlgorithm[PD, _, Q, P]]], servingClass: Class[_ <: BaseServing[Q, P]]) = this( Map("" -> dataSourceClass), Map("" -> preparatorClass), algorithmClassMap, Map("" -> servingClass) ) /** Returns a new Engine instance. Mimic case class's copy method behavior. */ def copy( dataSourceClassMap: Map[String, Class[_ <: BaseDataSource[TD, EI, Q, A]]] = dataSourceClassMap, preparatorClassMap: Map[String, Class[_ <: BasePreparator[TD, PD]]] = preparatorClassMap, algorithmClassMap: Map[String, Class[_ <: BaseAlgorithm[PD, _, Q, P]]] = algorithmClassMap, servingClassMap: Map[String, Class[_ <: BaseServing[Q, P]]] = servingClassMap): Engine[TD, EI, PD, Q, P, A] = { new Engine( dataSourceClassMap, preparatorClassMap, algorithmClassMap, servingClassMap) } /** Return persistentable models from trained model. */ def train( sc: SparkContext, engineParams: EngineParams, engineInstanceId: String, params: WorkflowParams): Seq[Any] = { val (dataSourceName, dataSourceParams) = engineParams.dataSourceParams val dataSource = Doer(dataSourceClassMap(dataSourceName), dataSourceParams) val (preparatorName, preparatorParams) = engineParams.preparatorParams val preparator = Doer(preparatorClassMap(preparatorName), preparatorParams) val algoParamsList = engineParams.algorithmParamsList val algorithms = algoParamsList.map { case (algoName, algoParams) => Doer(algorithmClassMap(algoName), algoParams) } val models = Engine.train( sc, dataSource, preparator, algorithms, params) val algoCount = algorithms.size val algoTuples: Seq[(String, Params, BaseAlgorithm[_, _, _, _], Any)] = (0 until algoCount).map { ax => { // val (name, params) = algoParamsList(ax) val (name, params) = algoParamsList(ax) (name, params, algorithms(ax), models(ax)) }} makeSerializableModels( sc, engineInstanceId = engineInstanceId, algoTuples = algoTuples) } /** Algorithm models can be persisted before deploy. However, it is also * possible that models are not persisted. This method retrains non-persisted * models and return a list of model that can be used directly in deploy. */ def prepareDeploy( sc: SparkContext, engineParams: EngineParams, engineInstanceId: String, persistedModels: Seq[Any], params: WorkflowParams): Seq[Any] = { val algoParamsList = engineParams.algorithmParamsList val algorithms = algoParamsList.map { case (algoName, algoParams) => Doer(algorithmClassMap(algoName), algoParams) } val models = if (persistedModels.exists(m => m.isInstanceOf[Unit.type])) { // If any of persistedModels is Unit, we need to re-train the model. 
logger.info("Some persisted models are Unit, need to re-train.") val (dataSourceName, dataSourceParams) = engineParams.dataSourceParams val dataSource = Doer(dataSourceClassMap(dataSourceName), dataSourceParams) val (preparatorName, preparatorParams) = engineParams.preparatorParams val preparator = Doer(preparatorClassMap(preparatorName), preparatorParams) val td = dataSource.readTrainingBase(sc) val pd = preparator.prepareBase(sc, td) val models = algorithms.zip(persistedModels).map { case (algo, m) => m match { case Unit => algo.trainBase(sc, pd) case _ => m } } models } else { logger.info("Using persisted model") persistedModels } models .zip(algorithms) .zip(algoParamsList) .zipWithIndex .map { case (((model, algo), (algoName, algoParams)), ax) => { model match { case modelManifest: PersistentModelManifest => { logger.info("Custom-persisted model detected for algorithm " + algo.getClass.getName) SparkWorkflowUtils.getPersistentModel( modelManifest, Seq(engineInstanceId, ax, algoName).mkString("-"), algoParams, Some(sc), getClass.getClassLoader) } case m => { try { logger.info( s"Loaded model ${m.getClass.getName} for algorithm " + s"${algo.getClass.getName}") m } catch { case e: NullPointerException => logger.warn( s"Null model detected for algorithm ${algo.getClass.getName}") m } } } // model match } } } /** Extract model for persistent layer. * * PredictionIO presist models for future use. It allows custom * implementation for persisting models. You need to implement the * [[io.prediction.controller.IPersistentModel]] interface. This method * traverses all models in the workflow. If the model is a * [[io.prediction.controller.IPersistentModel]], it calls the save method * for custom persistence logic. * * For model doesn't support custom logic, PredictionIO serializes the whole * model if the corresponding algorithm is local. On the other hand, if the * model is parallel (i.e. model associated with a number of huge RDDS), this * method return Unit, in which case PredictionIO will retrain the whole * model from scratch next time it is used. */ def makeSerializableModels( sc: SparkContext, engineInstanceId: String, // AlgoName, Algo, Model algoTuples: Seq[(String, Params, BaseAlgorithm[_, _, _, _], Any)] ): Seq[Any] = { logger.info(s"engineInstanceId=$engineInstanceId") algoTuples .zipWithIndex .map { case ((name, params, algo, model), ax) => algo.makePersistentModel( sc = sc, modelId = Seq(engineInstanceId, ax, name).mkString("-"), algoParams = params, bm = model) } } def eval( sc: SparkContext, engineParams: EngineParams, params: WorkflowParams) : Seq[(EI, RDD[(Q, P, A)])] = { val (dataSourceName, dataSourceParams) = engineParams.dataSourceParams val dataSource = Doer(dataSourceClassMap(dataSourceName), dataSourceParams) val (preparatorName, preparatorParams) = engineParams.preparatorParams val preparator = Doer(preparatorClassMap(preparatorName), preparatorParams) val algoParamsList = engineParams.algorithmParamsList val algorithms = algoParamsList.map { case (algoName, algoParams) => { try { Doer(algorithmClassMap(algoName), algoParams) } catch { case e: NoSuchElementException => { if (algoName == "") { logger.error("Empty algorithm name supplied but it could not " + "match with any algorithm in the engine's definition. " + "Existing algorithm name(s) are: " + s"${algorithmClassMap.keys.mkString(", ")}. Aborting.") } else { logger.error(s"${algoName} cannot be found in the engine's " + "definition. Existing algorithm name(s) are: " + s"${algorithmClassMap.keys.mkString(", ")}. 
Aborting.") } sys.exit(1) } } }} val (servingName, servingParams) = engineParams.servingParams val serving = Doer(servingClassMap(servingName), servingParams) Engine.eval(sc, dataSource, preparator, algorithms, serving) } override def jValueToEngineParams(variantJson: JValue): EngineParams = { val engineLanguage = EngineLanguage.Scala // Extract EngineParams logger.info(s"Extracting datasource params...") val dataSourceParams: (String, Params) = WorkflowUtils.getParamsFromJsonByFieldAndClass( variantJson, "datasource", dataSourceClassMap, engineLanguage) logger.info(s"Datasource params: ${dataSourceParams}") logger.info(s"Extracting preparator params...") val preparatorParams: (String, Params) = WorkflowUtils.getParamsFromJsonByFieldAndClass( variantJson, "preparator", preparatorClassMap, engineLanguage) logger.info(s"Preparator params: ${preparatorParams}") val algorithmsParams: Seq[(String, Params)] = variantJson findField { case JField("algorithms", _) => true case _ => false } map { jv => val algorithmsParamsJson = jv._2 algorithmsParamsJson match { case JArray(s) => s.map { algorithmParamsJValue => val eap = algorithmParamsJValue.extract[CreateWorkflow.AlgorithmParams] ( eap.name, WorkflowUtils.extractParams( engineLanguage, compact(render(eap.params)), algorithmClassMap(eap.name)) ) } case _ => Nil } } getOrElse Seq(("", EmptyParams())) logger.info(s"Extracting serving params...") val servingParams: (String, Params) = WorkflowUtils.getParamsFromJsonByFieldAndClass( variantJson, "serving", servingClassMap, engineLanguage) logger.info(s"Serving params: ${servingParams}") new EngineParams( dataSourceParams = dataSourceParams, preparatorParams = preparatorParams, algorithmParamsList = algorithmsParams, servingParams = servingParams) } def engineInstanceToEngineParams(engineInstance: EngineInstance) : EngineParams = { implicit val formats = DefaultFormats val engineLanguage = EngineLanguage.Scala val dataSourceParamsWithName: (String, Params) = { val (name, params) = read[(String, JValue)](engineInstance.dataSourceParams) if (!dataSourceClassMap.contains(name)) { logger.error(s"Unable to find datasource class with name '${name}'" + " defined in Engine.") sys.exit(1) } val extractedParams = WorkflowUtils.extractParams( engineLanguage, compact(render(params)), dataSourceClassMap(name)) (name, extractedParams) } val preparatorParamsWithName: (String, Params) = { val (name, params) = read[(String, JValue)](engineInstance.preparatorParams) if (!preparatorClassMap.contains(name)) { logger.error(s"Unable to find preparator class with name '${name}'" + " defined in Engine.") sys.exit(1) } val extractedParams = WorkflowUtils.extractParams( engineLanguage, compact(render(params)), preparatorClassMap(name)) (name, extractedParams) } val algorithmsParamsWithNames = read[Seq[(String, JValue)]](engineInstance.algorithmsParams).map { case (algoName, params) => val extractedParams = WorkflowUtils.extractParams( engineLanguage, compact(render(params)), algorithmClassMap(algoName)) (algoName, extractedParams) } val servingParamsWithName: (String, Params) = { val (name, params) = read[(String, JValue)](engineInstance.servingParams) if (!servingClassMap.contains(name)) { logger.error(s"Unable to find serving class with name '${name}'" + " defined in Engine.") sys.exit(1) } val extractedParams = WorkflowUtils.extractParams( engineLanguage, compact(render(params)), servingClassMap(name)) (name, extractedParams) } new EngineParams( dataSourceParams = dataSourceParamsWithName, preparatorParams = 
preparatorParamsWithName, algorithmParamsList = algorithmsParamsWithNames, servingParams = servingParamsWithName) } } object Engine { type EX = Int type AX = Int type QX = Long @transient lazy val logger = Logger[this.type] class DataSourceMap[TD, EI, Q, A]( val m: Map[String, Class[_ <: BaseDataSource[TD, EI, Q, A]]]) { def this(c: Class[_ <: BaseDataSource[TD, EI, Q, A]]) = this(Map("" -> c)) } object DataSourceMap { implicit def cToMap[TD, EI, Q, A]( c: Class[_ <: BaseDataSource[TD, EI, Q, A]]): DataSourceMap[TD, EI, Q, A] = new DataSourceMap(c) implicit def mToMap[TD, EI, Q, A]( m: Map[String, Class[_ <: BaseDataSource[TD, EI, Q, A]]]): DataSourceMap[TD, EI, Q, A] = new DataSourceMap(m) } class PreparatorMap[TD, PD]( val m: Map[String, Class[_ <: BasePreparator[TD, PD]]]) { def this(c: Class[_ <: BasePreparator[TD, PD]]) = this(Map("" -> c)) } object PreparatorMap { implicit def cToMap[TD, PD]( c: Class[_ <: BasePreparator[TD, PD]]): PreparatorMap[TD, PD] = new PreparatorMap(c) implicit def mToMap[TD, PD]( m: Map[String, Class[_ <: BasePreparator[TD, PD]]]): PreparatorMap[TD, PD] = new PreparatorMap(m) } class ServingMap[Q, P]( val m: Map[String, Class[_ <: BaseServing[Q, P]]]) { def this(c: Class[_ <: BaseServing[Q, P]]) = this(Map("" -> c)) } object ServingMap { implicit def cToMap[Q, P]( c: Class[_ <: BaseServing[Q, P]]): ServingMap[Q, P] = new ServingMap(c) implicit def mToMap[Q, P]( m: Map[String, Class[_ <: BaseServing[Q, P]]]): ServingMap[Q, P] = new ServingMap(m) } def apply[TD, EI, PD, Q, P, A]( dataSourceMap: DataSourceMap[TD, EI, Q, A], preparatorMap: PreparatorMap[TD, PD], algorithmClassMap: Map[String, Class[_ <: BaseAlgorithm[PD, _, Q, P]]], servingMap: ServingMap[Q, P]): Engine[TD, EI, PD, Q, P, A] = new Engine( dataSourceMap.m, preparatorMap.m, algorithmClassMap, servingMap.m ) def train[TD, PD, Q]( sc: SparkContext, dataSource: BaseDataSource[TD, _, Q, _], preparator: BasePreparator[TD, PD], algorithmList: Seq[BaseAlgorithm[PD, _, Q, _]], params: WorkflowParams ): Seq[Any] = { logger.info("EngineWorkflow.train") logger.info(s"DataSource: $dataSource") logger.info(s"Preparator: $preparator") logger.info(s"AlgorithmList: $algorithmList") if (params.skipSanityCheck) { logger.info("Data sanity check is off.") } else { logger.info("Data santiy check is on.") } val td = try { dataSource.readTrainingBase(sc) } catch { case e: StorageClientException => logger.error(s"Error occured reading from data source. (Reason: " + e.getMessage + ") Please see the log for debugging details.", e) sys.exit(1) } if (!params.skipSanityCheck) { td match { case sanityCheckable: SanityCheck => { logger.info(s"${td.getClass.getName} supports data sanity" + " check. Performing check.") sanityCheckable.sanityCheck() } case _ => { logger.info(s"${td.getClass.getName} does not support" + " data sanity check. Skipping check.") } } } if (params.stopAfterRead) { logger.info("Stopping here because --stop-after-read is set.") throw StopAfterReadInterruption() } val pd = preparator.prepareBase(sc, td) if (!params.skipSanityCheck) { pd match { case sanityCheckable: SanityCheck => { logger.info(s"${pd.getClass.getName} supports data sanity" + " check. Performing check.") sanityCheckable.sanityCheck() } case _ => { logger.info(s"${pd.getClass.getName} does not support" + " data sanity check. 
Skipping check.") } } } if (params.stopAfterPrepare) { logger.info("Stopping here because --stop-after-prepare is set.") throw StopAfterPrepareInterruption() } val models: Seq[Any] = algorithmList.map(_.trainBase(sc, pd)) if (!params.skipSanityCheck) { models.foreach { model => { model match { case sanityCheckable: SanityCheck => { logger.info(s"${model.getClass.getName} supports data sanity" + " check. Performing check.") sanityCheckable.sanityCheck() } case _ => { logger.info(s"${model.getClass.getName} does not support" + " data sanity check. Skipping check.") } } }} } logger.info("EngineWorkflow.train completed") models } def eval[TD, PD, Q, P, A, EI]( sc: SparkContext, dataSource: BaseDataSource[TD, EI, Q, A], preparator: BasePreparator[TD, PD], algorithmList: Seq[BaseAlgorithm[PD, _, Q, P]], serving: BaseServing[Q, P]): Seq[(EI, RDD[(Q, P, A)])] = { logger.info(s"DataSource: $dataSource") logger.info(s"Preparator: $preparator") logger.info(s"AlgorithmList: $algorithmList") logger.info(s"Serving: $serving") val algoMap: Map[AX, BaseAlgorithm[PD, _, Q, P]] = algorithmList .zipWithIndex .map(_.swap) .toMap val algoCount = algoMap.size val evalTupleMap: Map[EX, (TD, EI, RDD[(Q, A)])] = dataSource .readEvalBase(sc) .zipWithIndex .map(_.swap) .toMap val evalCount = evalTupleMap.size val evalTrainMap: Map[EX, TD] = evalTupleMap.mapValues(_._1) val evalInfoMap: Map[EX, EI] = evalTupleMap.mapValues(_._2) val evalQAsMap: Map[EX, RDD[(QX, (Q, A))]] = evalTupleMap .mapValues(_._3) .mapValues{ _.zipWithUniqueId().map(_.swap) } val preparedMap: Map[EX, PD] = evalTrainMap.mapValues { td => { preparator.prepareBase(sc, td) }} val algoModelsMap: Map[EX, Map[AX, Any]] = preparedMap.mapValues { pd => { algoMap.mapValues(_.trainBase(sc,pd)) }} val algoPredictsMap: Map[EX, RDD[(QX, Seq[P])]] = (0 until evalCount) .map { ex => { val modelMap: Map[AX, Any] = algoModelsMap(ex) val qs: RDD[(QX, Q)] = evalQAsMap(ex).mapValues(_._1) val algoPredicts: Seq[RDD[(QX, (AX, P))]] = (0 until algoCount) .map { ax => { val algo = algoMap(ax) val model = modelMap(ax) val rawPredicts: RDD[(QX, P)] = algo.batchPredictBase(sc, model, qs) val predicts: RDD[(QX, (AX, P))] = rawPredicts.map { case (qx, p) => { (qx, (ax, p)) }} predicts }} val unionAlgoPredicts: RDD[(QX, Seq[P])] = sc.union(algoPredicts) .groupByKey .mapValues { ps => { assert (ps.size == algoCount, "Must have same length as algoCount") // TODO. Check size == algoCount ps.toSeq.sortBy(_._1).map(_._2) }} (ex, unionAlgoPredicts) }} .toMap val servingQPAMap: Map[EX, RDD[(Q, P, A)]] = algoPredictsMap .map { case (ex, psMap) => { val qasMap: RDD[(QX, (Q, A))] = evalQAsMap(ex) val qpsaMap: RDD[(QX, Q, Seq[P], A)] = psMap.join(qasMap) .map { case (qx, t) => (qx, t._2._1, t._1, t._2._2) } val qpaMap: RDD[(Q, P, A)] = qpsaMap.map { case (qx, q, ps, a) => (q, serving.serveBase(q, ps), a) } (ex, qpaMap) }} (0 until evalCount).map { ex => { (evalInfoMap(ex), servingQPAMap(ex)) }} .toSeq } } /** This class serves as a logical grouping of all required engine's parameters. * * @param dataSourceParams Data Source name-parameters tuple. * @param preparatorParams Preparator name-parameters tuple. * @param algorithmParamsList List of algorithm name-parameter pairs. * @param servingParams Serving name-parameters tuple. 
* @group Engine */ class EngineParams( val dataSourceParams: (String, Params) = ("", EmptyParams()), val preparatorParams: (String, Params) = ("", EmptyParams()), val algorithmParamsList: Seq[(String, Params)] = Seq(), val servingParams: (String, Params) = ("", EmptyParams())) extends Serializable { // A case class style copy method. def copy( dataSourceParams: (String, Params) = dataSourceParams, preparatorParams: (String, Params) = preparatorParams, algorithmParamsList: Seq[(String, Params)] = algorithmParamsList, servingParams: (String, Params) = servingParams): EngineParams = { new EngineParams( dataSourceParams, preparatorParams, algorithmParamsList, servingParams) } } /** Companion object for creating [[EngineParams]] instances. * * @group Engine */ object EngineParams { /** Create EngineParams. * * @param dataSourceName Data Source name * @param dataSourceParams Data Source parameters * @param preparatorName Preparator name * @param preparatorParams Preparator parameters * @param algorithmParamsList List of algorithm name-parameter pairs. * @param servingName Serving name * @param servingParams Serving parameters */ def apply( dataSourceName: String = "", dataSourceParams: Params = EmptyParams(), preparatorName: String = "", preparatorParams: Params = EmptyParams(), algorithmParamsList: Seq[(String, Params)] = Seq(), servingName: String = "", servingParams: Params = EmptyParams()): EngineParams = { new EngineParams( dataSourceParams = (dataSourceName, dataSourceParams), preparatorParams = (preparatorName, preparatorParams), algorithmParamsList = algorithmParamsList, servingParams = (servingName, servingParams) ) } } /** SimpleEngine has only one algorithm, and uses default preparator and serving * layer. Current default preparator is `IdentityPreparator` and serving is * `FirstServing`. * * @tparam TD Training data class. * @tparam EI Evaluation info class. * @tparam Q Input query class. * @tparam P Output prediction class. * @tparam A Actual value class. * @param dataSourceClass Data source class. * @param algorithmClass of algorithm names to classes. * @group Engine */ class SimpleEngine[TD, EI, Q, P, A]( dataSourceClass: Class[_ <: BaseDataSource[TD, EI, Q, A]], algorithmClass: Class[_ <: BaseAlgorithm[TD, _, Q, P]]) extends Engine( dataSourceClass, IdentityPreparator(dataSourceClass), Map("" -> algorithmClass), LFirstServing(algorithmClass)) /** This shorthand class serves the `SimpleEngine` class. * * @param dataSourceParams Data source parameters. * @param algorithmParams List of algorithm name-parameter pairs. * @group Engine */ class SimpleEngineParams( dataSourceParams: Params = EmptyParams(), algorithmParams: Params = EmptyParams()) extends EngineParams( dataSourceParams = ("", dataSourceParams), algorithmParamsList = Seq(("", algorithmParams))) /** If you intend to let PredictionIO create workflow and deploy serving * automatically, you will need to implement an object that extends this trait * and return an [[Engine]]. * * @group Engine */ trait IEngineFactory { /** Creates an instance of an [[Engine]]. */ def apply(): BaseEngine[_, _, _, _] /** Override this method to programmatically return engine parameters. */ def engineParams(key: String): EngineParams = EngineParams() } /** Defines an engine parameters generator. * * Implementations of this trait can be supplied to "pio eval" as the second * argument. 
* * @group Evaluation */ trait EngineParamsGenerator { protected[this] var epList: Seq[EngineParams] = _ protected[this] var epListSet: Boolean = false /** Returns the list of [[EngineParams]] of this [[EngineParamsGenerator]]. */ def engineParamsList: Seq[EngineParams] = { assert(epListSet, "EngineParamsList not set") epList } /** Sets the list of [[EngineParams]] of this [[EngineParamsGenerator]]. */ def engineParamsList_=(l: Seq[EngineParams]) { assert(!epListSet, "EngineParamsList can bet set at most once") epList = Seq(l:_*) epListSet = true } } /** Mix in this trait for queries that contain prId (PredictedResultId). * This is useful when your engine expects queries to also be associated with * prId keys when feedback loop is enabled. * * @group General */ trait WithPrId { val prId: String = "" }
nvoron23/PredictionIO
core/src/main/scala/controller/Engine.scala
Scala
apache-2.0
27,768
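A short sketch of assembling EngineParams with the companion apply defined above. The parameter case classes are hypothetical stand-ins for a concrete engine's parameters and are assumed to extend the controller's Params trait:

import io.prediction.controller.{EngineParams, Params}

// hypothetical parameter case classes for a concrete engine
case class MyDataSourceParams(appId: Int) extends Params
case class MyAlgoParams(k: Int) extends Params

object EngineParamsSketch extends App {
  val engineParams = EngineParams(
    dataSourceName = "",
    dataSourceParams = MyDataSourceParams(appId = 1),
    algorithmParamsList = Seq("knn" -> MyAlgoParams(k = 10)))

  println(engineParams.algorithmParamsList)
}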
import scala.io.Source

object Prob81 extends App {
  val matrix = Source.fromFile("resources/p081_matrix.txt").getLines().map { line =>
    line.split(',').map(_.toInt)
  }.toArray
  val xSize = matrix.length
  val ySize = matrix.head.length
  for {
    j <- matrix.indices
    i <- matrix.head.indices
  } {
    if (i == 0 && 0 < j) matrix(j)(i) += matrix(j - 1)(i)
    else if (0 < i && j == 0) matrix(j)(i) += matrix(j)(i - 1)
    else if (0 < i && 0 < j) matrix(j)(i) += math.min(matrix(j - 1)(i), matrix(j)(i - 1))
  }
  println(matrix(ySize - 1)(xSize - 1))
}
ponkotuy/ProjectEular
src/main/scala/Prob81.scala
Scala
mit
566
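The in-place dynamic programme above only ever moves right or down, so after the loop each cell holds the cheapest cost of reaching it. A tiny worked example of the same update rule on a hypothetical 3x3 cost matrix:

object MinPathSumSketch extends App {
  // hypothetical 3x3 cost matrix
  val m = Array(
    Array(1, 3, 1),
    Array(1, 5, 1),
    Array(4, 2, 1))

  for {
    j <- m.indices
    i <- m.head.indices
  } {
    if (i == 0 && 0 < j) m(j)(i) += m(j - 1)(i)
    else if (0 < i && j == 0) m(j)(i) += m(j)(i - 1)
    else if (0 < i && 0 < j) m(j)(i) += math.min(m(j - 1)(i), m(j)(i - 1))
  }

  // bottom-right cell now holds the minimal path sum: 1 + 3 + 1 + 1 + 1 = 7
  println(m.last.last)
}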
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements

import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IndexSink, StubElement, StubInputStream, StubOutputStream}
import com.intellij.util.io.StringRef
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScSelfTypeElement
import org.jetbrains.plugins.scala.lang.psi.impl.base.types.ScSelfTypeElementImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScSelfTypeElementStubImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.index.ScalaIndexKeys

/**
 * User: Alexander Podkhalyuzin
 * Date: 19.06.2009
 */
class ScSelfTypeElementElementType extends ScStubElementType[ScSelfTypeElementStub, ScSelfTypeElement]("self type element") {
  override def serialize(stub: ScSelfTypeElementStub, dataStream: StubOutputStream): Unit = {
    dataStream.writeName(stub.getName)
    dataStream.writeOptionName(stub.typeText)
    dataStream.writeNames(stub.classNames)
  }

  override def deserialize(dataStream: StubInputStream, parentStub: StubElement[_ <: PsiElement]) =
    new ScSelfTypeElementStubImpl(
      parentStub,
      this,
      nameRef = dataStream.readName,
      typeTextRef = dataStream.readOptionName,
      typeNamesRefs = dataStream.readNames
    )

  override def createStubImpl(typeElement: ScSelfTypeElement,
                              parentStub: StubElement[_ <: PsiElement]): ScSelfTypeElementStub = {
    val typeElementText = typeElement.typeElement.map(_.getText)

    new ScSelfTypeElementStubImpl(
      parentStub,
      this,
      nameRef = StringRef.fromString(typeElement.name),
      typeTextRef = typeElementText.asReference,
      typeNamesRefs = typeElement.classNames.asReferences
    )
  }

  override def indexStub(stub: ScSelfTypeElementStub, sink: IndexSink): Unit =
    this.indexStub(stub.classNames, sink, ScalaIndexKeys.SELF_TYPE_CLASS_NAME_KEY)

  override def createElement(node: ASTNode) = new ScSelfTypeElementImpl(node)

  override def createPsi(stub: ScSelfTypeElementStub) = new ScSelfTypeElementImpl(stub)
}
jastice/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/ScSelfTypeElementElementType.scala
Scala
apache-2.0
2,132
package fif.spark.avroparquet

/**
 * Code generated from avro schemas by scalaAvro. Do not modify.
 * "ALL THESE FILES ARE YOURS—EXCEPT SAMPLEENTITY.SCALA / ATTEMPT NO MODIFICATIONS THERE"
 */
final case class SampleEntity(entityName: String, entityType: String, pages: Vector[Int])
    extends com.nitro.scalaAvro.runtime.GeneratedMessage
    with com.nitro.scalaAvro.runtime.Message[SampleEntity] {

  def withEntityName(__v: String): SampleEntity = copy(entityName = __v)
  def withEntityType(__v: String): SampleEntity = copy(entityType = __v)
  def withPages(__v: Vector[Int]): SampleEntity = copy(pages = __v)

  def toMutable: org.apache.avro.generic.GenericRecord = {
    val __out__ = new org.apache.avro.generic.GenericData.Record(SampleEntity.schema)
    __out__.put("entityName", entityName)
    __out__.put("entityType", entityType)
    __out__.put(
      "pages",
      scala.collection.JavaConversions.asJavaCollection(pages.map(_e => _e)))
    __out__
  }

  def companion = SampleEntity
}

object SampleEntity
    extends com.nitro.scalaAvro.runtime.GeneratedMessageCompanion[SampleEntity] {

  implicit def messageCompanion: com.nitro.scalaAvro.runtime.GeneratedMessageCompanion[SampleEntity] = this

  def schema: org.apache.avro.Schema =
    new org.apache.avro.Schema.Parser().parse(
      """{"type":"record","name":"SampleEntity","namespace":"fif.spark.avroparquet","fields":[{"name":"entityName","type":"string"},{"name":"entityType","type":"string"},{"name":"pages","type":{"type":"array","items":"int"}}]}""")

  val _arbitrary: org.scalacheck.Gen[SampleEntity] = for {
    entityName <- com.nitro.scalaAvro.runtime.AvroGenUtils.genAvroString
    entityType <- com.nitro.scalaAvro.runtime.AvroGenUtils.genAvroString
    pages <- com.nitro.scalaAvro.runtime.AvroGenUtils.genAvroArray(
      org.scalacheck.Arbitrary.arbInt.arbitrary
    )
  } yield SampleEntity(
    entityName = entityName,
    entityType = entityType,
    pages = pages
  )

  def fromMutable(generic: org.apache.avro.generic.GenericRecord): SampleEntity =
    SampleEntity(
      entityName = convertString(generic.get("entityName")),
      entityType = convertString(generic.get("entityType")),
      pages = scala.collection.JavaConversions
        .asScalaIterator(
          generic
            .get("pages")
            .asInstanceOf[org.apache.avro.generic.GenericArray[Any]]
            .iterator())
        .map(_elem => _elem.asInstanceOf[Int])
        .toVector
    )
}
malcolmgreaves/abstract_data
data-tc-spark/src/test/scala/fif/spark/avroparquet/SampleEntity.scala
Scala
apache-2.0
2,563
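A short round-trip sketch for the generated record above, converting to an Avro GenericRecord via toMutable and reading it back via fromMutable. The values are hypothetical and the sketch assumes the scalaAvro runtime and Avro are on the classpath:

object SampleEntityRoundTrip extends App {
  val entity = fif.spark.avroparquet.SampleEntity(
    entityName = "invoice-42",   // hypothetical values
    entityType = "document",
    pages = Vector(1, 2, 3))

  // convert to a plain Avro GenericRecord and back
  val generic = entity.toMutable
  val restored = fif.spark.avroparquet.SampleEntity.fromMutable(generic)

  println(generic)
  println(restored)
}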
/*
 * Licensed to Intel Corporation under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * Intel Corporation licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.intel.analytics.bigdl.example.textclassification

object SimpleTokenizer {

  /**
   * Simple tokenizer to split text into separated tokens.
   * @param text text to be split.
   * @param lower convert to lower case or not.
   * @return An array of separated tokens.
   */
  def toTokens(text: String, lower: Boolean = true): Array[String] = {
    text.replaceAll("[^a-zA-Z]", " ").toLowerCase().split("\\s+").filter(_.size > 2)
  }

  /**
   * Transform sample text into tokens and ignore those unknown tokens.
   * @param word2Meta Indicate the included words.
   */
  def toTokens(text: String, word2Meta: Map[String, WordMeta]): Array[Float] = {
    SimpleTokenizer.toTokens(text).map { word: String =>
      if (word2Meta.contains(word)) {
        Some(word2Meta(word).index.toFloat)
      } else {
        None
      }
    }.flatten
  }

  /**
   * Shape the token sequence to the specified length.
   * The sequence would be either padded or truncated.
   * @param sequenceLen the desired seq length
   * @param trunc truncated from pre or post.
   */
  def shaping(tokens: Array[Float], sequenceLen: Int, trunc: String = "pre"): Array[Float] = {
    val paddedTokens = if (tokens.length > sequenceLen) {
      if ("pre" == trunc) {
        tokens.slice(tokens.length - sequenceLen, tokens.length)
      } else {
        tokens.slice(0, sequenceLen)
      }
    } else {
      tokens ++ Array.fill[Float](sequenceLen - tokens.length)(0)
    }
    paddedTokens
  }

  /**
   * Transform word to pre-trained vector.
   * @param embeddingSize size of the pre-trained vector
   * @param word2Vec pre-trained word2Vec
   */
  def vectorization(tokens: Array[Float], embeddingSize: Int,
    word2Vec: Map[Float, Array[Float]]): Array[Array[Float]] = {
    tokens.map { word =>
      if (word2Vec.contains(word)) {
        word2Vec(word)
      } else {
        // Treat it as zeros if cannot be found from pre-trained word2Vec
        Array.fill[Float](embeddingSize)(0)
      }
    }
  }
}

/**
 * @param count frequency of the word.
 * @param index index of the word which ranked by the frequency from high to low.
 */
case class WordMeta(count: Int, index: Int)
dding3/BigDL
dl/src/main/scala/com/intel/analytics/bigdl/example/textclassification/TextTransformer.scala
Scala
apache-2.0
2,971
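A small end-to-end sketch of the tokenize, index, pad, and embed pipeline above. The vocabulary and embedding vectors are hypothetical; it assumes the SimpleTokenizer object is on the classpath:

import com.intel.analytics.bigdl.example.textclassification.{SimpleTokenizer, WordMeta}

object TokenizerPipelineSketch extends App {
  val text = "The quick brown fox jumps over the lazy dog"

  // hypothetical vocabulary: word -> (frequency, rank index)
  val word2Meta = Map(
    "quick" -> WordMeta(10, 1),
    "brown" -> WordMeta(8, 2),
    "fox"   -> WordMeta(5, 3))

  // hypothetical 4-dimensional embeddings keyed by word index
  val word2Vec = Map(
    1f -> Array(0.1f, 0.2f, 0.3f, 0.4f),
    2f -> Array(0.5f, 0.6f, 0.7f, 0.8f))

  val indexed = SimpleTokenizer.toTokens(text, word2Meta)         // Array(1.0, 2.0, 3.0)
  val shaped  = SimpleTokenizer.shaping(indexed, sequenceLen = 5) // zero-padded to length 5
  val vectors = SimpleTokenizer.vectorization(shaped, embeddingSize = 4, word2Vec)

  println(shaped.mkString(", "))                 // 1.0, 2.0, 3.0, 0.0, 0.0
  println(vectors.map(_.mkString("[", ",", "]")).mkString(" "))
}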
/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */

package eu.lateral.swg

import eu.lateral.swg.db.ImageRecord
import eu.lateral.swg.db.Project
import eu.lateral.swg.db.SWGSchema
import eu.lateral.swg.utils._
import java.awt.AlphaComposite
import java.awt.RenderingHints
import java.awt.image.BufferedImage
import java.io.ByteArrayInputStream
import java.io.ByteArrayOutputStream
import javax.imageio.ImageIO
import org.apache.commons.io.IOUtils
import org.squeryl.PrimitiveTypeMode._

object ImageUtils {

  def resize(image: Array[Byte], width: Int, height: Int, format: String,
    thumbnail: Boolean = true): Array[Byte] = {
    val imagetype =
      if (format.toLowerCase == "jpg") BufferedImage.TYPE_3BYTE_BGR
      else BufferedImage.TYPE_INT_ARGB
    val originalImage = ImageIO.read(new ByteArrayInputStream(image))
    val (w, h) = (width.toDouble, height.toDouble)
    val (ow, oh) = (originalImage.getWidth.toDouble, originalImage.getHeight.toDouble)
    val (rw, rh, x, y) =
      if ((w / h) < (ow / oh)) {
        val adapted = oh * w / ow
        (w, adapted, 0.0, (h - adapted) / 2)
      } else {
        val adapted = ow * h / oh
        (adapted, h, (w - adapted) / 2, 0.0)
      }
    val resizedImage =
      if (thumbnail) new BufferedImage(width, height, imagetype)
      else new BufferedImage(rw.toInt, rh.toInt, imagetype)
    val g = resizedImage.createGraphics()
    g.setComposite(AlphaComposite.Src)
    g.setRenderingHint(RenderingHints.KEY_INTERPOLATION, RenderingHints.VALUE_INTERPOLATION_BILINEAR)
    g.setRenderingHint(RenderingHints.KEY_RENDERING, RenderingHints.VALUE_RENDER_QUALITY)
    g.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON)
    if (thumbnail) g.drawImage(originalImage, x.toInt, y.toInt, rw.toInt, rh.toInt, null)
    else g.drawImage(originalImage, 0, 0, rw.toInt, rh.toInt, null)
    g.dispose()
    val out = new ByteArrayOutputStream
    ImageIO.write(resizedImage, format, out)
    out.toByteArray
  }

  def importImages(project: Project, url: String, monitor: StatusMonitor) = {
    val root = toFileObject(url)
    for (child <- traverse(url)) {
      val relativeName = root.getName.getRelativeName(child.getName)
      monitor.info(s"Importing $relativeName")
      transaction {
        val image = toByteArray(child)
        SWGSchema.images.insert(new ImageRecord(
          id = 0,
          projectId = project.id,
          imageNumber = project.maxImageNumber + 1,
          sourceURL = child.getName.getURI,
          relativePath = relativeName,
          techniqueId = 0,
          author = "",
          inception = "",
          width = 0.0,
          height = 0.0,
          originalImage = image,
          bigImage = resize(image, project.imageWidth, project.imageHeight, "jpg", false),
          bigImageFormat = "jpg",
          thumbnailImage = resize(image, project.thumbnailWidth, project.thumbnailHeight, "png", true),
          thumbnailFormat = "png"))
      }
    }
  }
}
orest-d/swg
swg/src/main/scala/eu/lateral/swg/ImageUtils.scala
Scala
gpl-3.0
3,118
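The interesting part of resize above is the letterboxing arithmetic: the image is scaled to fit inside the width x height box while preserving its aspect ratio, then centred along the shorter axis. A standalone sketch of just that geometry, with hypothetical sizes:

object LetterboxSketch extends App {
  /** Scaled size (rw, rh) and top-left offset (x, y) for fitting (ow, oh) inside (w, h). */
  def fit(w: Double, h: Double, ow: Double, oh: Double): (Double, Double, Double, Double) =
    if ((w / h) < (ow / oh)) {
      // original is relatively wider than the box: full width, reduced height, vertical centring
      val adapted = oh * w / ow
      (w, adapted, 0.0, (h - adapted) / 2)
    } else {
      // original is relatively taller: full height, reduced width, horizontal centring
      val adapted = ow * h / oh
      (adapted, h, (w - adapted) / 2, 0.0)
    }

  // hypothetical: fit a 4000x3000 photo into a 200x200 thumbnail box
  println(fit(200, 200, 4000, 3000)) // (200.0, 150.0, 0.0, 25.0)
}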
class Credentials(accessKeyId: String, secretAccessKey: String)
    extends com.amazonaws.auth.AWSCredentials {

  override def getAWSAccessKeyId: String = accessKeyId
  override def getAWSSecretKey: String = secretAccessKey
}

class SessionCredentials(accessKeyId: String, secretAccessKey: String, token: String)
    extends Credentials(accessKeyId, secretAccessKey)
    with com.amazonaws.auth.AWSSessionCredentials {

  override def getSessionToken: String = token
}

object Credentials {

  def apply(accessKeyId: String, secretAccessKey: String): Credentials =
    new Credentials(accessKeyId, secretAccessKey)

  def apply(accessKeyId: String, secretAccessKey: String, token: String): Credentials =
    new SessionCredentials(accessKeyId, secretAccessKey, token)
}
hirokikonishi/awscala
aws/core/src/main/scala/Credentials.scala
Scala
apache-2.0
760
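A quick usage sketch for the credential wrappers above, using placeholder key strings and assuming the classes are in scope (the file shows no package declaration). The three-argument apply yields session credentials:

object CredentialsSketch extends App {
  // static credentials -- hypothetical placeholder values
  val basic = Credentials("AKIAXXXXXXXX", "secret")

  // temporary credentials carrying an STS session token
  val session = Credentials("ASIAXXXXXXXX", "secret", "session-token")

  println(basic.getAWSAccessKeyId)
  println(session.isInstanceOf[com.amazonaws.auth.AWSSessionCredentials]) // true
}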
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.util

import java.io.{File, IOException}

import org.apache.hadoop.conf.Configuration
import org.apache.spark.SparkContext

import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.filesystem.CarbonFile
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.util.CarbonUtil
import org.apache.carbondata.events.{CreateDatabasePostExecutionEvent, OperationContext, OperationListenerBus}
import org.apache.carbondata.processing.exception.DataLoadingException

object FileUtils {

  /**
   * append all csv file path to a String, file path separated by comma
   */
  private def getPathsFromCarbonFile(
      carbonFile: CarbonFile,
      stringBuild: StringBuilder,
      hadoopConf: Configuration): Unit = {
    if (carbonFile.isDirectory) {
      val files = carbonFile.listFiles()
      for (j <- 0 until files.size) {
        getPathsFromCarbonFile(files(j), stringBuild, hadoopConf)
      }
    } else {
      val path = carbonFile.getAbsolutePath
      val fileName = carbonFile.getName
      if (carbonFile.getSize == 0) {
        LogServiceFactory.getLogService(this.getClass.getCanonicalName)
          .warn(s"skip empty input file: ${CarbonUtil.removeAKSK(path)}")
      } else if (fileName.startsWith(CarbonCommonConstants.UNDERSCORE) ||
                 fileName.startsWith(CarbonCommonConstants.POINT)) {
        LogServiceFactory.getLogService(this.getClass.getCanonicalName)
          .warn(s"skip invisible input file: ${CarbonUtil.removeAKSK(path)}")
      } else {
        stringBuild.append(path.replace('\\', '/')).append(CarbonCommonConstants.COMMA)
      }
    }
  }

  /**
   * append all file path to a String, inputPath path separated by comma
   */
  def getPaths(inputPath: String): String = {
    getPaths(inputPath, FileFactory.getConfiguration)
  }

  def getPaths(inputPath: String, hadoopConf: Configuration): String = {
    if (inputPath == null || inputPath.isEmpty) {
      throw new DataLoadingException("Input file path cannot be empty.")
    } else {
      val stringBuild = new StringBuilder()
      val filePaths = inputPath.split(",")
      for (i <- 0 until filePaths.size) {
        val filePath = CarbonUtil.checkAndAppendHDFSUrl(filePaths(i))
        val carbonFile = FileFactory.getCarbonFile(filePath, hadoopConf)
        if (!carbonFile.exists()) {
          throw new DataLoadingException(
            s"The input file does not exist: ${CarbonUtil.removeAKSK(filePaths(i))}")
        }
        getPathsFromCarbonFile(carbonFile, stringBuild, hadoopConf)
      }
      if (stringBuild.nonEmpty) {
        stringBuild.substring(0, stringBuild.size - 1)
      } else {
        throw new DataLoadingException("Please check your input path and make sure " +
          "that files end with '.csv' and content is not empty.")
      }
    }
  }

  def getSpaceOccupied(inputPath: String, hadoopConfiguration: Configuration): Long = {
    var size: Long = 0
    if (inputPath == null || inputPath.isEmpty) {
      size
    } else {
      val filePaths = inputPath.split(",")
      for (i <- 0 until filePaths.size) {
        val carbonFile = FileFactory.getCarbonFile(filePaths(i), hadoopConfiguration)
        size = size + carbonFile.getSize
      }
      size
    }
  }

  def createDatabaseDirectory(dbName: String, storePath: String, sparkContext: SparkContext) {
    val databasePath: String = storePath + File.separator + dbName.toLowerCase
    val fileType = FileFactory.getFileType(databasePath)
    FileFactory.mkdirs(databasePath, fileType)
    val operationContext = new OperationContext
    val createDatabasePostExecutionEvent =
      new CreateDatabasePostExecutionEvent(dbName, databasePath, sparkContext)
    OperationListenerBus.getInstance.fireEvent(createDatabasePostExecutionEvent, operationContext)
  }
}
manishgupta88/carbondata
integration/spark-common/src/main/scala/org/apache/spark/util/FileUtils.scala
Scala
apache-2.0
4,787
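A minimal usage sketch for the getPaths and getSpaceOccupied helpers above. The input directory is hypothetical and a CarbonData runtime is assumed to be on the classpath:

import org.apache.hadoop.conf.Configuration
import org.apache.spark.util.FileUtils

object FileUtilsSketch extends App {
  // hypothetical CSV folder; getPaths expands it to a comma-separated list of
  // non-empty, visible files and throws DataLoadingException otherwise
  val csvPaths = FileUtils.getPaths("/tmp/input-csv", new Configuration())
  println(csvPaths)

  val bytes = FileUtils.getSpaceOccupied(csvPaths, new Configuration())
  println(s"total input size: $bytes bytes")
}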
/** * Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com> */ package akka.io import java.net.{ SocketException, InetSocketAddress } import java.nio.channels.SelectionKey._ import java.io.{ FileInputStream, IOException } import java.nio.channels.{ FileChannel, SocketChannel } import java.nio.ByteBuffer import scala.annotation.tailrec import scala.collection.immutable import scala.util.control.NonFatal import scala.concurrent.duration._ import akka.actor._ import akka.util.ByteString import akka.io.Inet.SocketOption import akka.io.Tcp._ import akka.io.SelectionHandler._ import akka.dispatch.{ UnboundedMessageQueueSemantics, RequiresMessageQueue } /** * Base class for TcpIncomingConnection and TcpOutgoingConnection. * * INTERNAL API */ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketChannel, val pullMode: Boolean) extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import tcp.Settings._ import tcp.bufferPool import TcpConnection._ private[this] var pendingWrite: PendingWrite = EmptyPendingWrite private[this] var peerClosed = false private[this] var writingSuspended = false private[this] var readingSuspended = pullMode private[this] var interestedInResume: Option[ActorRef] = None var closedMessage: CloseInformation = _ // for ConnectionClosed message in postStop private var watchedActor: ActorRef = context.system.deadLetters def signDeathPact(actor: ActorRef): Unit = { unsignDeathPact() watchedActor = actor context.watch(watchedActor) } def unsignDeathPact(): Unit = if (watchedActor ne context.system.deadLetters) context.unwatch(watchedActor) def writePending = pendingWrite ne EmptyPendingWrite // STATES /** connection established, waiting for registration from user handler */ def waitingForRegistration(registration: ChannelRegistration, commander: ActorRef): Receive = { case Register(handler, keepOpenOnPeerClosed, useResumeWriting) ⇒ // up to this point we've been watching the commander, // but since registration is now complete we only need to watch the handler from here on if (handler != commander) { context.unwatch(commander) context.watch(handler) } if (TraceLogging) log.debug("[{}] registered as connection handler", handler) val info = ConnectionInfo(registration, handler, keepOpenOnPeerClosed, useResumeWriting) // if we have resumed reading from pullMode while waiting for Register then register OP_READ interest if (pullMode && !readingSuspended) resumeReading(info) doRead(info, None) // immediately try reading, pullMode is handled by readingSuspended context.setReceiveTimeout(Duration.Undefined) context.become(connected(info)) case ResumeReading ⇒ readingSuspended = false case SuspendReading ⇒ readingSuspended = true case cmd: CloseCommand ⇒ val info = ConnectionInfo(registration, commander, keepOpenOnPeerClosed = false, useResumeWriting = false) handleClose(info, Some(sender()), cmd.event) case ReceiveTimeout ⇒ // after sending `Register` user should watch this actor to make sure // it didn't die because of the timeout log.debug("Configured registration timeout of [{}] expired, stopping", RegisterTimeout) context.stop(self) } /** normal connected state */ def connected(info: ConnectionInfo): Receive = handleWriteMessages(info) orElse { case SuspendReading ⇒ suspendReading(info) case ResumeReading ⇒ resumeReading(info) case ChannelReadable ⇒ doRead(info, None) case cmd: CloseCommand ⇒ handleClose(info, Some(sender()), cmd.event) } /** the peer sent EOF first, but we may still want to send */ def 
peerSentEOF(info: ConnectionInfo): Receive = handleWriteMessages(info) orElse { case cmd: CloseCommand ⇒ handleClose(info, Some(sender()), cmd.event) } /** connection is closing but a write has to be finished first */ def closingWithPendingWrite(info: ConnectionInfo, closeCommander: Option[ActorRef], closedEvent: ConnectionClosed): Receive = { case SuspendReading ⇒ suspendReading(info) case ResumeReading ⇒ resumeReading(info) case ChannelReadable ⇒ doRead(info, closeCommander) case ChannelWritable ⇒ doWrite(info) if (!writePending) // writing is now finished handleClose(info, closeCommander, closedEvent) case UpdatePendingWriteAndThen(remaining, work) ⇒ pendingWrite = remaining work() if (writePending) info.registration.enableInterest(OP_WRITE) else handleClose(info, closeCommander, closedEvent) case WriteFileFailed(e) ⇒ handleError(info.handler, e) // rethrow exception from dispatcher task case Abort ⇒ handleClose(info, Some(sender()), Aborted) } /** connection is closed on our side and we're waiting from confirmation from the other side */ def closing(info: ConnectionInfo, closeCommander: Option[ActorRef]): Receive = { case SuspendReading ⇒ suspendReading(info) case ResumeReading ⇒ resumeReading(info) case ChannelReadable ⇒ doRead(info, closeCommander) case Abort ⇒ handleClose(info, Some(sender()), Aborted) } def handleWriteMessages(info: ConnectionInfo): Receive = { case ChannelWritable ⇒ if (writePending) { doWrite(info) if (!writePending && interestedInResume.nonEmpty) { interestedInResume.get ! WritingResumed interestedInResume = None } } case write: WriteCommand ⇒ if (writingSuspended) { if (TraceLogging) log.debug("Dropping write because writing is suspended") sender() ! write.failureMessage } else if (writePending) { if (TraceLogging) log.debug("Dropping write because queue is full") sender() ! write.failureMessage if (info.useResumeWriting) writingSuspended = true } else { pendingWrite = PendingWrite(sender(), write) if (writePending) doWrite(info) } case ResumeWriting ⇒ /* * If more than one actor sends Writes then the first to send this * message might resume too early for the second, leading to a Write of * the second to go through although it has not been resumed yet; there * is nothing we can do about this apart from all actors needing to * register themselves and us keeping track of them, which sounds bad. * * Thus it is documented that useResumeWriting is incompatible with * multiple writers. But we fail as gracefully as we can. */ writingSuspended = false if (writePending) { if (interestedInResume.isEmpty) interestedInResume = Some(sender()) else sender() ! CommandFailed(ResumeWriting) } else sender() ! WritingResumed case UpdatePendingWriteAndThen(remaining, work) ⇒ pendingWrite = remaining work() if (writePending) info.registration.enableInterest(OP_WRITE) case WriteFileFailed(e) ⇒ handleError(info.handler, e) // rethrow exception from dispatcher task } // AUXILIARIES and IMPLEMENTATION /** used in subclasses to start the common machinery above once a channel is connected */ def completeConnect(registration: ChannelRegistration, commander: ActorRef, options: immutable.Traversable[SocketOption]): Unit = { // Turn off Nagle's algorithm by default try channel.socket.setTcpNoDelay(true) catch { case e: SocketException ⇒ // as reported in #16653 some versions of netcat (`nc -z`) doesn't allow setTcpNoDelay // continue anyway log.debug("Could not enable TcpNoDelay: {}", e.getMessage) } options.foreach(_.afterConnect(channel.socket)) commander ! 
Connected( channel.socket.getRemoteSocketAddress.asInstanceOf[InetSocketAddress], channel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress]) context.setReceiveTimeout(RegisterTimeout) // !!WARNING!! The line below is needed to make Windows notify us about aborted connections, see #15766 if (WindowsConnectionAbortWorkaroundEnabled) registration.enableInterest(OP_CONNECT) context.become(waitingForRegistration(registration, commander)) } def suspendReading(info: ConnectionInfo): Unit = { readingSuspended = true info.registration.disableInterest(OP_READ) } def resumeReading(info: ConnectionInfo): Unit = { readingSuspended = false info.registration.enableInterest(OP_READ) } def doRead(info: ConnectionInfo, closeCommander: Option[ActorRef]): Unit = if (!readingSuspended) { @tailrec def innerRead(buffer: ByteBuffer, remainingLimit: Int): ReadResult = if (remainingLimit > 0) { // never read more than the configured limit buffer.clear() val maxBufferSpace = math.min(DirectBufferSize, remainingLimit) buffer.limit(maxBufferSpace) val readBytes = channel.read(buffer) buffer.flip() if (TraceLogging) log.debug("Read [{}] bytes.", readBytes) if (readBytes > 0) info.handler ! Received(ByteString(buffer)) readBytes match { case `maxBufferSpace` ⇒ if (pullMode) MoreDataWaiting else innerRead(buffer, remainingLimit - maxBufferSpace) case x if x >= 0 ⇒ AllRead case -1 ⇒ EndOfStream case _ ⇒ throw new IllegalStateException("Unexpected value returned from read: " + readBytes) } } else MoreDataWaiting val buffer = bufferPool.acquire() try innerRead(buffer, ReceivedMessageSizeLimit) match { case AllRead ⇒ if (!pullMode) info.registration.enableInterest(OP_READ) case MoreDataWaiting ⇒ if (!pullMode) self ! ChannelReadable case EndOfStream if channel.socket.isOutputShutdown ⇒ if (TraceLogging) log.debug("Read returned end-of-stream, our side already closed") doCloseConnection(info.handler, closeCommander, ConfirmedClosed) case EndOfStream ⇒ if (TraceLogging) log.debug("Read returned end-of-stream, our side not yet closed") handleClose(info, closeCommander, PeerClosed) } catch { case e: IOException ⇒ handleError(info.handler, e) } finally bufferPool.release(buffer) } def doWrite(info: ConnectionInfo): Unit = pendingWrite = pendingWrite.doWrite(info) def closeReason = if (channel.socket.isOutputShutdown) ConfirmedClosed else PeerClosed def handleClose(info: ConnectionInfo, closeCommander: Option[ActorRef], closedEvent: ConnectionClosed): Unit = closedEvent match { case Aborted ⇒ if (TraceLogging) log.debug("Got Abort command. RESETing connection.") doCloseConnection(info.handler, closeCommander, closedEvent) case PeerClosed if info.keepOpenOnPeerClosed ⇒ // report that peer closed the connection info.handler ! PeerClosed // used to check if peer already closed its side later peerClosed = true context.become(peerSentEOF(info)) case _ if writePending ⇒ // finish writing first // Our registered actor is now free to terminate cleanly unsignDeathPact() if (TraceLogging) log.debug("Got Close command but write is still pending.") context.become(closingWithPendingWrite(info, closeCommander, closedEvent)) case ConfirmedClosed ⇒ // shutdown output and wait for confirmation if (TraceLogging) log.debug("Got ConfirmedClose command, sending FIN.") // If peer closed first, the socket is now fully closed. // Also, if shutdownOutput threw an exception we expect this to be an indication // that the peer closed first or concurrently with this code running. 
// also see http://bugs.sun.com/view_bug.do?bug_id=4516760 if (peerClosed || !safeShutdownOutput()) doCloseConnection(info.handler, closeCommander, closedEvent) else context.become(closing(info, closeCommander)) case _ ⇒ // close now if (TraceLogging) log.debug("Got Close command, closing connection.") doCloseConnection(info.handler, closeCommander, closedEvent) } def doCloseConnection(handler: ActorRef, closeCommander: Option[ActorRef], closedEvent: ConnectionClosed): Unit = { if (closedEvent == Aborted) abort() else channel.close() stopWith(CloseInformation(Set(handler) ++ closeCommander, closedEvent)) } def handleError(handler: ActorRef, exception: IOException): Unit = { log.debug("Closing connection due to IO error {}", exception) stopWith(CloseInformation(Set(handler), ErrorClosed(extractMsg(exception)))) } def safeShutdownOutput(): Boolean = try { channel.socket().shutdownOutput() true } catch { case _: SocketException ⇒ false } @tailrec private[this] def extractMsg(t: Throwable): String = if (t == null) "unknown" else { t.getMessage match { case null | "" ⇒ extractMsg(t.getCause) case msg ⇒ msg } } def abort(): Unit = { try channel.socket.setSoLinger(true, 0) // causes the following close() to send TCP RST catch { case NonFatal(e) ⇒ // setSoLinger can fail due to http://bugs.sun.com/view_bug.do?bug_id=6799574 // (also affected: OS/X Java 1.6.0_37) if (TraceLogging) log.debug("setSoLinger(true, 0) failed with [{}]", e) } channel.close() } def stopWith(closeInfo: CloseInformation): Unit = { closedMessage = closeInfo context.stop(self) } override def postStop(): Unit = { if (channel.isOpen) abort() if (writePending) pendingWrite.release() if (closedMessage != null) { val interestedInClose = if (writePending) closedMessage.notificationsTo + pendingWrite.commander else closedMessage.notificationsTo interestedInClose.foreach(_ ! closedMessage.closedEvent) } } override def postRestart(reason: Throwable): Unit = throw new IllegalStateException("Restarting not supported for connection actors.") def PendingWrite(commander: ActorRef, write: WriteCommand): PendingWrite = { @tailrec def create(head: WriteCommand, tail: WriteCommand = Write.empty): PendingWrite = head match { case Write.empty ⇒ if (tail eq Write.empty) EmptyPendingWrite else create(tail) case Write(data, ack) if data.nonEmpty ⇒ PendingBufferWrite(commander, data, ack, tail) case WriteFile(path, offset, count, ack) ⇒ PendingWriteFile(commander, path, offset, count, ack, tail) case CompoundWrite(h, t) ⇒ create(h, t) case x @ Write(_, ack) ⇒ // empty write with either an ACK or a non-standard NoACK if (x.wantsAck) commander ! 
ack create(tail) } create(write) } def PendingBufferWrite(commander: ActorRef, data: ByteString, ack: Event, tail: WriteCommand): PendingBufferWrite = { val buffer = bufferPool.acquire() try { val copied = data.copyToBuffer(buffer) buffer.flip() new PendingBufferWrite(commander, data.drop(copied), ack, buffer, tail) } catch { case NonFatal(e) ⇒ bufferPool.release(buffer) throw e } } class PendingBufferWrite( val commander: ActorRef, remainingData: ByteString, ack: Any, buffer: ByteBuffer, tail: WriteCommand) extends PendingWrite { def doWrite(info: ConnectionInfo): PendingWrite = { @tailrec def writeToChannel(data: ByteString): PendingWrite = { val writtenBytes = channel.write(buffer) // at first we try to drain the remaining bytes from the buffer if (TraceLogging) log.debug("Wrote [{}] bytes to channel", writtenBytes) if (buffer.hasRemaining) { // we weren't able to write all bytes from the buffer, so we need to try again later if (data eq remainingData) this else new PendingBufferWrite(commander, data, ack, buffer, tail) // copy with updated remainingData } else if (data.nonEmpty) { buffer.clear() val copied = data.copyToBuffer(buffer) buffer.flip() writeToChannel(data drop copied) } else { if (!ack.isInstanceOf[NoAck]) commander ! ack release() PendingWrite(commander, tail) } } try { val next = writeToChannel(remainingData) if (next ne EmptyPendingWrite) info.registration.enableInterest(OP_WRITE) next } catch { case e: IOException ⇒ handleError(info.handler, e); this } } def release(): Unit = bufferPool.release(buffer) } def PendingWriteFile(commander: ActorRef, filePath: String, offset: Long, count: Long, ack: Event, tail: WriteCommand): PendingWriteFile = new PendingWriteFile(commander, new FileInputStream(filePath).getChannel, offset, count, ack, tail) class PendingWriteFile( val commander: ActorRef, fileChannel: FileChannel, offset: Long, remaining: Long, ack: Event, tail: WriteCommand) extends PendingWrite with Runnable { def doWrite(info: ConnectionInfo): PendingWrite = { tcp.fileIoDispatcher.execute(this) this } def release(): Unit = fileChannel.close() def run(): Unit = try { val toWrite = math.min(remaining, tcp.Settings.TransferToLimit) val written = fileChannel.transferTo(offset, toWrite, channel) if (written < remaining) { val updated = new PendingWriteFile(commander, fileChannel, offset + written, remaining - written, ack, tail) self ! UpdatePendingWriteAndThen(updated, TcpConnection.doNothing) } else { release() val andThen = if (!ack.isInstanceOf[NoAck]) () ⇒ commander ! ack else doNothing self ! UpdatePendingWriteAndThen(PendingWrite(commander, tail), andThen) } } catch { case e: IOException ⇒ self ! WriteFileFailed(e) } } } /** * INTERNAL API */ private[io] object TcpConnection { sealed trait ReadResult object EndOfStream extends ReadResult object AllRead extends ReadResult object MoreDataWaiting extends ReadResult /** * Used to transport information to the postStop method to notify * interested party about a connection close. */ final case class CloseInformation(notificationsTo: Set[ActorRef], closedEvent: Event) /** * Groups required connection-related data that are only available once the connection has been fully established. 
*/ final case class ConnectionInfo(registration: ChannelRegistration, handler: ActorRef, keepOpenOnPeerClosed: Boolean, useResumeWriting: Boolean) // INTERNAL MESSAGES final case class UpdatePendingWriteAndThen(remainingWrite: PendingWrite, work: () ⇒ Unit) extends NoSerializationVerificationNeeded final case class WriteFileFailed(e: IOException) sealed abstract class PendingWrite { def commander: ActorRef def doWrite(info: ConnectionInfo): PendingWrite def release(): Unit // free any occupied resources } object EmptyPendingWrite extends PendingWrite { def commander: ActorRef = throw new IllegalStateException def doWrite(info: ConnectionInfo): PendingWrite = throw new IllegalStateException def release(): Unit = throw new IllegalStateException } val doNothing: () ⇒ Unit = () ⇒ () }
jmnarloch/akka.js
akka-js-actor/jvm/src/main/scala/akka/io/TcpConnection.scala
Scala
bsd-3-clause
19,760
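The TcpConnection actor above is the internal workhorse behind Akka's TCP support; user code never instantiates it directly but drives it through the Tcp extension. A minimal, hedged sketch of that public flow, assuming the standard Akka 2.x akka.io.Tcp API (the actor name, host and port are illustrative only):

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorSystem, Props}
import akka.io.{IO, Tcp}
import akka.util.ByteString

// Minimal client actor: connects, registers itself as handler, then logs received data.
class EchoClient(remote: InetSocketAddress) extends Actor {
  import Tcp._
  import context.system

  IO(Tcp) ! Connect(remote)

  def receive = {
    case CommandFailed(_: Connect) =>
      context.stop(self)

    case Connected(remoteAddress, _) =>
      println(s"connected to $remoteAddress")
      val connection = sender()
      connection ! Register(self)                // moves the connection actor out of waitingForRegistration
      connection ! Write(ByteString("hello"))
      context.become {
        case Received(data)      => println(s"got ${data.length} bytes")
        case _: ConnectionClosed => context.stop(self)
      }
  }
}

object EchoClientApp extends App {
  val system = ActorSystem("tcp-demo")
  system.actorOf(Props(new EchoClient(new InetSocketAddress("localhost", 8080))))
}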
package com.eevolution.context.dictionary.domain.model

import ai.x.play.json.Jsonx
import com.eevolution.context.dictionary.api.{ActiveEnabled, DomainModel, Identifiable, Traceable}
import org.joda.time.DateTime

/**
 * Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 * Email: [email protected], http://www.e-evolution.com , http://github.com/e-Evolution
 * Created by [email protected] , www.e-evolution.com on 12/10/2017.
 */

/**
 * Replication Log Entity
 * @param replicationLogId replication Log ID
 * @param tenantId Tenant ID
 * @param organizationId Organization ID
 * @param isActive Is Active
 * @param created Created
 * @param createdBy Created By
 * @param updated Updated
 * @param updatedBy Updated By
 * @param replicationRunId Replication Run ID
 * @param replicationTableId Replication Table ID
 * @param message Message
 * @param isReplicated Is Replicated
 * @param uuid UUID
 */
case class ReplicationLog(replicationLogId: Int,
                          tenantId: Int,
                          organizationId: Int,
                          isActive: Boolean = true,
                          created: DateTime = DateTime.now,
                          createdBy: Int,
                          updated: DateTime = DateTime.now,
                          updatedBy: Int,
                          replicationRunId: Int,
                          replicationTableId: Option[Int],
                          message: Option[String],
                          isReplicated: Boolean = false,
                          uuid: String)
  extends DomainModel
    with ActiveEnabled
    with Identifiable
    with Traceable {
  override type ActiveEnabled = this.type
  override type Identifiable = this.type
  override type Traceable = this.type
  override def Id: Int = replicationLogId
  override val entityName: String = "AD_Replication_Log"
  override val identifier: String = "AD_Replication_Log_ID"
}

object ReplicationLog {
  implicit lazy val jsonFormat = Jsonx.formatCaseClass[ReplicationLog]

  def create(replicationLogId: Int,
             tenantId: Int,
             organizationId: Int,
             isActive: Boolean,
             created: DateTime,
             createdBy: Int,
             updated: DateTime,
             updatedBy: Int,
             replicationRunId: Int,
             replicationTableId: Int,
             message: String,
             isReplicated: Boolean,
             uuid: String) =
    ReplicationLog(replicationLogId, tenantId, organizationId, isActive, created, createdBy,
      updated, updatedBy, replicationRunId, None, None, isReplicated, uuid)
}
adempiere/ADReactiveSystem
dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/model/ReplicationLog.scala
Scala
gpl-3.0
3,372
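ReplicationLog carries an implicit play-json format built with Jsonx.formatCaseClass, so instances can be serialized and parsed back. A hedged round-trip sketch; the field values are invented, and it assumes the repository already supplies the implicit DateTime format that the companion's jsonFormat needs:

import play.api.libs.json.Json
import com.eevolution.context.dictionary.domain.model.ReplicationLog

object ReplicationLogJsonDemo extends App {
  // Illustrative values only.
  val log = ReplicationLog(
    replicationLogId = 1000,
    tenantId = 11,
    organizationId = 0,
    createdBy = 100,
    updatedBy = 100,
    replicationRunId = 5000,
    replicationTableId = Some(200),
    message = Some("replicated ok"),
    uuid = java.util.UUID.randomUUID().toString
  )

  // The implicit jsonFormat in the companion object is picked up automatically.
  val json = Json.toJson(log)
  val parsed = json.validate[ReplicationLog]

  println(json)
  println(parsed.isSuccess)
}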
package org.jetbrains.plugins.scala.lang.psi.impl.statements import com.intellij.lang.ASTNode import com.intellij.openapi.progress.ProgressManager import com.intellij.psi._ import com.intellij.psi.scope._ import org.jetbrains.plugins.scala.extensions.{PsiElementExt, ifReadAllowed} import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement import org.jetbrains.plugins.scala.lang.psi.api.expr._ import org.jetbrains.plugins.scala.lang.psi.api.statements._ import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createTypeElementFromText import org.jetbrains.plugins.scala.lang.psi.stubs.ScFunctionStub import org.jetbrains.plugins.scala.lang.psi.types.ScType import org.jetbrains.plugins.scala.lang.psi.types.api.Any import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypeResult, TypingContext} /** * @author Jason Zaugg */ class ScMacroDefinitionImpl private (stub: ScFunctionStub, node: ASTNode) extends ScFunctionImpl(stub, ScalaElementTypes.MACRO_DEFINITION, node) with ScMacroDefinition { def this(node: ASTNode) = this(null, node) def this(stub: ScFunctionStub) = this(stub, null) override def processDeclarations(processor: PsiScopeProcessor, state: ResolveState, lastParent: PsiElement, place: PsiElement): Boolean = { //process function's parameters for dependent method types, and process type parameters if (!super[ScFunctionImpl].processDeclarations(processor, state, lastParent, place)) return false //do not process parameters for default parameters, only for function body //processing parameters for default parameters in ScParameters val parameterIncludingSynthetic: Seq[ScParameter] = effectiveParameterClauses.flatMap(_.parameters) if (getStub == null) { body match { case Some(x) if lastParent != null && (!needCheckProcessingDeclarationsForBody || x.startOffsetInParent == lastParent.startOffsetInParent) => for (p <- parameterIncludingSynthetic) { ProgressManager.checkCanceled() if (!processor.execute(p, state)) return false } case _ => } } else { if (lastParent != null && lastParent.getContext != lastParent.getParent) { for (p <- parameterIncludingSynthetic) { ProgressManager.checkCanceled() if (!processor.execute(p, state)) return false } } } true } protected def needCheckProcessingDeclarationsForBody = true override def toString: String = "ScMacroDefinition: " + ifReadAllowed(name)("") def returnTypeInner: TypeResult[ScType] = returnTypeElement match { case Some(rte: ScTypeElement) => rte.getType(TypingContext.empty) case None => Success(Any, Some(this)) // TODO look up type from the macro impl. } def body: Option[ScExpression] = byPsiOrStub(findChild(classOf[ScExpression]))(_.bodyExpression) override def hasAssign: Boolean = true override def accept(visitor: ScalaElementVisitor) { visitor.visitMacroDefinition(this) } override def getType(ctx: TypingContext): TypeResult[ScType] = { super.getType(ctx) } override def accept(visitor: PsiElementVisitor) { visitor match { case s: ScalaElementVisitor => s.visitMacroDefinition(this) case _ => super.accept(visitor) } } }
loskutov/intellij-scala
src/org/jetbrains/plugins/scala/lang/psi/impl/statements/ScMacroDefinitionImpl.scala
Scala
apache-2.0
3,713
/*
 * Copyright 2001-2013 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalatest.exceptions

/**
 * Exception thrown to indicate a test is pending.
 *
 * <p>
 * A <em>pending test</em> is one that has been given a name but is not yet implemented. The purpose of
 * pending tests is to facilitate a style of testing in which documentation of behavior is sketched
 * out before tests are written to verify that behavior (and often, before the behavior of
 * the system being tested is itself implemented). Such sketches form a kind of specification of
 * what tests and functionality to implement later.
 * </p>
 *
 * <p>
 * To support this style of testing, a test can be given a name that specifies one
 * bit of behavior required by the system being tested. The test can also include some code that
 * sends more information about the behavior to the reporter when the tests run. At the end of the test,
 * it can call method <code>pending</code>, which will cause it to complete abruptly with <code>TestPendingException</code>.
 * Because tests in ScalaTest can be designated as pending with <code>TestPendingException</code>, both the test name and any information
 * sent to the reporter when running the test can appear in the report of a test run. (In other words,
 * the code of a pending test is executed just like any other test.) However, because the test completes abruptly
 * with <code>TestPendingException</code>, the test will be reported as pending, to indicate
 * the actual test, and possibly the functionality, has not yet been implemented.
 * </p>
 *
 * @author Bill Venners
 */
class TestPendingException extends RuntimeException

// This one isn't lazy, because, well, it could be lazy, but it is different than failed.
travisbrown/scalatest
src/main/scala/org/scalatest/exceptions/TestPendingException.scala
Scala
apache-2.0
2,288
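TestPendingException is what ScalaTest's pending construct throws. A minimal illustration of a pending test, written against the current AnyFunSuite style (older releases use org.scalatest.FunSuite); the suite and test names are arbitrary:

import org.scalatest.funsuite.AnyFunSuite

class AccountSpec extends AnyFunSuite {

  // A named but not-yet-implemented behavior: calling `pending` completes the
  // test abruptly with TestPendingException, so it is reported as pending.
  test("an account accrues interest monthly") {
    pending
  }

  // A normal, implemented test for contrast.
  test("an account rejects negative deposits") {
    assert(math.max(0, -5) == 0)
  }
}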
package is.solidninja package openshift package client package impl import is.solidninja.k8s.api.v1.{PodList, ReplicationControllerList, ServiceList} import cats.effect._ import is.solidninja.openshift.api.v1.{Service => v1Service, _} import fs2.async.immutable.Signal import io.circe._ import io.circe.syntax._ import gnieh.diffson.circe._ import gnieh.diffson.circe.DiffsonProtocol._ import org.http4s._ import org.http4s.client._ import org.http4s.headers.{Authorization, Location} private[client] class HttpOpenshiftCluster(url: Uri, token: Signal[IO, Credentials.Token], httpClient: Client[IO]) extends OpenshiftCluster { val client = new HttpOpenshiftClient(httpClient, url, token) override def project(id: ProjectId): IO[OpenshiftProject] = // FIXME incorrect usage of IO.pure IO.pure(new HttpOpenshiftProject(client, id)) } private[client] class HttpOpenshiftProject(client: HttpOpenshiftClient, projectId: ProjectId) extends OpenshiftProject { override def pods(): IO[Seq[Pod]] = client.listPods(projectId) override def pod(name: String): IO[Option[Pod]] = client.getPod(projectId, name) override def deploymentConfigs(): IO[Seq[DeploymentConfig]] = client.listDeploymentConfigs(projectId) override def deploymentConfig(name: String): IO[Option[DeploymentConfig]] = client.getDeploymentConfig(projectId, name) override def routes(): IO[Seq[Route]] = client.listRoutes(projectId) override def route(name: String): IO[Option[Route]] = client.getRoute(projectId, name) override def services(): IO[Seq[v1Service]] = client.listServices(projectId) override def service(name: String): IO[Option[v1Service]] = client.getService(projectId, name) override def replicationControllers(): IO[Seq[ReplicationController]] = client.getReplicationControllers(projectId) override def replicationController(name: String): IO[Option[ReplicationController]] = client.getReplicationController(projectId, name) override def patchDeploymentConfig(name: String, patch: JsonPatch): IO[DeploymentConfig] = client.patchDeploymentConfig(projectId, name, patch) override def patchRoute(name: String, patch: JsonPatch): IO[Route] = client.patchRoute(projectId, name, patch) override def patchService(name: String, patch: JsonPatch): IO[v1Service] = client.patchService(projectId, name, patch) override def createDeploymentConfig(dc: DeploymentConfig): IO[DeploymentConfig] = client.createDeploymentConfig(projectId, dc) override def createRoute(route: Route): IO[Route] = client.createRoute(projectId, route) override def createService(service: v1Service): IO[v1Service] = client.createService(projectId, service) } private[client] class HttpOpenshiftClient(client: Client[IO], url: Uri, token: Signal[IO, Credentials.Token]) { import JsonProtocol._ import org.http4s.circe._ private val v1k8s = url / "api" / "v1" private val v1oapi = url / "oapi" / "v1" private def namespacek8s(projectId: ProjectId) = v1k8s / "namespaces" / projectId.id private def namespace(projectId: ProjectId) = v1oapi / "namespaces" / projectId.id def listPods(projectId: ProjectId): IO[Seq[Pod]] = get[PodList](namespacek8s(projectId) / "pods").map(_.items) def getPod(projectId: ProjectId, name: String): IO[Option[Pod]] = getOpt[Pod](namespacek8s(projectId) / "pods" / name) def listServices(projectId: ProjectId): IO[Seq[v1Service]] = get[ServiceList](namespacek8s(projectId) / "services").map(_.items) def getService(projectId: ProjectId, name: String): IO[Option[v1Service]] = getOpt[v1Service](namespacek8s(projectId) / "services" / name) def listRoutes(projectId: ProjectId): IO[Seq[Route]] 
= get[RouteList](namespace(projectId) / "routes").map(_.items) def getRoute(projectId: ProjectId, name: String): IO[Option[Route]] = getOpt[Route](namespace(projectId) / "routes" / name) def listDeploymentConfigs(projectId: ProjectId): IO[Seq[DeploymentConfig]] = get[DeploymentConfigList](namespace(projectId) / "deploymentconfigs").map(_.items) def getDeploymentConfig(projectId: ProjectId, name: String): IO[Option[DeploymentConfig]] = getOpt[DeploymentConfig](namespace(projectId) / "deploymentconfigs" / name) def getReplicationControllers(projectId: ProjectId): IO[Seq[ReplicationController]] = get[ReplicationControllerList](namespacek8s(projectId) / "replicationcontrollers").map(_.items) def getReplicationController(projectId: ProjectId, name: String): IO[Option[ReplicationController]] = getOpt[ReplicationController](namespacek8s(projectId) / "replicationcontrollers" / name) def patchDeploymentConfig(projectId: ProjectId, name: String, thePatch: JsonPatch): IO[DeploymentConfig] = patch[DeploymentConfig](namespace(projectId) / "deploymentconfigs" / name, thePatch.asJson) def patchService(projectId: ProjectId, name: String, thePatch: JsonPatch): IO[v1Service] = patch[v1Service](namespacek8s(projectId) / "services" / name, thePatch.asJson) def patchRoute(projectId: ProjectId, name: String, thePatch: JsonPatch): IO[Route] = patch[Route](namespace(projectId) / "routes" / name, thePatch.asJson) def createDeploymentConfig(projectId: ProjectId, dc: DeploymentConfig): IO[DeploymentConfig] = post[DeploymentConfig](namespace(projectId) / "deploymentconfigs", dc) def createRoute(projectId: ProjectId, route: Route): IO[Route] = post[Route](namespace(projectId) / "routes", route) def createService(projectId: ProjectId, service: v1Service): IO[v1Service] = post[v1Service](namespacek8s(projectId) / "services", service) private def getOpt[T: Decoder](uri: Uri)(implicit EF: Effect[IO]): IO[Option[T]] = EF.handleError(get[T](uri).map(Option(_))) { case UnexpectedStatus(Status.NotFound) => None } private def get[T: Decoder](uri: Uri): IO[T] = req[T](Request[IO](method = Method.GET, uri = uri)) private def patch[T: Decoder](uri: Uri, patch: Json): IO[T] = req[T]( Request[IO](method = Method.PATCH, uri = uri) .withBody(patch) // override the content-type header .map(_.putHeaders(Header("Content-Type", "application/json-patch+json"))) ) private def post[T: Decoder: Encoder](uri: Uri, obj: T): IO[T] = req[T]( Request[IO](method = Method.POST, uri = uri) .withBody(obj)(implicitly, jsonEncoderOf[IO, T]) ) private def req[T: Decoder](reqT: Request[IO]): IO[T] = req[T](IO.pure(reqT)) // FIXME: handle unauthorized requests in a more principled fashion - perhaps a IO[Credentials.Token]? private def req[T: Decoder](reqT: IO[Request[IO]]): IO[T] = for { tok <- token.get req <- reqT.map(_.putHeaders(Authorization(tok))) resp <- client.expect(req)(jsonOf[IO, T]) } yield resp } object OAuthClusterLogin { def cache(t: IO[Credentials.Token]): IO[Signal[IO, Credentials.Token]] = for { tok <- t } yield fs2.async.mutable.Signal.constant[IO, Credentials.Token](tok) def basic(client: Client[IO], url: Uri, credentials: BasicCredentials): IO[Credentials.Token] = { val req = Request[IO]( method = Method.GET, uri = url / "oauth" / "authorize" +? ("response_type", "token") +? 
("client_id", "openshift-challenging-client"), headers = Headers( Header("X-CSRF-Token", "1"), Authorization(credentials) ) ) client.fetch[Credentials.Token](req)(getToken) } // FIXME: Define own exception types private[client] def getToken(resp: Response[IO]): IO[Credentials.Token] = IO { resp.headers .collectFirst { case Location(v) => v.uri.fragment.flatMap(extractToken) } .flatten .getOrElse(throw new RuntimeException("Unable to get location header and token")) } private[client] def extractToken(fragment: String): Option[Credentials.Token] = { val fragmentMap = fragment .split("&") .toList .map { kv => kv.split("=").toList match { case k :: v :: Nil => (k, v) // FIXME - ugly style case x => throw new RuntimeException(s"Expected key=value, got $x") } } .toMap if (fragmentMap.get("token_type").contains("Bearer")) fragmentMap.get("access_token").map(v => Credentials.Token(AuthScheme.Bearer, v)) else None } }
solidninja/openshift-scala-api
src/main/scala/is/solidninja/openshift/client/impl/HttpOpenshiftClient.scala
Scala
mit
8,355
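OAuthClusterLogin.extractToken parses the URI fragment returned by OpenShift's implicit OAuth flow, i.e. access_token=...&token_type=Bearer&.... A small self-contained sketch of the same parsing idea without http4s; the fragment value is invented:

object FragmentTokenDemo extends App {
  // The shape of an implicit-flow redirect fragment; the token value is made up.
  val fragment = "access_token=sha256~abc123&expires_in=86400&scope=user%3Afull&token_type=Bearer"

  def extractBearer(fragment: String): Option[String] = {
    val kvs = fragment.split("&").toList.flatMap { kv =>
      kv.split("=", 2).toList match {
        case k :: v :: Nil => Some(k -> v)
        case _             => None
      }
    }.toMap
    // Only accept the token if the flow actually returned a Bearer token.
    if (kvs.get("token_type").contains("Bearer")) kvs.get("access_token") else None
  }

  println(extractBearer(fragment)) // Some(sha256~abc123)
}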
package lore.compiler.poem.writer

import lore.compiler.core.CompilationException
import lore.compiler.semantics.NamePath

import java.io.ByteArrayOutputStream
import java.nio.{ByteBuffer, ByteOrder}
import java.nio.charset.Charset

/**
  * The bytecode writer writes various primitive data sizes in big endian.
  */
class BytecodeWriter {
  val output = new ByteArrayOutputStream()

  def writeUInt8(value: Int): Unit = {
    if (value > 255) {
      throw CompilationException(s"Cannot write value $value as a uint8: Maximum value exceeded.")
    }
    output.write((value & 0xFF).toByte)
  }

  def writeUInt16(value: Int): Unit = {
    if (value > 65535) {
      throw CompilationException(s"Cannot write value $value as a uint16: Maximum value exceeded.")
    }
    output.write(((value & 0xFF00) >> 8).toByte)
    output.write((value & 0xFF).toByte)
  }

  def writeInt16(value: Int): Unit = {
    if (value < Short.MinValue || value > Short.MaxValue) {
      throw CompilationException(s"Cannot write value $value as an int16: Maximum or minimum value exceeded.")
    }
    output.write(newBuffer(2).putShort(value.toShort).array())
  }

  def writeInt64(value: Long): Unit = output.writeBytes(newBuffer(8).putLong(value).array())

  def writeFloat64(value: Double): Unit = output.writeBytes(newBuffer(8).putDouble(value).array())

  def writeBoolean(value: Boolean): Unit = writeUInt8(if (value) 1 else 0)

  def writeString(string: String): Unit = {
    output.writeBytes(string.getBytes(Charset.forName("UTF-8")))
  }

  def writeStringWithLength(string: String): Unit = {
    val bytes = string.getBytes(Charset.forName("UTF-8"))
    writeUInt16(bytes.length)
    output.writeBytes(bytes)
  }

  def writeNamePath(namePath: NamePath): Unit = {
    writeStringWithLength(namePath.toString)
  }

  def writeManyWithCount8[A](values: Vector[A], write: A => Unit): Unit = {
    writeUInt8(values.length)
    values.foreach(write)
  }

  def writeManyWithCount16[A](values: Vector[A], write: A => Unit): Unit = {
    writeUInt16(values.length)
    values.foreach(write)
  }

  private def newBuffer(capacity: Int): ByteBuffer = ByteBuffer.allocate(capacity).order(ByteOrder.BIG_ENDIAN)
}
marcopennekamp/lore
compiler/src/lore/compiler/poem/writer/BytecodeWriter.scala
Scala
mit
2,199
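Because output is a plain ByteArrayOutputStream, BytecodeWriter can be exercised directly. A short usage sketch with arbitrary values (the class above already relies on ByteArrayOutputStream.writeBytes, which needs Java 11 or later):

import lore.compiler.poem.writer.BytecodeWriter

object BytecodeWriterDemo extends App {
  val writer = new BytecodeWriter()

  writer.writeUInt8(255)                              // one byte
  writer.writeUInt16(0x1234)                          // big endian: 0x12, 0x34
  writer.writeInt64(-1L)                              // eight 0xFF bytes
  writer.writeStringWithLength("poem")                // uint16 length prefix + UTF-8 bytes
  writer.writeManyWithCount8(Vector(1, 2, 3), writer.writeUInt8)

  // Dump the accumulated bytes as hex.
  val bytes = writer.output.toByteArray
  println(bytes.map(b => f"${b & 0xFF}%02x").mkString(" "))
}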
package se.citerus.dddsample.application.impl

import org.apache.commons.logging.Log
import se.citerus.dddsample.domain.model.handling._;
import org.apache.commons.logging.LogFactory;
import org.springframework.transaction.annotation.Transactional;
import se.citerus.dddsample.application.ApplicationEvents;
import se.citerus.dddsample.application.HandlingEventService;
import se.citerus.dddsample.domain.model.cargo.TrackingId;
import se.citerus.dddsample.domain.model.handling.CannotCreateHandlingEventException
import se.citerus.dddsample.domain.model.location.UnLocode;
import se.citerus.dddsample.domain.model.voyage.VoyageNumber;
import java.util.Date;

class HandlingEventServiceImpl(val handlingEventRepository: HandlingEventRepository,
                               val applicationEvents: ApplicationEvents,
                               handlingEventFactory: HandlingEventFactory) extends HandlingEventService {

  val logger = LogFactory.getLog(getClass());

  @Override
  //@Transactional(rollbackFor = new Array(CannotCreateHandlingEventException))
  def registerHandlingEvent(completionTime: Date,
                            trackingId: TrackingId,
                            voyageNumber: VoyageNumber,
                            unLocode: UnLocode,
                            eventType: HandlingEventType): Unit = {
    val registrationTime = new Date();

    /* Using a factory to create a HandlingEvent (aggregate). This is where
       it is determined whether the incoming data, the attempt, actually is capable
       of representing a real handling event. */
    val event = handlingEventFactory.createHandlingEvent(
      registrationTime, completionTime, trackingId, voyageNumber, unLocode, eventType);

    /* Store the new handling event, which updates the persistent state of the
       handling event aggregate (but not the cargo aggregate - that happens asynchronously!) */
    handlingEventRepository.store(event);

    /* Publish an event stating that a cargo has been handled. */
    applicationEvents.cargoWasHandled(event);

    logger.info("Registered handling event");
  }
}
oluies/ddd-sample-scala
src/main/scala/se/citerus/dddsample/application/impl/HandlingEventServiceImpl.scala
Scala
mit
2,176
package scala.meta
package classifiers

private[meta] trait Api {
  implicit class XtensionClassifiable[T: Classifiable](x: T) {
    def is[U](implicit classifier: Classifier[T, U]): Boolean = {
      classifier.apply(x)
    }

    def isNot[U](implicit classifier: Classifier[T, U]): Boolean = {
      !this.is(classifier)
    }
  }
}

private[meta] trait Aliases {
}
DavidDudson/scalameta
scalameta/common/shared/src/main/scala/scala/meta/classifiers/Api.scala
Scala
bsd-3-clause
370
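The Api trait above is what backs scalameta's tree.is[...] and tree.isNot[...] syntax via the scala.meta package object. A hedged example against the public scalameta API; the parsed expression is arbitrary:

import scala.meta._

object ClassifierDemo extends App {
  val tree: Tree = "List(1, 2, 3).map(_ + 1)".parse[Term].get

  // `is` / `isNot` come from XtensionClassifiable once scala.meta._ is imported.
  println(tree.is[Term.Apply])    // true: the whole expression is a method application
  println(tree.isNot[Term.Name])  // true: it is not a bare name
}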
/*
 * Copyright 2010-2011 Vilius Normantas <[email protected]>
 *
 * This file is part of Crossbow library.
 *
 * Crossbow is free software: you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * Crossbow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
 * even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with Crossbow. If not,
 * see <http://www.gnu.org/licenses/>.
 */

package lt.norma.crossbow.indicators

/** Calculates square root of the specified target indicator. */
class Sqrt(target: Indicator[Double]) extends Transformation(target)(
  _.map(math.sqrt).filter(v => !v.isNaN && !v.isInfinity)
) {
  override def name = "Sqrt(" + target.name + ")"
}
ViliusN/Crossbow
crossbow-core/src/lt/norma/crossbow/indicators/Sqrt.scala
Scala
gpl-3.0
1,027
/* * Copyright 2015-2018 Snowflake Computing * Copyright 2015 TouchType Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.snowflake.spark.snowflake import net.snowflake.spark.snowflake.streaming.SnowflakeSink import net.snowflake.spark.snowflake.Utils.SNOWFLAKE_SOURCE_SHORT_NAME import org.apache.spark.sql.execution.streaming.Sink import org.apache.spark.sql.sources._ import org.apache.spark.sql.streaming.OutputMode import org.apache.spark.sql.types.StructType import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode} import org.slf4j.LoggerFactory /** * Snowflake Source implementation for Spark SQL * Major TODO points: * - Add support for compression Snowflake->Spark * - Add support for using Snowflake Stage files, so the user doesn't need * to provide AWS passwords * - Add support for VARIANT */ class DefaultSource(jdbcWrapper: JDBCWrapper) extends RelationProvider with SchemaRelationProvider with CreatableRelationProvider with StreamSinkProvider with DataSourceRegister { override def shortName(): String = SNOWFLAKE_SOURCE_SHORT_NAME private val log = LoggerFactory.getLogger(getClass) /** * Default constructor required by Data Source API */ def this() = this(DefaultJDBCWrapper) /** * Create a new `SnowflakeRelation` instance using parameters from Spark SQL DDL. * Resolves the schema using JDBC connection over provided URL, which must contain credentials. */ override def createRelation(sqlContext: SQLContext, parameters: Map[String, String]): BaseRelation = { val params = Parameters.mergeParameters(parameters) // check spark version for push down if (params.autoPushdown) { SnowflakeConnectorUtils.checkVersionAndEnablePushdown( sqlContext.sparkSession ) } // pass parameters to pushdown functions pushdowns.setGlobalParameter(params) SnowflakeRelation(jdbcWrapper, params, None)(sqlContext) } /** * Load a `SnowflakeRelation` using user-provided schema, so no inference over JDBC will be used. 
*/ override def createRelation(sqlContext: SQLContext, parameters: Map[String, String], schema: StructType): BaseRelation = { val params = Parameters.mergeParameters(parameters) // check spark version for push down if (params.autoPushdown) { SnowflakeConnectorUtils.checkVersionAndEnablePushdown( sqlContext.sparkSession ) } // pass parameters to pushdown functions pushdowns.setGlobalParameter(params) SnowflakeRelation(jdbcWrapper, params, Some(schema))(sqlContext) } /** * Creates a Relation instance by first writing the contents of the given DataFrame to Snowflake */ override def createRelation(sqlContext: SQLContext, saveMode: SaveMode, parameters: Map[String, String], data: DataFrame): BaseRelation = { val params = Parameters.mergeParameters(parameters) // check spark version for push down if (params.autoPushdown) { SnowflakeConnectorUtils.checkVersionAndEnablePushdown( sqlContext.sparkSession ) } // pass parameters to pushdown functions pushdowns.setGlobalParameter(params) val table = params.table.getOrElse { throw new IllegalArgumentException( "For save operations you must specify a Snowfake table name with the 'dbtable' parameter" ) } def tableExists: Boolean = jdbcWrapper.tableExists(params, table.toString) val (doSave, dropExisting) = saveMode match { case SaveMode.Append => (true, false) case SaveMode.Overwrite => (true, true) case SaveMode.ErrorIfExists => if (tableExists) { sys.error( s"Table $table already exists! (SaveMode is set to ErrorIfExists)" ) } else { (true, false) } case SaveMode.Ignore => if (tableExists) { log.info(s"Table $table already exists -- ignoring save request.") (false, false) } else { (true, false) } } if (doSave) { val updatedParams = parameters.updated("overwrite", dropExisting.toString) new SnowflakeWriter(jdbcWrapper) .save( sqlContext, data, saveMode, Parameters.mergeParameters(updatedParams) ) } createRelation(sqlContext, parameters) } override def createSink(sqlContext: SQLContext, parameters: Map[String, String], partitionColumns: Seq[String], outputMode: OutputMode): Sink = new SnowflakeSink(sqlContext, parameters, partitionColumns, outputMode) }
snowflakedb/spark-snowflake
src/main/scala/net/snowflake/spark/snowflake/DefaultSource.scala
Scala
apache-2.0
5,366
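DefaultSource is registered under the connector's short name and is normally reached through Spark's DataFrame reader. A hedged usage sketch; the sfURL/sfUser/... option keys follow the connector's documented parameters, and all credentials below are placeholders:

import org.apache.spark.sql.SparkSession

object SnowflakeReadDemo extends App {
  val spark = SparkSession.builder().appName("snowflake-demo").master("local[*]").getOrCreate()

  // Placeholder connection settings: replace with real values.
  val sfOptions = Map(
    "sfURL"       -> "account.snowflakecomputing.com",
    "sfUser"      -> "user",
    "sfPassword"  -> "password",
    "sfDatabase"  -> "db",
    "sfSchema"    -> "public",
    "sfWarehouse" -> "wh"
  )

  val df = spark.read
    .format("net.snowflake.spark.snowflake") // resolved to the DefaultSource above
    .options(sfOptions)
    .option("dbtable", "MY_TABLE")
    .load()

  df.show()
}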
package repositories.storage.dao

import no.uio.musit.models.ObjectTypes.{CollectionObjectType, Node, SampleObjectType}
import no.uio.musit.models.{ObjectUUID, StorageNodeId}
import no.uio.musit.test.MusitSpecWithAppPerSuite
import no.uio.musit.test.matchers.MusitResultValues

class LocalObjectDaoSpec extends MusitSpecWithAppPerSuite with MusitResultValues {

  val dao = fromInstanceCache[LocalObjectDao]

  val objectId1 = ObjectUUID.unsafeFromString("3a71f423-52b2-4437-a62b-6e37ad406bcd")
  val objectId2 = ObjectUUID.unsafeFromString("baab2f60-4f49-40fe-99c8-174b13b12d46")
  val sampleObjectId1 = ObjectUUID.unsafeFromString("28cf7c75-66b2-4991-b871-d92baeec0049")

  val location1 = StorageNodeId.unsafeFromString("244f09a3-eb1a-49e7-80ee-7a07baa016dd")
  val location2 = StorageNodeId.unsafeFromString("01134afe-b262-434b-a71f-8f697bc75e56")

  "LocalObjectDao" should {
    "find location for a list of ObjectUUIDs of different object types" in {
      val res = dao.currentLocations(Seq(objectId1, sampleObjectId1)).futureValue.successValue
      res.size mustBe 2
      res mustBe Map(
        objectId1 -> Some(location1),
        sampleObjectId1 -> Some(location2)
      )
    }

    "find location for a single collection object" in {
      val res = dao.currentLocation(objectId1, CollectionObjectType).futureValue.successValue
      res mustBe Some(location1)
    }

    "find a location for a single sample object" in {
      val res = dao.currentLocation(objectId2, SampleObjectType).futureValue.successValue
      res mustBe Some(location1)
    }

    "return an empty result when object has no location" in {
      val noSuchObject = ObjectUUID.generate()
      val res = dao.currentLocations(Seq(noSuchObject)).futureValue
      res.successValue mustBe Map(noSuchObject -> None)
    }

    "return an empty result when object type is wrong" in {
      val res = dao.currentLocation(objectId1, Node).futureValue.successValue
      res mustBe None
    }
  }
}
MUSIT-Norway/musit
service_backend/test/repositories/storage/dao/LocalObjectDaoSpec.scala
Scala
gpl-2.0
2,038
package gapt.proofs import gapt.expr._ import gapt.expr.ty.->: import gapt.expr.ty.TBase import gapt.expr.ty.TVar import gapt.expr.ty.Ty import gapt.proofs.context.Context import gapt.proofs.context.facet.ProofNames import gapt.proofs.context.update.Update import gapt.proofs.lk.LKProof import gapt.proofs.lk.rules.AndLeftRule import gapt.proofs.lk.rules.AndRightRule import gapt.proofs.lk.rules.BottomAxiom import gapt.proofs.lk.rules.ContractionRule import gapt.proofs.lk.rules.CutRule import gapt.proofs.lk.rules.ConversionRule import gapt.proofs.lk.rules.EqualityRule import gapt.proofs.lk.rules.ExistsRightRule import gapt.proofs.lk.rules.ForallLeftRule import gapt.proofs.lk.rules.ImpLeftRule import gapt.proofs.lk.rules.ImpRightRule import gapt.proofs.lk.rules.InductionRule import gapt.proofs.lk.rules.LogicalAxiom import gapt.proofs.lk.rules.NegLeftRule import gapt.proofs.lk.rules.NegRightRule import gapt.proofs.lk.rules.OrLeftRule import gapt.proofs.lk.rules.OrRightRule import gapt.proofs.lk.rules.ProofLink import gapt.proofs.lk.rules.ReflexivityAxiom import gapt.proofs.lk.rules.SkolemQuantifierRule import gapt.proofs.lk.rules.StrongQuantifierRule import gapt.proofs.lk.rules.TopAxiom import gapt.proofs.lk.rules.WeakeningLeftRule import gapt.proofs.lk.rules.WeakeningRightRule import gapt.proofs.resolution.ResolutionProof import scala.collection.mutable trait Checkable[-T] { def check( obj: T )( implicit ctx: Context ): Unit } object Checkable { def requireDefEq( a: Expr, b: Expr )( implicit ctx: Context ): Unit = require( ctx.isDefEq( a, b ), s"${ctx.normalize( a ).toSigRelativeString} != ${ctx.normalize( b ).toSigRelativeString}" ) implicit object contextElementIsCheckable extends Checkable[Update] { def check( elem: Update )( implicit context: Context ): Unit = elem( context ) } class ExpressionChecker( implicit ctx: Context ) { private val validTy = mutable.Set[Ty]() def check( ty: Ty ): Unit = { if ( validTy.contains( ty ) ) return ty match { case ty @ TBase( name, params ) => require( ctx.isType( ty ), s"Unknown base type: $name" ) params.foreach( check ) case TVar( _ ) => case in ->: out => check( in ) check( out ) } validTy += ty } private val validExpr = mutable.Set[Expr]() def check( expr: Expr ): Unit = { if ( validExpr( expr ) ) return expr match { case c @ Const( name, _, params ) => require( ctx.constant( name, params ).contains( c ), s"Unknown constant: $c" ) case Var( _, t ) => check( t ) case Abs( v, e ) => check( v ) check( e ) case App( a, b ) => check( a ) check( b ) } validExpr += expr } } implicit object typeIsCheckable extends Checkable[Ty] { override def check( ty: Ty )( implicit context: Context ): Unit = new ExpressionChecker().check( ty ) } implicit object expressionIsCheckable extends Checkable[Expr] { def check( expr: Expr )( implicit context: Context ): Unit = new ExpressionChecker().check( expr ) } implicit def sequentIsCheckable[T: Checkable] = new Checkable[Sequent[T]] { def check( sequent: Sequent[T] )( implicit context: Context ) = sequent.foreach( context.check( _ ) ) } implicit object lkIsCheckable extends Checkable[LKProof] { import gapt.proofs.lk._ def check( p: LKProof )( implicit ctx: Context ): Unit = { ctx.check( p.endSequent ) p.subProofs.foreach { case ForallLeftRule( _, _, a, t, v ) => ctx.check( t ) case ExistsRightRule( _, _, a, t, v ) => ctx.check( t ) case q: EqualityRule => ctx.check( q.replacementContext ) case q @ InductionRule( cases, formula, term ) => ctx.check( formula ) ctx.check( term ) val Some( ctrsInCtx ) = ctx.getConstructors( 
q.indTy.asInstanceOf[TBase] ) val ctrsInProof = cases.map( _.constructor ) require( ctrsInProof == ctrsInCtx, s"Induction rule has incorrect constructors: ${ctrsInProof.mkString( ", " )}\\n" + s"Expected: ${ctrsInCtx.mkString( ", " )}" ) case sk: SkolemQuantifierRule => val Some( skolemDef ) = ctx.skolemDef( sk.skolemConst ) Checkable.requireDefEq( skolemDef( sk.skolemArgs ), sk.mainFormula ) ctx.check( sk.skolemTerm ) case StrongQuantifierRule( _, _, _, _, _ ) => case _: ReflexivityAxiom | _: LogicalAxiom => case ProofLink( name, sequent ) => val declSeq = ctx.get[ProofNames].lookup( name ) require( declSeq.nonEmpty, s"Proof name $name does not exist in context" ) require( declSeq.get == sequent, s"$declSeq\\nis not equal to \\n$sequent" ) case TopAxiom | BottomAxiom | _: NegLeftRule | _: NegRightRule | _: AndLeftRule | _: AndRightRule | _: OrLeftRule | _: OrRightRule | _: ImpLeftRule | _: ImpRightRule => case _: ContractionRule | _: WeakeningLeftRule | _: WeakeningRightRule => case _: CutRule => case d: ConversionRule => requireDefEq( d.mainFormula, d.auxFormula )( ctx ) } } } implicit object resolutionIsCheckable extends Checkable[ResolutionProof] { import gapt.proofs.resolution._ def check( p: ResolutionProof )( implicit ctx: Context ): Unit = { def checkAvatarDef( comp: AvatarDefinition ): Unit = for ( ( df, by ) <- comp.inducedDefinitions ) requireDefEq( df, by )( ctx ) p.subProofs.foreach { case Input( sequent ) => ctx.check( sequent ) case Refl( term ) => ctx.check( term ) case Taut( form ) => ctx.check( form ) case Defn( df, by ) => require( ctx.isDefEq( df, by ) ) case _: WeakQuantResolutionRule => case q: SkolemQuantResolutionRule => val Some( skolemDef ) = ctx.skolemDef( q.skolemConst ) require( BetaReduction.betaNormalize( skolemDef( q.skolemArgs ) ) == q.subProof.conclusion( q.idx ) ) ctx.check( q.skolemTerm ) case DefIntro( _, _, definition, _ ) => requireDefEq( definition.what, definition.by )( ctx ) case _: PropositionalResolutionRule => case AvatarComponent( defn ) => checkAvatarDef( defn ) case AvatarSplit( _, _, defn ) => checkAvatarDef( defn ) case _: AvatarComponent | _: AvatarSplit | _: AvatarContradiction => case _: Flip | _: Paramod => case _: Resolution => case _: Factor => case _: Subst => } } } }
gapt/gapt
core/src/main/scala/gapt/proofs/Checkable.scala
Scala
gpl-3.0
6,740
/* scala-stm - (c) 2009-2011, Stanford University, PPL */

package scala.concurrent.stm.ccstm

import java.util.concurrent.atomic.AtomicLong
import scala.annotation.tailrec

/** A counter with a linearizable increment operator and adaptive contention
 *  avoidance. Reading the counter with `apply()` is not linearizable (unless
 *  the only delta passed to += is 1) and is not optimized.
 */
private[ccstm] class Counter extends {

  private final val MaxStripes = 64

  // this doesn't need to be volatile because when we grow it we retain all of
  // the old AtomicLong-s
  private var _stripes = Array(new AtomicLong)

  private def grow(): Unit = synchronized {
    if (_stripes.length < MaxStripes) {
      val repl = new Array[AtomicLong](_stripes.length * 2)
      System.arraycopy(_stripes, 0, repl, 0, _stripes.length)
      var i = _stripes.length
      while (i < repl.length) {
        repl(i) = new AtomicLong
        i += 1
      }
      _stripes = repl
    }
  }

  def += (delta: Int): Unit = if (delta != 0) {
    incr(delta)
  }

  @tailrec private def incr(delta: Int): Unit = {
    val s = _stripes
    val i = CCSTM.hash(Thread.currentThread) & (s.length - 1)
    val prev = s(i).get
    if (!s(i).compareAndSet(prev, prev + delta)) {
      grow()
      incr(delta)
    }
  }

  def apply(): Long = _stripes.foldLeft(0L)( _ + _.get )

  override def toString: String = apply().toString
}
nbronson/scala-stm
src/main/scala/scala/concurrent/stm/ccstm/Counter.scala
Scala
bsd-3-clause
1,444
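A short sketch of the striped counter in use: concurrent increments land on per-thread stripes, and apply() sums them. Because Counter is private[ccstm], the demo is placed in the same package; thread and iteration counts are arbitrary:

package scala.concurrent.stm.ccstm

object CounterDemo extends App {
  val c = new Counter

  // Four threads, 100000 increments each.
  val threads = (1 to 4).map { _ =>
    new Thread(() => { var i = 0; while (i < 100000) { c += 1; i += 1 } })
  }
  threads.foreach(_.start())
  threads.foreach(_.join())

  println(c())        // 400000: increments of 1 are linearizable
  println(c.toString)
}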
package com.karasiq.shadowcloud.test.utils

import akka.util.ByteString

import com.karasiq.common.encoding.HexString
import com.karasiq.shadowcloud.model.Chunk

trait ByteStringImplicits {
  implicit class ByteStringOps(private val bs: ByteString) {
    def toHexString: String = {
      HexString.encode(bs)
    }
  }

  implicit class ByteStringObjOps(private val bs: ByteString.type) {
    def fromHexString(hexString: String): ByteString = {
      HexString.decode(hexString)
    }

    def fromChunks(chunks: Seq[Chunk]): ByteString = {
      chunks.map(_.data.plain).fold(ByteString.empty)(_ ++ _)
    }

    def fromEncryptedChunks(chunks: Seq[Chunk]): ByteString = {
      chunks.map(_.data.encrypted).fold(ByteString.empty)(_ ++ _)
    }
  }
}
Karasiq/shadowcloud
utils/test/src/main/scala/com/karasiq/shadowcloud/test/utils/ByteStringImplicits.scala
Scala
apache-2.0
755
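With the trait mixed in, ByteString gains hex helpers in both directions. A minimal sketch, assuming only akka-util plus the HexString codec referenced above:

import akka.util.ByteString
import com.karasiq.shadowcloud.test.utils.ByteStringImplicits

object HexDemo extends App with ByteStringImplicits {
  val bytes = ByteString(0xde.toByte, 0xad.toByte, 0xbe.toByte, 0xef.toByte)

  val hex = bytes.toHexString              // "deadbeef"
  val back = ByteString.fromHexString(hex) // decode back into a ByteString

  println(hex)
  println(back == bytes)                   // true
}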
package com.sksamuel.elastic4s.requests.alias

import com.sksamuel.elastic4s.Indexes
import com.sksamuel.elastic4s.ext.OptionImplicits._

case class GetAliasesRequest(indices: Indexes,
                             aliases: Seq[String] = Nil,
                             ignoreUnavailable: Option[Boolean] = None) {
  def ignoreUnavailable(ignore: Boolean): GetAliasesRequest = copy(ignoreUnavailable = ignore.some)
}
sksamuel/elastic4s
elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/alias/GetAliasesRequest.scala
Scala
apache-2.0
360
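GetAliasesRequest is the value object behind elastic4s's get-aliases DSL. A hedged construction example; the index and alias names are invented, and Indexes is assumed to accept a Seq[String] as in elastic4s:

import com.sksamuel.elastic4s.Indexes
import com.sksamuel.elastic4s.requests.alias.GetAliasesRequest

object GetAliasesDemo extends App {
  // Look up one alias on a pair of indexes, skipping indexes that do not exist.
  val req = GetAliasesRequest(Indexes(Seq("orders-2023", "orders-2024")), Seq("orders-read"))
    .ignoreUnavailable(true)

  println(req.indices)
  println(req.aliases)
  println(req.ignoreUnavailable) // Some(true)
}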
class Foo {
  def f: Int = 50
  def init: Int = f
}

class Bar extends Foo {
  private var m = 10
  override def f: Int = m
}

class Qux extends Bar {
  init
  override def f = a
  private val a = 30 // error
}
som-snytt/dotty
tests/init/neg/override31.scala
Scala
apache-2.0
213
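The // error marker documents why Dotty's initialization checker rejects Qux: init runs in Qux's constructor before private val a is assigned, and the overridden f reads a. A plain-Scala sketch of the same trap, runnable without the checker, where the premature read observes 0 instead of 30:

object InitOrderDemo extends App {
  class Foo { def f: Int = 50; def init: Int = f }
  class Bar extends Foo { private var m = 10; override def f: Int = m }

  class Qux extends Bar {
    val early = init      // calls f before `a` below has been assigned
    override def f = a
    private val a = 30
  }

  val q = new Qux
  println(q.early) // 0: `a` was still uninitialized when init ran
  println(q.f)     // 30: after construction, `a` is set
}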
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.deploy.yarn import scala.collection.JavaConverters._ import scala.collection.mutable.{ArrayBuffer, HashMap, Set} import org.apache.hadoop.conf.Configuration import org.apache.hadoop.yarn.api.records.ContainerId import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest import org.apache.spark.SparkConf import org.apache.spark.resource.ResourceProfile private[yarn] case class ContainerLocalityPreferences(nodes: Array[String], racks: Array[String]) /** * This strategy is calculating the optimal locality preferences of YARN containers by considering * the node ratio of pending tasks, number of required cores/containers and locality of current * existing and pending allocated containers. The target of this algorithm is to maximize the number * of tasks that would run locally. * * Consider a situation in which we have 20 tasks that require (host1, host2, host3) * and 10 tasks that require (host1, host2, host4), besides each container has 2 cores * and cpus per task is 1, so the required container number is 15, * and host ratio is (host1: 30, host2: 30, host3: 20, host4: 10). * * 1. If the requested container number (18) is more than the required container number (15): * * requests for 5 containers with nodes: (host1, host2, host3, host4) * requests for 5 containers with nodes: (host1, host2, host3) * requests for 5 containers with nodes: (host1, host2) * requests for 3 containers with no locality preferences. * * The placement ratio is 3 : 3 : 2 : 1, and set the additional containers with no locality * preferences. * * 2. If requested container number (10) is less than or equal to the required container number * (15): * * requests for 4 containers with nodes: (host1, host2, host3, host4) * requests for 3 containers with nodes: (host1, host2, host3) * requests for 3 containers with nodes: (host1, host2) * * The placement ratio is 10 : 10 : 7 : 4, close to expected ratio (3 : 3 : 2 : 1) * * 3. If containers exist but none of them can match the requested localities, * follow the method of 1 and 2. * * 4. If containers exist and some of them can match the requested localities. * For example if we have 1 container on each node (host1: 1, host2: 1: host3: 1, host4: 1), * and the expected containers on each node would be (host1: 5, host2: 5, host3: 4, host4: 2), * so the newly requested containers on each node would be updated to (host1: 4, host2: 4, * host3: 3, host4: 1), 12 containers by total. * * 4.1 If requested container number (18) is more than newly required containers (12). Follow * method 1 with an updated ratio 4 : 4 : 3 : 1. * * 4.2 If request container number (10) is less than newly required containers (12). Follow * method 2 with an updated ratio 4 : 4 : 3 : 1. * * 5. 
If containers exist and existing localities can fully cover the requested localities. * For example if we have 5 containers on each node (host1: 5, host2: 5, host3: 5, host4: 5), * which could cover the current requested localities. This algorithm will allocate all the * requested containers with no localities. */ private[yarn] class LocalityPreferredContainerPlacementStrategy( val sparkConf: SparkConf, val yarnConf: Configuration, resolver: SparkRackResolver) { /** * Calculate each container's node locality and rack locality * @param numContainer number of containers to calculate * @param numLocalityAwareTasks number of locality required tasks * @param hostToLocalTaskCount a map to store the preferred hostname and possible task * numbers running on it, used as hints for container allocation * @param allocatedHostToContainersMap host to allocated containers map, used to calculate the * expected locality preference by considering the existing * containers * @param localityMatchedPendingAllocations A sequence of pending container request which * matches the localities of current required tasks. * @param rp The ResourceProfile associated with this container. * @return node localities and rack localities, each locality is an array of string, * the length of localities is the same as number of containers */ def localityOfRequestedContainers( numContainer: Int, numLocalityAwareTasks: Int, hostToLocalTaskCount: Map[String, Int], allocatedHostToContainersMap: HashMap[String, Set[ContainerId]], localityMatchedPendingAllocations: Seq[ContainerRequest], rp: ResourceProfile ): Array[ContainerLocalityPreferences] = { val updatedHostToContainerCount = expectedHostToContainerCount( numLocalityAwareTasks, hostToLocalTaskCount, allocatedHostToContainersMap, localityMatchedPendingAllocations, rp) val updatedLocalityAwareContainerNum = updatedHostToContainerCount.values.sum // The number of containers to allocate, divided into two groups, one with preferred locality, // and the other without locality preference. val requiredLocalityFreeContainerNum = math.max(0, numContainer - updatedLocalityAwareContainerNum) val requiredLocalityAwareContainerNum = numContainer - requiredLocalityFreeContainerNum val containerLocalityPreferences = ArrayBuffer[ContainerLocalityPreferences]() if (requiredLocalityFreeContainerNum > 0) { for (i <- 0 until requiredLocalityFreeContainerNum) { containerLocalityPreferences += ContainerLocalityPreferences( null.asInstanceOf[Array[String]], null.asInstanceOf[Array[String]]) } } if (requiredLocalityAwareContainerNum > 0) { val largestRatio = updatedHostToContainerCount.values.max // Round the ratio of preferred locality to the number of locality required container // number, which is used for locality preferred host calculating. var preferredLocalityRatio = updatedHostToContainerCount.map { case(k, ratio) => val adjustedRatio = ratio.toDouble * requiredLocalityAwareContainerNum / largestRatio (k, adjustedRatio.ceil.toInt) } for (i <- 0 until requiredLocalityAwareContainerNum) { // Only filter out the ratio which is larger than 0, which means the current host can // still be allocated with new container request. val hosts = preferredLocalityRatio.filter(_._2 > 0).keys.toArray val racks = resolver.resolve(hosts).map(_.getNetworkLocation) .filter(_ != null).toSet containerLocalityPreferences += ContainerLocalityPreferences(hosts, racks.toArray) // Minus 1 each time when the host is used. 
When the current ratio is 0, // which means all the required ratio is satisfied, this host will not be allocated again. preferredLocalityRatio = preferredLocalityRatio.map { case (k, v) => (k, v - 1) } } } containerLocalityPreferences.toArray } /** * Calculate the number of executors needed to satisfy the given number of pending tasks for * the ResourceProfile. */ private def numExecutorsPending( numTasksPending: Int, rp: ResourceProfile): Int = { val tasksPerExec = rp.maxTasksPerExecutor(sparkConf) math.ceil(numTasksPending / tasksPerExec.toDouble).toInt } /** * Calculate the expected host to number of containers by considering with allocated containers. * @param localityAwareTasks number of locality aware tasks * @param hostToLocalTaskCount a map to store the preferred hostname and possible task * numbers running on it, used as hints for container allocation * @param allocatedHostToContainersMap host to allocated containers map, used to calculate the * expected locality preference by considering the existing * containers * @param localityMatchedPendingAllocations A sequence of pending container request which * matches the localities of current required tasks. * @return a map with hostname as key and required number of containers on this host as value */ private def expectedHostToContainerCount( localityAwareTasks: Int, hostToLocalTaskCount: Map[String, Int], allocatedHostToContainersMap: HashMap[String, Set[ContainerId]], localityMatchedPendingAllocations: Seq[ContainerRequest], rp: ResourceProfile ): Map[String, Int] = { val totalLocalTaskNum = hostToLocalTaskCount.values.sum val pendingHostToContainersMap = pendingHostToContainerCount(localityMatchedPendingAllocations) hostToLocalTaskCount.map { case (host, count) => val expectedCount = count.toDouble * numExecutorsPending(localityAwareTasks, rp) / totalLocalTaskNum // Take the locality of pending containers into consideration val existedCount = allocatedHostToContainersMap.get(host).map(_.size).getOrElse(0) + pendingHostToContainersMap.getOrElse(host, 0.0) // If existing container can not fully satisfy the expected number of container, // the required container number is expected count minus existed count. Otherwise the // required container number is 0. (host, math.max(0, (expectedCount - existedCount).ceil.toInt)) } } /** * According to the locality ratio and number of container requests, calculate the host to * possible number of containers for pending allocated containers. * * If current locality ratio of hosts is: Host1 : Host2 : Host3 = 20 : 20 : 10, * and pending container requests is 3, so the possible number of containers on * Host1 : Host2 : Host3 will be 1.2 : 1.2 : 0.6. * @param localityMatchedPendingAllocations A sequence of pending container request which * matches the localities of current required tasks. 
* @return a Map with hostname as key and possible number of containers on this host as value */ private def pendingHostToContainerCount( localityMatchedPendingAllocations: Seq[ContainerRequest]): Map[String, Double] = { val pendingHostToContainerCount = new HashMap[String, Int]() localityMatchedPendingAllocations.foreach { cr => cr.getNodes.asScala.foreach { n => val count = pendingHostToContainerCount.getOrElse(n, 0) + 1 pendingHostToContainerCount(n) = count } } val possibleTotalContainerNum = pendingHostToContainerCount.values.sum val localityMatchedPendingNum = localityMatchedPendingAllocations.size.toDouble pendingHostToContainerCount.map { case (k, v) => (k, v * localityMatchedPendingNum / possibleTotalContainerNum) }.toMap } }
maropu/spark
resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/LocalityPreferredContainerPlacementStrategy.scala
Scala
apache-2.0
11,600
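The scaladoc's worked example (host ratio 30 : 30 : 20 : 10, 2 cores per container, 1 cpu per task) can be replayed outside YARN. A self-contained sketch of the expectedHostToContainerCount arithmetic that ignores already-allocated and pending containers, which is a simplification of the private method above:

object LocalityRatioDemo extends App {
  // 20 tasks want (host1, host2, host3) and 10 tasks want (host1, host2, host4).
  val hostToLocalTaskCount = Map("host1" -> 30, "host2" -> 30, "host3" -> 20, "host4" -> 10)
  val numLocalityAwareTasks = 30
  val tasksPerExecutor = 2 // 2 cores per container, 1 cpu per task

  val pendingExecutors = math.ceil(numLocalityAwareTasks / tasksPerExecutor.toDouble).toInt // 15
  val totalLocalTaskNum = hostToLocalTaskCount.values.sum                                   // 90

  // With no existing or pending containers, the expected count is just the weighted share.
  val expected = hostToLocalTaskCount.map { case (host, count) =>
    host -> math.max(0, (count.toDouble * pendingExecutors / totalLocalTaskNum).ceil.toInt)
  }

  println(expected) // Map(host1 -> 5, host2 -> 5, host3 -> 4, host4 -> 2)
}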
package com.komanov.mysql.streaming import java.sql._ import java.util import scala.collection.mutable object Query { val TableName = "test_table" val CreateSql = s""" CREATE TABLE $TableName ( id INT, name VARCHAR(1000), PRIMARY KEY (id) ) ENGINE=InnoDB DEFAULT CHARSET=utf8 """ private val MillionSql = s""" SELECT t1.id AS id1, t1.name AS name1, t2.id AS id2, t2.name AS name2, t2.name AS more1, t2.name AS more2, t2.name AS more3, t2.name AS more4 FROM $TableName t1 LEFT JOIN $TableName t2 ON TRUE """ val TableSize = 1000 val TestData = generateTestData(200) private val map = new util.IdentityHashMap[MysqlDriver, Connection]() private def generateTestData(length: Int) = { (1 to TableSize) .map(i => TestTableRow(i, i.toString.padTo(length, '0'))) .toList } def prepareTable(driver: MysqlDriver, length: Int = 200): Unit = { val conn = getConnection(driver) val st = conn.prepareStatement(s"INSERT INTO $TableName (id, name) VALUES(?, ?)") for (row <- generateTestData(length)) { st.setInt(1, row.id) st.setString(2, row.name) require(st.executeUpdate() == 1) } st.close() } def clearTable(driver: MysqlDriver): Unit = { withStatement(driver) { st => st.execute(s"TRUNCATE TABLE $TableName") } } def selectAtOnce(driver: MysqlDriver): List[TestTableRow] = { withStatement(driver) { st => val rs = st.executeQuery(s"SELECT id, name FROM $TableName") val result = mutable.ListBuffer[TestTableRow]() while (rs.next()) { result += mapRow(rs) } result.toList } } def selectAtOnce(driver: MysqlDriver, limit: Int): List[TestTableRow] = { withStatement(driver) { st => val rs = st.executeQuery(s"SELECT id, name FROM $TableName LIMIT $limit") val result = mutable.ListBuffer[TestTableRow]() while (rs.next()) { result += mapRow(rs) } result.toList } } def selectViaStreaming(driver: MysqlDriver): List[TestTableRow] = { val result = mutable.ListBuffer[TestTableRow]() forEach(driver, r => result += r) result.toList } def selectViaStreaming(driver: MysqlDriver, limit: Int): List[TestTableRow] = { val result = mutable.ListBuffer[TestTableRow]() forEach(driver, limit, r => result += r) result.toList } def forEach(driver: MysqlDriver, f: TestTableRow => Unit): Unit = { withStatement(driver) { st => st.setFetchSize(Int.MinValue) val rs = st.executeQuery(s"SELECT id, name FROM $TableName") while (rs.next()) { f(mapRow(rs)) } } } def forEach(driver: MysqlDriver, limit: Int, f: TestTableRow => Unit): Unit = { withStatement(driver) { st => st.setFetchSize(Int.MinValue) val rs = st.executeQuery(s"SELECT id, name FROM $TableName LIMIT $limit") while (rs.next()) { f(mapRow(rs)) } } } def forEachMillionAtOnce(driver: MysqlDriver): Unit = { withStatement(driver) { st => // no setFetchSize! 
var count = 0 val rs = st.executeQuery(MillionSql) while (rs.next()) { mapRow(rs) count += 1 } require(count == TableSize * TableSize) } } def forEachMillionViaStreaming(driver: MysqlDriver): Unit = { withStatement(driver) { st => st.setFetchSize(Int.MinValue) var count = 0 val rs = st.executeQuery(MillionSql) while (rs.next()) { mapRow(rs) count += 1 } require(count == TableSize * TableSize) } } private def mapRow(rs: ResultSet): TestTableRow = { TestTableRow(rs.getInt(1), rs.getString(2)) } private def withStatement[T](driver: MysqlDriver)(f: Statement => T): T = { val st = getConnection(driver).createStatement() try { f(st) } finally { st.close() } } private def getConnection(d: MysqlDriver): Connection = { var c = map.get(d) if (c == null) { c = DriverManager.getConnection(d.url) map.put(d, c) } c } } case class TestTableRow(id: Int, name: String)
dkomanov/stuff
src/com/komanov/mysql/streaming/Query.scala
Scala
mit
4,125
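The key trick in Query's streaming variants is st.setFetchSize(Int.MinValue), which switches MySQL Connector/J into row-by-row streaming instead of buffering the whole result set in memory. A stripped-down sketch of the same pattern over plain JDBC; the URL and credentials are placeholders:

import java.sql.DriverManager

object StreamingSelectDemo extends App {
  // Placeholder connection settings.
  val conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/test?useSSL=false", "user", "password")

  val st = conn.createStatement()
  st.setFetchSize(Int.MinValue) // MySQL-specific: stream rows instead of materializing the result set

  val rs = st.executeQuery("SELECT id, name FROM test_table")
  try {
    while (rs.next()) {
      val id = rs.getInt(1)
      val name = rs.getString(2)
      println(s"$id -> $name")
    }
  } finally {
    rs.close()
    st.close()
    conn.close()
  }
}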
package dmtest.stack

import dmtest._

object Pool {
  case class S(pool: Pool, size: Sector) extends Stack {
    private val linear: Linear = pool.alloc(size)
    override def terminate: Unit = {
      pool.free(linear)
    }
    override def path = linear.path
  }

  case class Range(start: Sector, len: Sector) extends Ordered[Range] {
    override def compare(that: Range): Int = (this.start.unwrap - that.start.unwrap).toInt
    def take(size: Sector): (Range, Range) = {
      require(len >= size)
      (Range(start, size), Range(start + size, len - size))
    }
  }

  class FreeArea(initSize: Sector) {
    val m = scala.collection.mutable.SortedSet[Range](Range(Sector(0), initSize))

    def getFreeSpace(size: Sector): Range = {
      val target = m.find(_.len >= size).get
      m -= target
      val (res, rem) = target.take(size)
      if (rem.len > Sector(0))
        m += rem
      res
    }

    def merge(): Boolean = {
      val a = m.toList
      val b = m.toList.drop(1)
      a.zip(b).find { case (x, y) => x.start + x.len == y.start } match {
        case Some((x, y)) =>
          m -= x
          m -= y
          m += Range(x.start, x.len + y.len)
          true
        case None =>
          false
      }
    }

    def release(range: Range) = {
      m += range
      while (merge) {}
    }
  }
}

case class Pool(pool: Stack) {
  import Pool._

  val freeArea = new FreeArea(pool.bdev.size)

  def alloc(size: Sector): Linear = {
    val space = freeArea.getFreeSpace(size)
    Linear.Table(pool, space.start, space.len).create
  }

  def free(linearS: Linear): Unit = {
    linearS.terminate()
    val space = Range(linearS.start, linearS.len)
    freeArea.release(space)
  }
}
akiradeveloper/dmtest
src/main/scala/dmtest/stack/Pool.scala
Scala
apache-2.0
1,699
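The FreeArea bookkeeping in Pool above is first-fit allocation plus coalescing of adjacent free ranges. The sketch below reproduces that idea with plain Long offsets instead of dmtest's Sector type; it is a simplified illustration of the algorithm, not the dmtest code.

import scala.collection.mutable

object FreeAreaSketch {
  final case class Range(start: Long, len: Long) extends Ordered[Range] {
    def compare(that: Range): Int = java.lang.Long.compare(this.start, that.start)
  }

  final class FreeArea(initSize: Long) {
    private val free = mutable.SortedSet(Range(0L, initSize))

    // First-fit: take the first free range that is large enough, keep the remainder free.
    def alloc(size: Long): Range = {
      val target = free.find(_.len >= size).getOrElse(sys.error("out of space"))
      free -= target
      if (target.len > size) free += Range(target.start + size, target.len - size)
      Range(target.start, size)
    }

    // Return a range and merge it with any adjacent free ranges (coalescing).
    def release(r: Range): Unit = {
      free += r
      var merged = true
      while (merged) {
        merged = free.toList.sliding(2).collectFirst {
          case List(a, b) if a.start + a.len == b.start => (a, b)
        } match {
          case Some((a, b)) =>
            free -= a; free -= b; free += Range(a.start, a.len + b.len)
            true
          case None => false
        }
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val fa = new FreeArea(100L)
    val a = fa.alloc(30L)
    val b = fa.alloc(20L)
    fa.release(a) // leaves a free hole [0, 30) next to the allocated [30, 50)
    fa.release(b) // coalesces everything back into one contiguous free range [0, 100)
  }
}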
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.carbondata.commands import org.apache.spark.sql.test.util.QueryTest import org.scalatest.BeforeAndAfterAll import org.apache.carbondata.core.constants.CarbonLoadOptionConstants import org.apache.carbondata.core.exception.InvalidConfigurationException class SetCommandTestCase extends QueryTest with BeforeAndAfterAll{ override def beforeAll: Unit = { sql("set carbon=true") } test("test set command") { checkAnswer(sql("set"), sql("set")) } test("test set any value command") { checkAnswer(sql("set carbon=false"), sql("set carbon")) } test("test set command for enable.unsafe.sort=true") { checkAnswer(sql("set enable.unsafe.sort=true"), sql("set enable.unsafe.sort")) } test("test set command for enable.unsafe.sort for invalid option") { intercept[InvalidConfigurationException] { checkAnswer(sql("set enable.unsafe.sort=123"), sql("set enable.unsafe.sort")) } } // is_empty_data_bad_record test(s"test set command for" + s" ${ CarbonLoadOptionConstants.CARBON_OPTIONS_BAD_RECORDS_LOGGER_ENABLE }=true") { checkAnswer(sql(s"set ${ CarbonLoadOptionConstants .CARBON_OPTIONS_BAD_RECORDS_LOGGER_ENABLE }=true"), sql(s"set ${ CarbonLoadOptionConstants.CARBON_OPTIONS_BAD_RECORDS_LOGGER_ENABLE }")) } test(s"test set command for ${ CarbonLoadOptionConstants.CARBON_OPTIONS_BAD_RECORDS_LOGGER_ENABLE} for invalid option") { intercept[InvalidConfigurationException] { checkAnswer(sql(s"set ${ CarbonLoadOptionConstants .CARBON_OPTIONS_BAD_RECORDS_LOGGER_ENABLE }=123"), sql(s"set ${ CarbonLoadOptionConstants.CARBON_OPTIONS_BAD_RECORDS_LOGGER_ENABLE }")) } } test(s"test set command for ${ CarbonLoadOptionConstants .CARBON_OPTIONS_IS_EMPTY_DATA_BAD_RECORD }=true") { checkAnswer(sql(s"set ${ CarbonLoadOptionConstants .CARBON_OPTIONS_IS_EMPTY_DATA_BAD_RECORD }=true"), sql(s"set ${ CarbonLoadOptionConstants.CARBON_OPTIONS_IS_EMPTY_DATA_BAD_RECORD }")) } test("test set command for " + s"${CarbonLoadOptionConstants.CARBON_OPTIONS_IS_EMPTY_DATA_BAD_RECORD} for invalid option") { intercept[InvalidConfigurationException] { checkAnswer( sql(s"set ${CarbonLoadOptionConstants.CARBON_OPTIONS_IS_EMPTY_DATA_BAD_RECORD}=123"), sql(s"set ${CarbonLoadOptionConstants.CARBON_OPTIONS_IS_EMPTY_DATA_BAD_RECORD}")) } } // carbon.custom.block.distribution test("test set command for carbon.custom.block.distribution=true") { checkAnswer(sql("set carbon.custom.block.distribution=true"), sql("set carbon.custom.block.distribution")) } test("test set command for carbon.custom.block.distribution for invalid option") { intercept[InvalidConfigurationException] { checkAnswer(sql("set carbon.custom.block.distribution=123"), sql("set carbon.custom.block.distribution")) } } // sort_scope test(s"test set command for 
${CarbonLoadOptionConstants.CARBON_OPTIONS_SORT_SCOPE}=LOCAL_SORT") { checkAnswer(sql(s"set ${CarbonLoadOptionConstants.CARBON_OPTIONS_SORT_SCOPE}=LOCAL_SORT"), sql(s"set ${CarbonLoadOptionConstants.CARBON_OPTIONS_SORT_SCOPE}")) } test("test set command for " + s"${CarbonLoadOptionConstants.CARBON_OPTIONS_SORT_SCOPE} for invalid option") { intercept[InvalidConfigurationException] { checkAnswer(sql(s"set ${CarbonLoadOptionConstants.CARBON_OPTIONS_SORT_SCOPE}=123"), sql(s"set ${CarbonLoadOptionConstants.CARBON_OPTIONS_SORT_SCOPE}")) } } test(s"test set carbon.table.load.sort.scope for valid options") { checkAnswer( sql(s"set carbon.table.load.sort.scope.db.tbl=no_sort"), sql(s"set carbon.table.load.sort.scope.db.tbl")) checkAnswer( sql(s"set carbon.table.load.sort.scope.db.tbl=local_sort"), sql(s"set carbon.table.load.sort.scope.db.tbl")) checkAnswer( sql(s"set carbon.table.load.sort.scope.db.tbl=global_sort"), sql(s"set carbon.table.load.sort.scope.db.tbl")) } test(s"test set carbon.table.load.sort.scope for invalid options") { intercept[InvalidConfigurationException] { checkAnswer( sql(s"set carbon.table.load.sort.scope.db.tbl=fake_sort"), sql(s"set carbon.table.load.sort.scope.db.tbl")) } } override def afterAll { defaultConfig() sqlContext.sparkSession.catalog.clearCache() Seq("carbon", "carbon.table.load.sort.scope.db.tbl").foreach { key => sqlContext.sparkSession.conf.unset(key) } } }
zzcclp/carbondata
integration/spark/src/test/scala/org/apache/spark/carbondata/commands/SetCommandTestCase.scala
Scala
apache-2.0
5,371
package com.twitter.finagle.tracing

import com.twitter.finagle._
import com.twitter.finagle.client.Transporter
import java.net.{SocketAddress, InetSocketAddress}

/**
 * [[com.twitter.finagle.ServiceFactoryProxy]] used to trace the local addr and
 * server addr.
 */
class ServerDestTracingProxy[Req, Rep](self: ServiceFactory[Req, Rep])
  extends ServiceFactoryProxy[Req, Rep](self) {

  override def apply(conn: ClientConnection) = {
    // this filter gymnastics is done so that annotation occurs after
    // traceId is set by any inbound request with tracing enabled
    val filter = new SimpleFilter[Req,Rep] {
      def apply(request: Req, service: Service[Req, Rep]) = {
        if (Trace.isActivelyTracing) {
          conn.localAddress match {
            case ia: InetSocketAddress =>
              Trace.recordLocalAddr(ia)
              Trace.recordServerAddr(ia)
            case _ => // do nothing for non-ip address
          }

          conn.remoteAddress match {
            case ia: InetSocketAddress =>
              Trace.recordClientAddr(ia)
            case _ => // do nothing for non-ip address
          }
        }
        service(request)
      }
    }

    self(conn) map { filter andThen _ }
  }
}

private[finagle] object ClientDestTracingFilter {
  object EndpointTracing extends Stack.Role

  /**
   * $module [[com.twitter.finagle.tracing.ClientDestTracingFilter]].
   */
  def module[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
    new Stack.Simple[ServiceFactory[Req, Rep]](EndpointTracing) {
      val description = "Record remote address of server"
      def make(params: Params, next: ServiceFactory[Req, Rep]) = {
        val Transporter.EndpointAddr(addr) = params[Transporter.EndpointAddr]
        new ClientDestTracingFilter(addr) andThen next
      }
    }
}

/**
 * [[com.twitter.finagle.Filter]] for clients to record the remote address of the server.
 * We don't log the local addr here because it's already done in the client Dispatcher.
 */
class ClientDestTracingFilter[Req,Rep](remoteSock: SocketAddress)
  extends SimpleFilter[Req,Rep] {

  def apply(request: Req, service: Service[Req, Rep]) = {
    val ret = service(request)
    remoteSock match {
      case ia: InetSocketAddress =>
        Trace.recordServerAddr(ia)
      case _ => // do nothing for non-ip address
    }
    ret
  }
}
JustinTulloss/finagle
finagle-core/src/main/scala/com/twitter/finagle/tracing/DestinationTracing.scala
Scala
apache-2.0
2,347
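The wrap-a-service-in-a-filter pattern used by ServerDestTracingProxy and ClientDestTracingFilter above can be shown without finagle-core. The Service and SimpleFilter traits below are stripped-down, synchronous stand-ins (no Futures, no Stack), and the println stands in for Trace.recordServerAddr.

import java.net.InetSocketAddress

object TracingFilterSketch {
  // Simplified stand-ins for finagle's Service / SimpleFilter.
  trait Service[Req, Rep] {
    def apply(req: Req): Rep
  }

  trait SimpleFilter[Req, Rep] {
    def apply(req: Req, service: Service[Req, Rep]): Rep

    // Compose the filter around a service, like finagle's `filter andThen service`.
    def andThen(service: Service[Req, Rep]): Service[Req, Rep] = {
      val outer = this
      new Service[Req, Rep] {
        def apply(req: Req): Rep = outer.apply(req, service)
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val echo = new Service[String, String] {
      def apply(req: String): String = s"received $req"
    }

    val serverAddr = new InetSocketAddress("127.0.0.1", 8080)
    val tracing = new SimpleFilter[String, String] {
      def apply(req: String, service: Service[String, String]): String = {
        // println stands in for Trace.recordServerAddr(serverAddr) in the real filter.
        println(s"serverAddr=$serverAddr")
        service(req)
      }
    }

    val traced = tracing.andThen(echo)
    println(traced("ping")) // prints the recorded address, then "received ping"
  }
}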
package model.dtos

case class DiscussionThread(var id: Option[Long],
                            discussion_thread_type_id: Int, // 1 = thread refers to a part of the article, 2 = thread refers to the whole article
                            clientId: String,               // client-side id produced by javascript
                            text: String,
                            numberOfComments: Option[Int])
scify/DemocracIT-Web
app/model/dtos/DiscussionThread.scala
Scala
apache-2.0
406
/*
 * Copyright 2017 Spotify AB.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package com.spotify.featran.transformers

import breeze.linalg._
import com.spotify.featran.{FeatureBuilder, FeatureRejection, FlatReader, FlatWriter}
import com.twitter.algebird.Aggregator

/**
 * Transform vector features by normalizing each vector to have unit norm. Parameter `p` specifies
 * the p-norm used for normalization (default 2).
 *
 * Missing values are transformed to zero vectors.
 *
 * When using aggregated feature summary from a previous session, vectors of different dimensions
 * are transformed to zero vectors and [[FeatureRejection.WrongDimension]] rejections are reported.
 */
object Normalizer extends SettingsBuilder {

  /**
   * Create a new [[Normalizer]] instance.
   * @param p
   *   normalization in L^p^ space, must be greater than or equal to 1.0
   * @param expectedLength
   *   expected length of the input vectors, or 0 to infer from data
   */
  def apply(
    name: String,
    p: Double = 2.0,
    expectedLength: Int = 0
  ): Transformer[Array[Double], Int, Int] = new Normalizer(name, p, expectedLength)

  /**
   * Create a new [[Normalizer]] from a settings object
   * @param setting
   *   Settings object
   */
  def fromSettings(setting: Settings): Transformer[Array[Double], Int, Int] = {
    val p = setting.params("p").toDouble
    val expectedLength = setting.params("expectedLength").toInt
    Normalizer(setting.name, p, expectedLength)
  }
}

private[featran] class Normalizer(name: String, val p: Double, val expectedLength: Int)
    extends Transformer[Array[Double], Int, Int](name) {
  require(p >= 1.0, "p must be >= 1.0")

  override val aggregator: Aggregator[Array[Double], Int, Int] =
    Aggregators.seqLength(expectedLength)

  override def featureDimension(c: Int): Int = c

  override def featureNames(c: Int): Seq[String] = names(c)

  override def buildFeatures(a: Option[Array[Double]], c: Int, fb: FeatureBuilder[_]): Unit =
    a match {
      case Some(x) =>
        if (x.length != c) {
          fb.skip(c)
          fb.reject(this, FeatureRejection.WrongDimension(c, x.length))
        } else {
          val dv = DenseVector(x)
          fb.add(names(c), (dv / norm(dv, p))(DenseVector.dv_s_Op_Double_OpDiv).data)
        }
      case None => fb.skip(c)
    }

  override def encodeAggregator(c: Int): String = c.toString

  override def decodeAggregator(s: String): Int = s.toInt

  override def params: Map[String, String] =
    Map("p" -> p.toString, "expectedLength" -> expectedLength.toString)

  override def flatRead[T: FlatReader]: T => Option[Any] = FlatReader[T].readDoubleArray(name)

  override def flatWriter[T](implicit fw: FlatWriter[T]): Option[Array[Double]] => fw.IF =
    fw.writeDoubleArray(name)
}
spotify/featran
core/src/main/scala/com/spotify/featran/transformers/Normalizer.scala
Scala
apache-2.0
3,301
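The transform applied by Normalizer above is just division by the vector's p-norm. The helper below redoes that arithmetic in plain Scala (no breeze) as a rough reference for the math rather than the featran code path; the zero-norm guard is an added assumption.

object NormalizerSketch {
  // Divide each element by the L^p norm of the vector (p >= 1).
  def normalize(xs: Array[Double], p: Double = 2.0): Array[Double] = {
    require(p >= 1.0, "p must be >= 1.0")
    val norm = math.pow(xs.map(x => math.pow(math.abs(x), p)).sum, 1.0 / p)
    if (norm == 0.0) xs.map(_ => 0.0) else xs.map(_ / norm)
  }

  def main(args: Array[String]): Unit = {
    val v = Array(3.0, 4.0)
    println(normalize(v).mkString(", "))          // 0.6, 0.8        (unit L2 norm)
    println(normalize(v, p = 1.0).mkString(", ")) // 3/7, 4/7 approx (unit L1 norm)
  }
}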
/* __ *\\ ** ________ ___ / / ___ __ ____ Scala.js API ** ** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL ** ** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-lang.org/ ** ** /____/\\___/_/ |_/____/_/ | |__/ /____/ ** ** |/____/ ** \\* */ package scala.scalajs.js import scala.language.implicitConversions /** Value of type A or the JS undefined value. * In a type system with union types, this would really be * `A | js.prim.Undefined`. Since Scala does not have union types, but this * particular union is crucial to many interoperability scenarios, it is * provided as this trait. * * An API similar to that of [[scala.Option]] is provided through the * [[UndefOrOps]] implicit class, with the understanding that `undefined` is * the None value. */ @scala.scalajs.js.annotation.RawJSType // Don't do this at home! sealed trait UndefOr[+A] object UndefOr { implicit def any2undefOrA[A](value: A): UndefOr[A] = value.asInstanceOf[UndefOr[A]] implicit def undefOr2ops[A](value: UndefOr[A]): UndefOrOps[A] = new UndefOrOps(value) implicit def undefOr2jsAny[A](value: UndefOr[A])(implicit ev: A => Any): Any = value.map(ev).asInstanceOf[Any] } /** @define option [[UndefOr]] * @define none [[undefined]] */ final class UndefOrOps[A](val self: UndefOr[A]) extends AnyVal { import UndefOrOps._ /** Returns true if the option is `undefined`, false otherwise. */ @inline final def isEmpty: Boolean = isUndefined(self) /** Returns true if the option is not `undefined`, false otherwise. */ @inline final def isDefined: Boolean = !isEmpty /** Returns the option's value. * @note The option must be nonEmpty. * @throws Predef.NoSuchElementException if the option is empty. */ @inline final def get: A = if (isEmpty) throw new NoSuchElementException("undefined.get") else self.asInstanceOf[A] @inline final private def forceGet: A = self.asInstanceOf[A] /** Returns the option's value if the option is nonempty, otherwise * return the result of evaluating `default`. * * @param default the default expression. */ @inline final def getOrElse[B >: A](default: => B): B = if (isEmpty) default else this.forceGet /** Returns the option's value if it is nonempty, * or `null` if it is empty. * Although the use of null is discouraged, code written to use * $option must often interface with code that expects and returns nulls. * @example {{{ * val initalText: Option[String] = getInitialText * val textField = new JComponent(initalText.orNull,20) * }}} */ @inline final def orNull[A1 >: A](implicit ev: Null <:< A1): A1 = this getOrElse ev(null) /** Returns a $some containing the result of applying $f to this $option's * value if this $option is nonempty. * Otherwise return $none. * * @note This is similar to `flatMap` except here, * $f does not need to wrap its result in an $option. * * @param f the function to apply * @see flatMap * @see foreach */ @inline final def map[B](f: A => B): UndefOr[B] = if (isEmpty) undefined else f(this.forceGet) /** Returns the result of applying $f to this $option's * value if the $option is nonempty. Otherwise, evaluates * expression `ifEmpty`. * * @note This is equivalent to `$option map f getOrElse ifEmpty`. * * @param ifEmpty the expression to evaluate if empty. * @param f the function to apply if nonempty. */ @inline final def fold[B](ifEmpty: => B)(f: A => B): B = if (isEmpty) ifEmpty else f(this.forceGet) /** Returns the result of applying $f to this $option's value if * this $option is nonempty. * Returns $none if this $option is empty. 
* Slightly different from `map` in that $f is expected to * return an $option (which could be $none). * * @param f the function to apply * @see map * @see foreach */ @inline final def flatMap[B](f: A => UndefOr[B]): UndefOr[B] = if (isEmpty) undefined else f(this.forceGet) def flatten[B](implicit ev: A <:< UndefOr[B]): UndefOr[B] = if (isEmpty) undefined else ev(this.forceGet) /** Returns this $option if it is nonempty '''and''' applying the predicate $p to * this $option's value returns true. Otherwise, return $none. * * @param p the predicate used for testing. */ @inline final def filter(p: A => Boolean): UndefOr[A] = if (isEmpty || p(this.forceGet)) self else undefined /** Returns this $option if it is nonempty '''and''' applying the predicate $p to * this $option's value returns false. Otherwise, return $none. * * @param p the predicate used for testing. */ @inline final def filterNot(p: A => Boolean): UndefOr[A] = if (isEmpty || !p(this.forceGet)) self else undefined /** Returns false if the option is $none, true otherwise. * @note Implemented here to avoid the implicit conversion to Iterable. */ final def nonEmpty = isDefined /** Necessary to keep $option from being implicitly converted to * [[scala.collection.Iterable]] in `for` comprehensions. */ @inline final def withFilter(p: A => Boolean): WithFilter[A] = new WithFilter(self, p) /** Returns true if this option is nonempty '''and''' the predicate * $p returns true when applied to this $option's value. * Otherwise, returns false. * * @param p the predicate to test */ @inline final def exists(p: A => Boolean): Boolean = !isEmpty && p(this.forceGet) /** Returns true if this option is empty '''or''' the predicate * $p returns true when applied to this $option's value. * * @param p the predicate to test */ @inline final def forall(p: A => Boolean): Boolean = isEmpty || p(this.forceGet) /** Apply the given procedure $f to the option's value, * if it is nonempty. Otherwise, do nothing. * * @param f the procedure to apply. * @see map * @see flatMap */ @inline final def foreach[U](f: A => U): Unit = if (!isEmpty) f(this.forceGet) /** Returns a $some containing the result of * applying `pf` to this $option's contained * value, '''if''' this option is * nonempty '''and''' `pf` is defined for that value. * Returns $none otherwise. * * @param pf the partial function. * @return the result of applying `pf` to this $option's * value (if possible), or $none. */ @inline final def collect[B](pf: PartialFunction[A, B]): UndefOr[B] = if (isEmpty) undefined else pf.applyOrElse(this.forceGet, (_: A) => undefined).asInstanceOf[UndefOr[B]] /** Returns this $option if it is nonempty, * otherwise return the result of evaluating `alternative`. * @param alternative the alternative expression. */ @inline final def orElse[B >: A](alternative: => UndefOr[B]): UndefOr[B] = if (isEmpty) alternative else self /** Returns a singleton iterator returning the $option's value * if it is nonempty, or an empty iterator if the option is empty. */ def iterator: Iterator[A] = if (isEmpty) scala.collection.Iterator.empty else scala.collection.Iterator.single(this.forceGet) /** Returns a singleton list containing the $option's value * if it is nonempty, or the empty list if the $option is empty. */ def toList: List[A] = if (isEmpty) Nil else this.forceGet :: Nil /** Returns a [[scala.util.Left]] containing the given * argument `left` if this $option is empty, or * a [[scala.util.Right]] containing this $option's value if * this is nonempty. 
* * @param left the expression to evaluate and return if this is empty * @see toLeft */ @inline final def toRight[X](left: => X): Either[X, A] = if (isEmpty) Left(left) else Right(this.forceGet) /** Returns a [[scala.util.Right]] containing the given * argument `right` if this is empty, or * a [[scala.util.Left]] containing this $option's value * if this $option is nonempty. * * @param right the expression to evaluate and return if this is empty * @see toRight */ @inline final def toLeft[X](right: => X): Either[A, X] = if (isEmpty) Right(right) else Left(this.forceGet) /** Returns a [[scala.Some]] containing this $options's value * if this $option is nonempty, [[scala.None]] otherwise. */ @inline final def toOption: Option[A] = if (isEmpty) None else Some(this.forceGet) } object UndefOrOps { /** We need a whole WithFilter class to honor the "doesn't create a new * collection" contract even though it seems unlikely to matter much in a * collection with max size 1. */ class WithFilter[A](self: UndefOr[A], p: A => Boolean) { def map[B](f: A => B): UndefOr[B] = self filter p map f def flatMap[B](f: A => UndefOr[B]): UndefOr[B] = self filter p flatMap f def foreach[U](f: A => U): Unit = self filter p foreach f def withFilter(q: A => Boolean): WithFilter[A] = new WithFilter[A](self, x => p(x) && q(x)) } }
matthughes/scala-js
library/src/main/scala/scala/scalajs/js/UndefOr.scala
Scala
bsd-3-clause
9,316
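UndefOrOps above deliberately mirrors scala.Option, with undefined playing the role of None. The JVM snippet below walks the same combinators on Option itself; it is the analogy the scaladoc draws, not Scala.js code.

object UndefOrAnalogySketch {
  def main(args: Array[String]): Unit = {
    // `None` plays the role of `js.undefined`; `Some` plays the role of a defined UndefOr.
    val defined: Option[Int] = Some(21)
    val missing: Option[Int] = None

    println(defined.map(_ * 2))                       // Some(42)
    println(missing.getOrElse(0))                     // 0
    println(defined.fold("empty")(n => s"got $n"))    // got 21
    println(missing.collect { case n if n > 0 => n }) // None

    // for-comprehensions work the same way UndefOr's withFilter/map/flatMap enable them
    val sum = for { a <- defined; b <- Some(1) if b > 0 } yield a + b
    println(sum)                                      // Some(22)
  }
}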
/*
 * Copyright (c) 2018. Fengguo Wei and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License v2.0
 * which accompanies this distribution, and is available at
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Detailed contributors are listed in the CONTRIBUTOR.md
 */

package org.argus.amandroid.plugin.communication

import org.argus.amandroid.alir.taintAnalysis.AndroidSourceAndSinkManager
import org.argus.amandroid.core.ApkGlobal
import org.argus.jawa.core.ast.{AssignmentStatement, LiteralExpression, Location}
import org.argus.jawa.core.util._

/**
 * @author Fengchi Lin
 * @author <a href="mailto:[email protected]">Fengguo Wei</a>
 */
class CommunicationSourceAndSinkManager(sasFilePath: String)
  extends AndroidSourceAndSinkManager(sasFilePath) {

  override def isStmtSource(apk: ApkGlobal, loc: Location): Boolean = {
    var flag = false
    val visitor = Visitor.build({
      case as: AssignmentStatement =>
        as.rhs match {
          case le: LiteralExpression =>
            if (le.isString) {
              if (le.getString.contains("call_log") && le.getString.contains("calls")) {
                flag = true
              } else if (le.getString.contains("icc") && le.getString.contains("adn")) {
                flag = true
              } else if (le.getString.contains("com.android.contacts")) {
                flag = true
              } else if (le.getString.contains("sms/")) {
                flag = true
              }
            }
            false
          case _ => false
        }
    })
    visitor(loc)
    flag
  }
}
arguslab/Argus-SAF
amandroid/src/main/scala/org/argus/amandroid/plugin/communication/CommunicationSourceAndSinkManager.scala
Scala
apache-2.0
1,653
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gearpump.akkastream.module import akka.stream.impl.StreamLayout.{AtomicModule, Module} import akka.stream.impl.{SinkModule, SourceModule} import akka.stream.{Attributes, MaterializationContext, SinkShape, SourceShape} import org.reactivestreams.{Publisher, Subscriber} /** * [[DummyModule]] is a set of special module to help construct a RunnableGraph, * so that all ports are closed. * * In runtime, [[DummyModule]] should be ignored during materialization. * * For example, if you have a [[BridgeModule]] which only accept the input * message from out of band channel, then you can use DummySource to fake * a Message Source Like this. * * [[DummySource]] -> [[BridgeModule]] -> Sink * /| * / * out of band input message [[Publisher]] * * After materialization, [[DummySource]] will be removed. * [[BridgeModule]] -> Sink * /| * / * [[akka.stream.impl.PublisherSource]] * * */ trait DummyModule extends AtomicModule /** * * [[DummySource]]-> [[BridgeModule]] -> Sink * /| * / * out of band input message Source * * @param attributes Attributes * @param shape SourceShape[Out] * @tparam Out Output */ class DummySource[Out](val attributes: Attributes, shape: SourceShape[Out]) extends SourceModule[Out, Unit](shape) with DummyModule { override def create(context: MaterializationContext): (Publisher[Out], Unit) = { throw new UnsupportedOperationException() } override protected def newInstance(shape: SourceShape[Out]): SourceModule[Out, Unit] = { new DummySource[Out](attributes, shape) } override def withAttributes(attr: Attributes): Module = { new DummySource(attr, amendShape(attr)) } } /** * * Source-> [[BridgeModule]] -> [[DummySink]] * \\ * \\ * \\| * out of band output message [[Subscriber]] * * @param attributes Attributes * @param shape SinkShape[IN] */ class DummySink[IN](val attributes: Attributes, shape: SinkShape[IN]) extends SinkModule[IN, Unit](shape) with DummyModule { override def create(context: MaterializationContext): (Subscriber[IN], Unit) = { throw new UnsupportedOperationException() } override protected def newInstance(shape: SinkShape[IN]): SinkModule[IN, Unit] = { new DummySink[IN](attributes, shape) } override def withAttributes(attr: Attributes): Module = { new DummySink[IN](attr, amendShape(attr)) } }
manuzhang/incubator-gearpump
experiments/akkastream/src/main/scala/org/apache/gearpump/akkastream/module/DummyModule.scala
Scala
apache-2.0
3,423
package com.fustigatedcat.heystk.common.normalization

import java.util.Date

case class Normalization(log: Log,
                         startProcessing: Long,
                         endProcessing: Long = new Date().getTime,
                         fields: Map[String, (String, String)] = Map())
fustigatedcat/heystk
heystk-common/src/main/scala/com/fustigatedcat/heystk/common/normalization/Normalization.scala
Scala
gpl-3.0
308
package camelinaction

import org.apache.camel.{Exchange, Processor}
import org.apache.camel.ExchangePattern._

import akka.actor.Actor._
import akka.camel._

/**
 * @author Martin Krasser
 */
object SectionE32 extends Application {
  import SampleActors._

  val service = CamelServiceManager.startCamelService

  val httpConsumer1 = actorOf[HttpConsumer1]
  val httpConsumer2 = actorOf[HttpConsumer2]

  service.awaitEndpointActivation(2) {
    httpConsumer1.start
    httpConsumer2.start
  }

  for (template <- CamelContextManager.template) {
    // in-out message exchange with HttpConsumer1
    val exchange1 = template.send("http://localhost:8811/consumer1", InOut, new Processor {
      def process(exchange: Exchange) = exchange.getIn.setBody("Akka rocks")
    })

    // in-out message exchange with HttpConsumer2
    val exchange2 = template.send("http://localhost:8811/consumer2", InOut, new Processor {
      def process(exchange: Exchange) = exchange.getIn.setBody("Akka rocks")
    })

    assert("received Akka rocks" == exchange1.getOut.getBody(classOf[String]))
    assert("<received>Akka rocks</received>" == exchange2.getOut.getBody(classOf[String]))
    assert("application/xml" == exchange2.getOut.getHeader("Content-Type"))
  }

  service.stop

  httpConsumer1.stop
  httpConsumer2.stop
}
sprklinginfo/camelinaction2
old_chapters/appendixE/src/main/scala/SectionE32.scala
Scala
apache-2.0
1,338
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions.objects import java.lang.reflect.Modifier import scala.collection.mutable.Builder import scala.language.existentials import scala.reflect.ClassTag import org.apache.spark.{SparkConf, SparkEnv} import org.apache.spark.serializer._ import org.apache.spark.sql.Row import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.encoders.RowEncoder import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode} import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, GenericArrayData} import org.apache.spark.sql.types._ /** * Common base class for [[StaticInvoke]], [[Invoke]], and [[NewInstance]]. */ trait InvokeLike extends Expression with NonSQLExpression { def arguments: Seq[Expression] def propagateNull: Boolean protected lazy val needNullCheck: Boolean = propagateNull && arguments.exists(_.nullable) /** * Prepares codes for arguments. * * - generate codes for argument. * - use ctx.splitExpressions() to not exceed 64kb JVM limit while preparing arguments. * - avoid some of nullabilty checking which are not needed because the expression is not * nullable. * - when needNullCheck == true, short circuit if we found one of arguments is null because * preparing rest of arguments can be skipped in the case. * * @param ctx a [[CodegenContext]] * @return (code to prepare arguments, argument string, result of argument null check) */ def prepareArguments(ctx: CodegenContext): (String, String, String) = { val resultIsNull = if (needNullCheck) { val resultIsNull = ctx.freshName("resultIsNull") ctx.addMutableState("boolean", resultIsNull, "") resultIsNull } else { "false" } val argValues = arguments.map { e => val argValue = ctx.freshName("argValue") ctx.addMutableState(ctx.javaType(e.dataType), argValue, "") argValue } val argCodes = if (needNullCheck) { val reset = s"$resultIsNull = false;" val argCodes = arguments.zipWithIndex.map { case (e, i) => val expr = e.genCode(ctx) val updateResultIsNull = if (e.nullable) { s"$resultIsNull = ${expr.isNull};" } else { "" } s""" if (!$resultIsNull) { ${expr.code} $updateResultIsNull ${argValues(i)} = ${expr.value}; } """ } reset +: argCodes } else { arguments.zipWithIndex.map { case (e, i) => val expr = e.genCode(ctx) s""" ${expr.code} ${argValues(i)} = ${expr.value}; """ } } val argCode = ctx.splitExpressions(ctx.INPUT_ROW, argCodes) (argCode, argValues.mkString(", "), resultIsNull) } } /** * Invokes a static function, returning the result. By default, any of the arguments being null * will result in returning null instead of calling the function. * * @param staticObject The target of the static call. 
This can either be the object itself * (methods defined on scala objects), or the class object * (static methods defined in java). * @param dataType The expected return type of the function call * @param functionName The name of the method to call. * @param arguments An optional list of expressions to pass as arguments to the function. * @param propagateNull When true, and any of the arguments is null, null will be returned instead * of calling the function. */ case class StaticInvoke( staticObject: Class[_], dataType: DataType, functionName: String, arguments: Seq[Expression] = Nil, propagateNull: Boolean = true) extends InvokeLike { val objectName = staticObject.getName.stripSuffix("$") override def nullable: Boolean = true override def children: Seq[Expression] = arguments override def eval(input: InternalRow): Any = throw new UnsupportedOperationException("Only code-generated evaluation is supported.") override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val javaType = ctx.javaType(dataType) val (argCode, argString, resultIsNull) = prepareArguments(ctx) val callFunc = s"$objectName.$functionName($argString)" // If the function can return null, we do an extra check to make sure our null bit is still set // correctly. val postNullCheck = if (ctx.defaultValue(dataType) == "null") { s"${ev.isNull} = ${ev.value} == null;" } else { "" } val code = s""" $argCode boolean ${ev.isNull} = $resultIsNull; final $javaType ${ev.value} = $resultIsNull ? ${ctx.defaultValue(dataType)} : $callFunc; $postNullCheck """ ev.copy(code = code) } } /** * Calls the specified function on an object, optionally passing arguments. If the `targetObject` * expression evaluates to null then null will be returned. * * In some cases, due to erasure, the schema may expect a primitive type when in fact the method * is returning java.lang.Object. In this case, we will generate code that attempts to unbox the * value automatically. * * @param targetObject An expression that will return the object to call the method on. * @param functionName The name of the method to call. * @param dataType The expected return type of the function. * @param arguments An optional list of expressions, whos evaluation will be passed to the function. * @param propagateNull When true, and any of the arguments is null, null will be returned instead * of calling the function. * @param returnNullable When false, indicating the invoked method will always return * non-null value. 
*/ case class Invoke( targetObject: Expression, functionName: String, dataType: DataType, arguments: Seq[Expression] = Nil, propagateNull: Boolean = true, returnNullable : Boolean = true) extends InvokeLike { override def nullable: Boolean = targetObject.nullable || needNullCheck || returnNullable override def children: Seq[Expression] = targetObject +: arguments override def eval(input: InternalRow): Any = throw new UnsupportedOperationException("Only code-generated evaluation is supported.") @transient lazy val method = targetObject.dataType match { case ObjectType(cls) => val m = cls.getMethods.find(_.getName == functionName) if (m.isEmpty) { sys.error(s"Couldn't find $functionName on $cls") } else { m } case _ => None } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val javaType = ctx.javaType(dataType) val obj = targetObject.genCode(ctx) val (argCode, argString, resultIsNull) = prepareArguments(ctx) val returnPrimitive = method.isDefined && method.get.getReturnType.isPrimitive val needTryCatch = method.isDefined && method.get.getExceptionTypes.nonEmpty def getFuncResult(resultVal: String, funcCall: String): String = if (needTryCatch) { s""" try { $resultVal = $funcCall; } catch (Exception e) { org.apache.spark.unsafe.Platform.throwException(e); } """ } else { s"$resultVal = $funcCall;" } val evaluate = if (returnPrimitive) { getFuncResult(ev.value, s"${obj.value}.$functionName($argString)") } else { val funcResult = ctx.freshName("funcResult") // If the function can return null, we do an extra check to make sure our null bit is still // set correctly. val assignResult = if (!returnNullable) { s"${ev.value} = (${ctx.boxedType(javaType)}) $funcResult;" } else { s""" if ($funcResult != null) { ${ev.value} = (${ctx.boxedType(javaType)}) $funcResult; } else { ${ev.isNull} = true; } """ } s""" Object $funcResult = null; ${getFuncResult(funcResult, s"${obj.value}.$functionName($argString)")} $assignResult """ } val code = s""" ${obj.code} boolean ${ev.isNull} = true; $javaType ${ev.value} = ${ctx.defaultValue(dataType)}; if (!${obj.isNull}) { $argCode ${ev.isNull} = $resultIsNull; if (!${ev.isNull}) { $evaluate } } """ ev.copy(code = code) } override def toString: String = s"$targetObject.$functionName" } object NewInstance { def apply( cls: Class[_], arguments: Seq[Expression], dataType: DataType, propagateNull: Boolean = true): NewInstance = new NewInstance(cls, arguments, propagateNull, dataType, None) } /** * Constructs a new instance of the given class, using the result of evaluating the specified * expressions as arguments. * * @param cls The class to construct. * @param arguments A list of expression to use as arguments to the constructor. * @param propagateNull When true, if any of the arguments is null, then null will be returned * instead of trying to construct the object. * @param dataType The type of object being constructed, as a Spark SQL datatype. This allows you * to manually specify the type when the object in question is a valid internal * representation (i.e. ArrayData) instead of an object. * @param outerPointer If the object being constructed is an inner class, the outerPointer for the * containing class must be specified. This parameter is defined as an optional * function, which allows us to get the outer pointer lazily,and it's useful if * the inner class is defined in REPL. 
*/ case class NewInstance( cls: Class[_], arguments: Seq[Expression], propagateNull: Boolean, dataType: DataType, outerPointer: Option[() => AnyRef]) extends InvokeLike { private val className = cls.getName override def nullable: Boolean = needNullCheck override def children: Seq[Expression] = arguments override lazy val resolved: Boolean = { // If the class to construct is an inner class, we need to get its outer pointer, or this // expression should be regarded as unresolved. // Note that static inner classes (e.g., inner classes within Scala objects) don't need // outer pointer registration. val needOuterPointer = outerPointer.isEmpty && cls.isMemberClass && !Modifier.isStatic(cls.getModifiers) childrenResolved && !needOuterPointer } override def eval(input: InternalRow): Any = throw new UnsupportedOperationException("Only code-generated evaluation is supported.") override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val javaType = ctx.javaType(dataType) val (argCode, argString, resultIsNull) = prepareArguments(ctx) val outer = outerPointer.map(func => Literal.fromObject(func()).genCode(ctx)) ev.isNull = resultIsNull val constructorCall = outer.map { gen => s"${gen.value}.new ${cls.getSimpleName}($argString)" }.getOrElse { s"new $className($argString)" } val code = s""" $argCode ${outer.map(_.code).getOrElse("")} final $javaType ${ev.value} = ${ev.isNull} ? ${ctx.defaultValue(javaType)} : $constructorCall; """ ev.copy(code = code) } override def toString: String = s"newInstance($cls)" } /** * Given an expression that returns on object of type `Option[_]`, this expression unwraps the * option into the specified Spark SQL datatype. In the case of `None`, the nullbit is set instead. * * @param dataType The expected unwrapped option type. * @param child An expression that returns an `Option` */ case class UnwrapOption( dataType: DataType, child: Expression) extends UnaryExpression with NonSQLExpression with ExpectsInputTypes { override def nullable: Boolean = true override def inputTypes: Seq[AbstractDataType] = ObjectType :: Nil override def eval(input: InternalRow): Any = throw new UnsupportedOperationException("Only code-generated evaluation is supported") override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val javaType = ctx.javaType(dataType) val inputObject = child.genCode(ctx) val code = s""" ${inputObject.code} final boolean ${ev.isNull} = ${inputObject.isNull} || ${inputObject.value}.isEmpty(); $javaType ${ev.value} = ${ev.isNull} ? ${ctx.defaultValue(javaType)} : (${ctx.boxedType(javaType)}) ${inputObject.value}.get(); """ ev.copy(code = code) } } /** * Converts the result of evaluating `child` into an option, checking both the isNull bit and * (in the case of reference types) equality with null. * * @param child The expression to evaluate and wrap. * @param optType The type of this option. */ case class WrapOption(child: Expression, optType: DataType) extends UnaryExpression with NonSQLExpression with ExpectsInputTypes { override def dataType: DataType = ObjectType(classOf[Option[_]]) override def nullable: Boolean = false override def inputTypes: Seq[AbstractDataType] = optType :: Nil override def eval(input: InternalRow): Any = throw new UnsupportedOperationException("Only code-generated evaluation is supported") override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val inputObject = child.genCode(ctx) val code = s""" ${inputObject.code} scala.Option ${ev.value} = ${inputObject.isNull} ? 
scala.Option$$.MODULE$$.apply(null) : new scala.Some(${inputObject.value}); """ ev.copy(code = code, isNull = "false") } } /** * A placeholder for the loop variable used in [[MapObjects]]. This should never be constructed * manually, but will instead be passed into the provided lambda function. */ case class LambdaVariable( value: String, isNull: String, dataType: DataType, nullable: Boolean = true) extends LeafExpression with Unevaluable with NonSQLExpression { override def genCode(ctx: CodegenContext): ExprCode = { ExprCode(code = "", value = value, isNull = if (nullable) isNull else "false") } } /** * When constructing [[MapObjects]], the element type must be given, which may not be available * before analysis. This class acts like a placeholder for [[MapObjects]], and will be replaced by * [[MapObjects]] during analysis after the input data is resolved. * Note that, ideally we should not serialize and send unresolved expressions to executors, but * users may accidentally do this(e.g. mistakenly reference an encoder instance when implementing * Aggregator). Here we mark `function` as transient because it may reference scala Type, which is * not serializable. Then even users mistakenly reference unresolved expression and serialize it, * it's just a performance issue(more network traffic), and will not fail. */ case class UnresolvedMapObjects( @transient function: Expression => Expression, child: Expression, customCollectionCls: Option[Class[_]] = None) extends UnaryExpression with Unevaluable { override lazy val resolved = false override def dataType: DataType = customCollectionCls.map(ObjectType.apply).getOrElse { throw new UnsupportedOperationException("not resolved") } } object MapObjects { private val curId = new java.util.concurrent.atomic.AtomicInteger() /** * Construct an instance of MapObjects case class. * * @param function The function applied on the collection elements. * @param inputData An expression that when evaluated returns a collection object. * @param elementType The data type of elements in the collection. * @param elementNullable When false, indicating elements in the collection are always * non-null value. * @param customCollectionCls Class of the resulting collection (returning ObjectType) * or None (returning ArrayType) */ def apply( function: Expression => Expression, inputData: Expression, elementType: DataType, elementNullable: Boolean = true, customCollectionCls: Option[Class[_]] = None): MapObjects = { val id = curId.getAndIncrement() val loopValue = s"MapObjects_loopValue$id" val loopIsNull = s"MapObjects_loopIsNull$id" val loopVar = LambdaVariable(loopValue, loopIsNull, elementType, elementNullable) MapObjects( loopValue, loopIsNull, elementType, function(loopVar), inputData, customCollectionCls) } } /** * Applies the given expression to every element of a collection of items, returning the result * as an ArrayType or ObjectType. This is similar to a typical map operation, but where the lambda * function is expressed using catalyst expressions. 
* * The type of the result is determined as follows: * - ArrayType - when customCollectionCls is None * - ObjectType(collection) - when customCollectionCls contains a collection class * * The following collection ObjectTypes are currently supported on input: * Seq, Array, ArrayData, java.util.List * * @param loopValue the name of the loop variable that used when iterate the collection, and used * as input for the `lambdaFunction` * @param loopIsNull the nullity of the loop variable that used when iterate the collection, and * used as input for the `lambdaFunction` * @param loopVarDataType the data type of the loop variable that used when iterate the collection, * and used as input for the `lambdaFunction` * @param lambdaFunction A function that take the `loopVar` as input, and used as lambda function * to handle collection elements. * @param inputData An expression that when evaluated returns a collection object. * @param customCollectionCls Class of the resulting collection (returning ObjectType) * or None (returning ArrayType) */ case class MapObjects private( loopValue: String, loopIsNull: String, loopVarDataType: DataType, lambdaFunction: Expression, inputData: Expression, customCollectionCls: Option[Class[_]]) extends Expression with NonSQLExpression { override def nullable: Boolean = inputData.nullable override def children: Seq[Expression] = lambdaFunction :: inputData :: Nil override def eval(input: InternalRow): Any = throw new UnsupportedOperationException("Only code-generated evaluation is supported") override def dataType: DataType = customCollectionCls.map(ObjectType.apply).getOrElse( ArrayType(lambdaFunction.dataType, containsNull = lambdaFunction.nullable)) override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val elementJavaType = ctx.javaType(loopVarDataType) ctx.addMutableState("boolean", loopIsNull, "") ctx.addMutableState(elementJavaType, loopValue, "") val genInputData = inputData.genCode(ctx) val genFunction = lambdaFunction.genCode(ctx) val dataLength = ctx.freshName("dataLength") val convertedArray = ctx.freshName("convertedArray") val loopIndex = ctx.freshName("loopIndex") val convertedType = ctx.boxedType(lambdaFunction.dataType) // Because of the way Java defines nested arrays, we have to handle the syntax specially. // Specifically, we have to insert the [$dataLength] in between the type and any extra nested // array declarations (i.e. new String[1][]). val arrayConstructor = if (convertedType contains "[]") { val rawType = convertedType.takeWhile(_ != '[') val arrayPart = convertedType.reverse.takeWhile(c => c == '[' || c == ']').reverse s"new $rawType[$dataLength]$arrayPart" } else { s"new $convertedType[$dataLength]" } // In RowEncoder, we use `Object` to represent Array or Seq, so we need to determine the type // of input collection at runtime for this case. val seq = ctx.freshName("seq") val array = ctx.freshName("array") val determineCollectionType = inputData.dataType match { case ObjectType(cls) if cls == classOf[Object] => val seqClass = classOf[Seq[_]].getName s""" $seqClass $seq = null; $elementJavaType[] $array = null; if (${genInputData.value}.getClass().isArray()) { $array = ($elementJavaType[]) ${genInputData.value}; } else { $seq = ($seqClass) ${genInputData.value}; } """ case _ => "" } // The data with PythonUserDefinedType are actually stored with the data type of its sqlType. // When we want to apply MapObjects on it, we have to use it. 
val inputDataType = inputData.dataType match { case p: PythonUserDefinedType => p.sqlType case _ => inputData.dataType } val (getLength, getLoopVar) = inputDataType match { case ObjectType(cls) if classOf[Seq[_]].isAssignableFrom(cls) => s"${genInputData.value}.size()" -> s"${genInputData.value}.apply($loopIndex)" case ObjectType(cls) if cls.isArray => s"${genInputData.value}.length" -> s"${genInputData.value}[$loopIndex]" case ObjectType(cls) if classOf[java.util.List[_]].isAssignableFrom(cls) => s"${genInputData.value}.size()" -> s"${genInputData.value}.get($loopIndex)" case ArrayType(et, _) => s"${genInputData.value}.numElements()" -> ctx.getValue(genInputData.value, et, loopIndex) case ObjectType(cls) if cls == classOf[Object] => s"$seq == null ? $array.length : $seq.size()" -> s"$seq == null ? $array[$loopIndex] : $seq.apply($loopIndex)" } // Make a copy of the data if it's unsafe-backed def makeCopyIfInstanceOf(clazz: Class[_ <: Any], value: String) = s"$value instanceof ${clazz.getSimpleName}? ${value}.copy() : $value" val genFunctionValue = lambdaFunction.dataType match { case StructType(_) => makeCopyIfInstanceOf(classOf[UnsafeRow], genFunction.value) case ArrayType(_, _) => makeCopyIfInstanceOf(classOf[UnsafeArrayData], genFunction.value) case MapType(_, _, _) => makeCopyIfInstanceOf(classOf[UnsafeMapData], genFunction.value) case _ => genFunction.value } val loopNullCheck = inputDataType match { case _: ArrayType => s"$loopIsNull = ${genInputData.value}.isNullAt($loopIndex);" // The element of primitive array will never be null. case ObjectType(cls) if cls.isArray && cls.getComponentType.isPrimitive => s"$loopIsNull = false" case _ => s"$loopIsNull = $loopValue == null;" } val (initCollection, addElement, getResult): (String, String => String, String) = customCollectionCls match { case Some(cls) => // collection val getBuilder = s"${cls.getName}$$.MODULE$$.newBuilder()" val builder = ctx.freshName("collectionBuilder") ( s""" ${classOf[Builder[_, _]].getName} $builder = $getBuilder; $builder.sizeHint($dataLength); """, genValue => s"$builder.$$plus$$eq($genValue);", s"(${cls.getName}) $builder.result();" ) case None => // array ( s""" $convertedType[] $convertedArray = null; $convertedArray = $arrayConstructor; """, genValue => s"$convertedArray[$loopIndex] = $genValue;", s"new ${classOf[GenericArrayData].getName}($convertedArray);" ) } val code = s""" ${genInputData.code} ${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)}; if (!${genInputData.isNull}) { $determineCollectionType int $dataLength = $getLength; $initCollection int $loopIndex = 0; while ($loopIndex < $dataLength) { $loopValue = ($elementJavaType) ($getLoopVar); $loopNullCheck ${genFunction.code} if (${genFunction.isNull}) { ${addElement("null")} } else { ${addElement(genFunctionValue)} } $loopIndex += 1; } ${ev.value} = $getResult } """ ev.copy(code = code, isNull = genInputData.isNull) } } object ExternalMapToCatalyst { private val curId = new java.util.concurrent.atomic.AtomicInteger() def apply( inputMap: Expression, keyType: DataType, keyConverter: Expression => Expression, valueType: DataType, valueConverter: Expression => Expression, valueNullable: Boolean): ExternalMapToCatalyst = { val id = curId.getAndIncrement() val keyName = "ExternalMapToCatalyst_key" + id val valueName = "ExternalMapToCatalyst_value" + id val valueIsNull = "ExternalMapToCatalyst_value_isNull" + id ExternalMapToCatalyst( keyName, keyType, keyConverter(LambdaVariable(keyName, "false", keyType, false)), valueName, 
valueIsNull, valueType, valueConverter(LambdaVariable(valueName, valueIsNull, valueType, valueNullable)), inputMap ) } } /** * Converts a Scala/Java map object into catalyst format, by applying the key/value converter when * iterate the map. * * @param key the name of the map key variable that used when iterate the map, and used as input for * the `keyConverter` * @param keyType the data type of the map key variable that used when iterate the map, and used as * input for the `keyConverter` * @param keyConverter A function that take the `key` as input, and converts it to catalyst format. * @param value the name of the map value variable that used when iterate the map, and used as input * for the `valueConverter` * @param valueIsNull the nullability of the map value variable that used when iterate the map, and * used as input for the `valueConverter` * @param valueType the data type of the map value variable that used when iterate the map, and * used as input for the `valueConverter` * @param valueConverter A function that take the `value` as input, and converts it to catalyst * format. * @param child An expression that when evaluated returns the input map object. */ case class ExternalMapToCatalyst private( key: String, keyType: DataType, keyConverter: Expression, value: String, valueIsNull: String, valueType: DataType, valueConverter: Expression, child: Expression) extends UnaryExpression with NonSQLExpression { override def foldable: Boolean = false override def dataType: MapType = MapType( keyConverter.dataType, valueConverter.dataType, valueContainsNull = valueConverter.nullable) override def eval(input: InternalRow): Any = throw new UnsupportedOperationException("Only code-generated evaluation is supported") override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val inputMap = child.genCode(ctx) val genKeyConverter = keyConverter.genCode(ctx) val genValueConverter = valueConverter.genCode(ctx) val length = ctx.freshName("length") val index = ctx.freshName("index") val convertedKeys = ctx.freshName("convertedKeys") val convertedValues = ctx.freshName("convertedValues") val entry = ctx.freshName("entry") val entries = ctx.freshName("entries") val keyElementJavaType = ctx.javaType(keyType) val valueElementJavaType = ctx.javaType(valueType) ctx.addMutableState(keyElementJavaType, key, "") ctx.addMutableState("boolean", valueIsNull, "") ctx.addMutableState(valueElementJavaType, value, "") val (defineEntries, defineKeyValue) = child.dataType match { case ObjectType(cls) if classOf[java.util.Map[_, _]].isAssignableFrom(cls) => val javaIteratorCls = classOf[java.util.Iterator[_]].getName val javaMapEntryCls = classOf[java.util.Map.Entry[_, _]].getName val defineEntries = s"final $javaIteratorCls $entries = ${inputMap.value}.entrySet().iterator();" val defineKeyValue = s""" final $javaMapEntryCls $entry = ($javaMapEntryCls) $entries.next(); $key = (${ctx.boxedType(keyType)}) $entry.getKey(); $value = (${ctx.boxedType(valueType)}) $entry.getValue(); """ defineEntries -> defineKeyValue case ObjectType(cls) if classOf[scala.collection.Map[_, _]].isAssignableFrom(cls) => val scalaIteratorCls = classOf[Iterator[_]].getName val scalaMapEntryCls = classOf[Tuple2[_, _]].getName val defineEntries = s"final $scalaIteratorCls $entries = ${inputMap.value}.iterator();" val defineKeyValue = s""" final $scalaMapEntryCls $entry = ($scalaMapEntryCls) $entries.next(); $key = (${ctx.boxedType(keyType)}) $entry._1(); $value = (${ctx.boxedType(valueType)}) $entry._2(); """ defineEntries 
-> defineKeyValue } val valueNullCheck = if (ctx.isPrimitiveType(valueType)) { s"$valueIsNull = false;" } else { s"$valueIsNull = $value == null;" } val arrayCls = classOf[GenericArrayData].getName val mapCls = classOf[ArrayBasedMapData].getName val convertedKeyType = ctx.boxedType(keyConverter.dataType) val convertedValueType = ctx.boxedType(valueConverter.dataType) val code = s""" ${inputMap.code} ${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)}; if (!${inputMap.isNull}) { final int $length = ${inputMap.value}.size(); final Object[] $convertedKeys = new Object[$length]; final Object[] $convertedValues = new Object[$length]; int $index = 0; $defineEntries while($entries.hasNext()) { $defineKeyValue $valueNullCheck ${genKeyConverter.code} if (${genKeyConverter.isNull}) { throw new RuntimeException("Cannot use null as map key!"); } else { $convertedKeys[$index] = ($convertedKeyType) ${genKeyConverter.value}; } ${genValueConverter.code} if (${genValueConverter.isNull}) { $convertedValues[$index] = null; } else { $convertedValues[$index] = ($convertedValueType) ${genValueConverter.value}; } $index++; } ${ev.value} = new $mapCls(new $arrayCls($convertedKeys), new $arrayCls($convertedValues)); } """ ev.copy(code = code, isNull = inputMap.isNull) } } /** * Constructs a new external row, using the result of evaluating the specified expressions * as content. * * @param children A list of expression to use as content of the external row. */ case class CreateExternalRow(children: Seq[Expression], schema: StructType) extends Expression with NonSQLExpression { override def dataType: DataType = ObjectType(classOf[Row]) override def nullable: Boolean = false override def eval(input: InternalRow): Any = throw new UnsupportedOperationException("Only code-generated evaluation is supported") override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val rowClass = classOf[GenericRowWithSchema].getName val values = ctx.freshName("values") ctx.addMutableState("Object[]", values, "") val childrenCodes = children.zipWithIndex.map { case (e, i) => val eval = e.genCode(ctx) eval.code + s""" if (${eval.isNull}) { $values[$i] = null; } else { $values[$i] = ${eval.value}; } """ } val childrenCode = ctx.splitExpressions(ctx.INPUT_ROW, childrenCodes) val schemaField = ctx.addReferenceObj("schema", schema) val code = s""" $values = new Object[${children.size}]; $childrenCode final ${classOf[Row].getName} ${ev.value} = new $rowClass($values, $schemaField); """ ev.copy(code = code, isNull = "false") } } /** * Serializes an input object using a generic serializer (Kryo or Java). * * @param kryo if true, use Kryo. Otherwise, use Java. */ case class EncodeUsingSerializer(child: Expression, kryo: Boolean) extends UnaryExpression with NonSQLExpression { override def eval(input: InternalRow): Any = throw new UnsupportedOperationException("Only code-generated evaluation is supported") override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { // Code to initialize the serializer. 
val serializer = ctx.freshName("serializer") val (serializerClass, serializerInstanceClass) = { if (kryo) { (classOf[KryoSerializer].getName, classOf[KryoSerializerInstance].getName) } else { (classOf[JavaSerializer].getName, classOf[JavaSerializerInstance].getName) } } // try conf from env, otherwise create a new one val env = s"${classOf[SparkEnv].getName}.get()" val sparkConf = s"new ${classOf[SparkConf].getName}()" val serializerInit = s""" if ($env == null) { $serializer = ($serializerInstanceClass) new $serializerClass($sparkConf).newInstance(); } else { $serializer = ($serializerInstanceClass) new $serializerClass($env.conf()).newInstance(); } """ ctx.addMutableState(serializerInstanceClass, serializer, serializerInit) // Code to serialize. val input = child.genCode(ctx) val javaType = ctx.javaType(dataType) val serialize = s"$serializer.serialize(${input.value}, null).array()" val code = s""" ${input.code} final $javaType ${ev.value} = ${input.isNull} ? ${ctx.defaultValue(javaType)} : $serialize; """ ev.copy(code = code, isNull = input.isNull) } override def dataType: DataType = BinaryType } /** * Serializes an input object using a generic serializer (Kryo or Java). Note that the ClassTag * is not an implicit parameter because TreeNode cannot copy implicit parameters. * * @param kryo if true, use Kryo. Otherwise, use Java. */ case class DecodeUsingSerializer[T](child: Expression, tag: ClassTag[T], kryo: Boolean) extends UnaryExpression with NonSQLExpression { override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { // Code to initialize the serializer. val serializer = ctx.freshName("serializer") val (serializerClass, serializerInstanceClass) = { if (kryo) { (classOf[KryoSerializer].getName, classOf[KryoSerializerInstance].getName) } else { (classOf[JavaSerializer].getName, classOf[JavaSerializerInstance].getName) } } // try conf from env, otherwise create a new one val env = s"${classOf[SparkEnv].getName}.get()" val sparkConf = s"new ${classOf[SparkConf].getName}()" val serializerInit = s""" if ($env == null) { $serializer = ($serializerInstanceClass) new $serializerClass($sparkConf).newInstance(); } else { $serializer = ($serializerInstanceClass) new $serializerClass($env.conf()).newInstance(); } """ ctx.addMutableState(serializerInstanceClass, serializer, serializerInit) // Code to deserialize. val input = child.genCode(ctx) val javaType = ctx.javaType(dataType) val deserialize = s"($javaType) $serializer.deserialize(java.nio.ByteBuffer.wrap(${input.value}), null)" val code = s""" ${input.code} final $javaType ${ev.value} = ${input.isNull} ? ${ctx.defaultValue(javaType)} : $deserialize; """ ev.copy(code = code, isNull = input.isNull) } override def dataType: DataType = ObjectType(tag.runtimeClass) } /** * Initialize a Java Bean instance by setting its field values via setters. 
*/ case class InitializeJavaBean(beanInstance: Expression, setters: Map[String, Expression]) extends Expression with NonSQLExpression { override def nullable: Boolean = beanInstance.nullable override def children: Seq[Expression] = beanInstance +: setters.values.toSeq override def dataType: DataType = beanInstance.dataType override def eval(input: InternalRow): Any = throw new UnsupportedOperationException("Only code-generated evaluation is supported.") override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val instanceGen = beanInstance.genCode(ctx) val javaBeanInstance = ctx.freshName("javaBean") val beanInstanceJavaType = ctx.javaType(beanInstance.dataType) ctx.addMutableState(beanInstanceJavaType, javaBeanInstance, "") val initialize = setters.map { case (setterMethod, fieldValue) => val fieldGen = fieldValue.genCode(ctx) s""" ${fieldGen.code} ${javaBeanInstance}.$setterMethod(${fieldGen.value}); """ } val initializeCode = ctx.splitExpressions(ctx.INPUT_ROW, initialize.toSeq) val code = s""" ${instanceGen.code} this.${javaBeanInstance} = ${instanceGen.value}; if (!${instanceGen.isNull}) { $initializeCode } """ ev.copy(code = code, isNull = instanceGen.isNull, value = instanceGen.value) } } /** * Asserts that input values of a non-nullable child expression are not null. * * Note that there are cases where `child.nullable == true`, while we still need to add this * assertion. Consider a nullable column `s` whose data type is a struct containing a non-nullable * `Int` field named `i`. Expression `s.i` is nullable because `s` can be null. However, for all * non-null `s`, `s.i` can't be null. */ case class AssertNotNull(child: Expression, walkedTypePath: Seq[String] = Nil) extends UnaryExpression with NonSQLExpression { override def dataType: DataType = child.dataType override def foldable: Boolean = false override def nullable: Boolean = false override def flatArguments: Iterator[Any] = Iterator(child) private val errMsg = "Null value appeared in non-nullable field:" + walkedTypePath.mkString("\n", "\n", "\n") + "If the schema is inferred from a Scala tuple/case class, or a Java bean, " + "please try to use scala.Option[_] or other nullable types " + "(e.g. java.lang.Integer instead of int/scala.Int)." override def eval(input: InternalRow): Any = { val result = child.eval(input) if (result == null) { throw new NullPointerException(errMsg) } result } override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val childGen = child.genCode(ctx) // Use unnamed reference that doesn't create a local field here to reduce the number of fields // because errMsgField is used only when the value is null. val errMsgField = ctx.addReferenceMinorObj(errMsg) val code = s""" ${childGen.code} if (${childGen.isNull}) { throw new NullPointerException($errMsgField); } """ ev.copy(code = code, isNull = "false", value = childGen.value) } } /** * Returns the value of field at index `index` from the external row `child`. * This class can be viewed as [[GetStructField]] for [[Row]]s instead of [[InternalRow]]s. * * Note that the input row and the field we try to get are both guaranteed to be not null, if they * are null, a runtime exception will be thrown. 
*/ case class GetExternalRowField( child: Expression, index: Int, fieldName: String) extends UnaryExpression with NonSQLExpression { override def nullable: Boolean = false override def dataType: DataType = ObjectType(classOf[Object]) override def eval(input: InternalRow): Any = throw new UnsupportedOperationException("Only code-generated evaluation is supported") private val errMsg = s"The ${index}th field '$fieldName' of input row cannot be null." override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { // Use unnamed reference that doesn't create a local field here to reduce the number of fields // because errMsgField is used only when the field is null. val errMsgField = ctx.addReferenceMinorObj(errMsg) val row = child.genCode(ctx) val code = s""" ${row.code} if (${row.isNull}) { throw new RuntimeException("The input external row cannot be null."); } if (${row.value}.isNullAt($index)) { throw new RuntimeException($errMsgField); } final Object ${ev.value} = ${row.value}.get($index); """ ev.copy(code = code, isNull = "false") } } /** * Validates the actual data type of input expression at runtime. If it doesn't match the * expectation, throw an exception. */ case class ValidateExternalType(child: Expression, expected: DataType) extends UnaryExpression with NonSQLExpression with ExpectsInputTypes { override def inputTypes: Seq[AbstractDataType] = Seq(ObjectType(classOf[Object])) override def nullable: Boolean = child.nullable override def dataType: DataType = RowEncoder.externalDataTypeForInput(expected) override def eval(input: InternalRow): Any = throw new UnsupportedOperationException("Only code-generated evaluation is supported") private val errMsg = s" is not a valid external type for schema of ${expected.simpleString}" override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { // Use unnamed reference that doesn't create a local field here to reduce the number of fields // because errMsgField is used only when the type doesn't match. val errMsgField = ctx.addReferenceMinorObj(errMsg) val input = child.genCode(ctx) val obj = input.value val typeCheck = expected match { case _: DecimalType => Seq(classOf[java.math.BigDecimal], classOf[scala.math.BigDecimal], classOf[Decimal]) .map(cls => s"$obj instanceof ${cls.getName}").mkString(" || ") case _: ArrayType => s"$obj instanceof ${classOf[Seq[_]].getName} || $obj.getClass().isArray()" case _ => s"$obj instanceof ${ctx.boxedType(dataType)}" } val code = s""" ${input.code} ${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)}; if (!${input.isNull}) { if ($typeCheck) { ${ev.value} = (${ctx.boxedType(dataType)}) $obj; } else { throw new RuntimeException($obj.getClass().getName() + $errMsgField); } } """ ev.copy(code = code, isNull = input.isNull) } }
wangyixiaohuihui/spark2-annotation
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala
Scala
apache-2.0
43,331
package controllers import play.api.mvc.Action import models.AssetSupport.IdType import play.api.mvc.BodyParsers.parse import models.customer.{AgentIn, AgentUpdate, AgentCreate} import controllers.utils.CrudController object Agents extends CrudController { override type MODEL = AgentIn override type UPDATEMODEL = AgentUpdate override type CREATEMODEL = AgentCreate def create = Action.async(parse.json) { request => super.create(request.body, id => controllers.routes.Agents.getById(id)) } def update(id: IdType) = Action.async(parse.json) { request => super.update(id, request.body) } def delete(id: String) = Action.async { super.delete(id) } }
tsechov/shoehorn
app/controllers/Agents.scala
Scala
apache-2.0
698
package dundertext.editor

import org.junit.Test
import org.junit.Assert._

class DocumentBufferTest {

  val buffer = DocumentBuffer.empty

  @Test
  def should_be_empty_at_start(): Unit = {
    assertEquals(0, buffer.length)
    assertTrue(buffer.isEmpty)
  }

  @Test
  def should_append_text(): Unit = {
    buffer.append("Hejsan")
    buffer.relink()

    assertEquals(1, buffer.length)
    assertEquals("Hejsan\n\n", buffer.asText)
  }

  @Test
  def should_append_multiple_text(): Unit = {
    buffer.append("Hejsan")
    buffer.append("Svejsan")
    buffer.relink()

    assertEquals(2, buffer.length)
    assertEquals(
      "Hejsan\n\n" +
      "Svejsan\n\n",
      buffer.asText)
  }
}
dundertext/dundertext
editor/src/test/scala/dundertext/editor/DocumentBufferTest.scala
Scala
gpl-3.0
689
package serialization import model._ import scodec.Codec import scodec.codecs._ import scalaz.std.string._ import scalaz.{IMap, Order} object Model { // Hash = 256 bits // MutablePtr = 256 bits // RemotePath = MutablePtr + int32 size of list + n strings (int32 size in bytes + the bytes) // List of A: int32 size, then A's. // Map[A,B] = List of (A,B) // Folder = Map[String, Hash], Map[String, RemotePath], Map[String, Folder] val hashSha256Codec: Codec[Hash] = "Hash-SHA256" | bits(256).as[Hash] val mutablePtrCodec: Codec[MutablePtr] = "Mutable Pointer" | hashSha256Codec.as[MutablePtr] val remotePathCodec: Codec[RemotePath] = "Remote Path" | (mutablePtrCodec :: listOfN(int32, utf8_32)).as[RemotePath] def imapCodec[A: Order, B](codecA: Codec[A], codecB: Codec[B]): Codec[IMap[A, B]] = listOfN(int32, codecA ~ codecB). xmap[IMap[A, B]](l => IMap(l: _*), _.toList) implicit lazy val folderCodec: Codec[Folder] = "Index Folder" | { imapCodec(utf8_32, hashSha256Codec) :: imapCodec(utf8_32, remotePathCodec.as[FollowLeaf]) :: imapCodec(utf8_32, lazily(folderCodec)) }.as[Folder] }
NightRa/CIFS
src/main/scala/serialization/Model.scala
Scala
bsd-3-clause
1,132
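A minimal round-trip sketch can make the layout comments in the Model object above concrete. This is illustrative only and not part of the repository: the object name is hypothetical, and it assumes the Attempt-based scodec API and scalaz Order[String] instance that the file above already imports.

// Illustrative sketch: round-trip an IMap[String, Int] through the imapCodec
// helper defined in serialization.Model above.
import scodec.codecs.{int32, utf8_32}
import scalaz.IMap
import scalaz.std.string._              // Order[String], needed by IMap and imapCodec
import serialization.Model.imapCodec

object ImapCodecRoundTrip extends App {
  // Encoded form: int32 element count, then (utf8_32 key, int32 value) pairs.
  val codec    = imapCodec(utf8_32, int32)
  val original = IMap("a" -> 1, "b" -> 2)

  val decoded = for {
    bits   <- codec.encode(original)    // Attempt[BitVector]
    result <- codec.decode(bits)        // Attempt[DecodeResult[IMap[String, Int]]]
  } yield result.value

  println(decoded)                      // Successful(IMap(...)) if the round trip worked
}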
package com.komanov.stringformat.jmh import java.util.concurrent.TimeUnit import com.komanov.stringformat.{InputArg, JavaFormats, ScalaFormats} import org.openjdk.jmh.annotations._ @State(Scope.Benchmark) @BenchmarkMode(Array(Mode.AverageTime)) @OutputTimeUnit(TimeUnit.NANOSECONDS) @Fork(value = 2, jvmArgs = Array("-Xmx2G")) @Measurement(iterations = 7, time = 3, timeUnit = TimeUnit.SECONDS) @Warmup(iterations = 3, time = 3, timeUnit = TimeUnit.SECONDS) abstract class BenchmarkBase @State(Scope.Benchmark) class ManyParamsBenchmark extends BenchmarkBase { @Param var arg: InputArg = InputArg.Tiny var nullObject: Object = null @Benchmark def javaConcat(): String = { JavaFormats.concat(arg.value1, arg.value2, nullObject) } @Benchmark def scalaConcat(): String = { ScalaFormats.concat(arg.value1, arg.value2, nullObject) } @Benchmark def stringFormat(): String = { JavaFormats.stringFormat(arg.value1, arg.value2, nullObject) } @Benchmark def messageFormat(): String = { JavaFormats.messageFormat(arg.value1, arg.value2, nullObject) } @Benchmark def slf4j(): String = { JavaFormats.slf4j(arg.value1, arg.value2, nullObject) } @Benchmark def concatOptimized1(): String = { ScalaFormats.optimizedConcat1(arg.value1, arg.value2, nullObject) } @Benchmark def concatOptimized2(): String = { ScalaFormats.optimizedConcat2(arg.value1, arg.value2, nullObject) } @Benchmark def concatOptimizedMacros(): String = { ScalaFormats.optimizedConcatMacros(arg.value1, arg.value2, nullObject) } @Benchmark def sInterpolator(): String = { ScalaFormats.sInterpolator(arg.value1, arg.value2, nullObject) } @Benchmark def fInterpolator(): String = { ScalaFormats.fInterpolator(arg.value1, arg.value2, nullObject) } @Benchmark def rawInterpolator(): String = { ScalaFormats.rawInterpolator(arg.value1, arg.value2, nullObject) } @Benchmark def sfiInterpolator(): String = { ScalaFormats.sfiInterpolator(arg.value1, arg.value2, nullObject) } }
dkomanov/stuff
src/com/komanov/stringformat/jmh/Benchmarks.scala
Scala
mit
2,070
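For reference, benchmarks like ManyParamsBenchmark above are usually launched through the sbt-jmh plugin, but they can also be driven from a plain main method with JMH's standard runner API. The sketch below is illustrative, the object name is hypothetical, and it assumes JMH's annotation processing (e.g. via sbt-jmh) has already generated the benchmark metadata on the classpath.

// Illustrative sketch: run ManyParamsBenchmark programmatically via the JMH runner.
package com.komanov.stringformat.jmh

import org.openjdk.jmh.runner.Runner
import org.openjdk.jmh.runner.options.OptionsBuilder

object ManyParamsBenchmarkRunner {
  def main(args: Array[String]): Unit = {
    val options = new OptionsBuilder()
      .include(classOf[ManyParamsBenchmark].getSimpleName) // regex over benchmark names
      .build()
    // Forks, warmups and measurement iterations come from the annotations above.
    new Runner(options).run()
  }
}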
/* * Copyright (c) 2017-2021, Robby, Kansas State University * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package org.sireum import org.sireum.test._ class ZSTest extends TestSuite { final val size = Z(1024) final val zs123 = ZS(1, 2, 3) final val zs12 = ZS(1, 2) final val zs23 = ZS(2, 3) val tests = Tests { "append" - { * - assert(zs123 =~= zs12 :+ 3) * - assert(zs123.hashCode =~= (zs12 :+ 3).hashCode) } "prepend" - { * - assert(zs123 =~= 1 +: zs23) * - assert(zs123.hashCode =~= (1 +: zs23).hashCode) } "impl" - { "zsArray" - { var i = Z(0) var append: ZS = ZS() var prepend: ZS = ZS() while (i < size) { append :+= i prepend +:= size - i - 1 i += 1 } assert(append =~= prepend) } } } }
sireum/v3-logika-runtime
library/shared/src/test/scala/org/sireum/ZSTest.scala
Scala
bsd-2-clause
2,140
import sbt._ import Keys._ trait BuildSetting { // Eclipse plugin import com.typesafe.sbteclipse.plugin.EclipsePlugin._ //Dependency graph plugin import net.virtualvoid.sbt.graph.Plugin._ val scalaSettings = Defaults.defaultSettings ++ eclipseSettings ++ graphSettings ++ Seq( initialCommands := "import org.revenj.patterns._" , javaHome := sys.env.get("JDK16_HOME").map(file(_)) , javacOptions := Seq( "-deprecation" , "-encoding", "UTF-8" , "-Xlint:unchecked" , "-source", "1.6" , "-target", "1.6" ) , scalacOptions := Seq( "-deprecation" , "-encoding", "UTF-8" , "-feature" , "-language:existentials" , "-language:implicitConversions" , "-language:postfixOps" , "-language:reflectiveCalls" , "-optimise" , "-unchecked" , "-Xcheckinit" , "-Xlint" , "-Xmax-classfile-name", "72" , "-Xno-forwarders" , "-Xverify" , "-Yclosure-elim" , "-Ydead-code" , "-Yinline-warnings" , "-Yinline" , "-Yrepl-sync" , "-Ywarn-adapted-args" , "-Ywarn-dead-code" , "-Ywarn-inaccessible" , "-Ywarn-nullary-override" , "-Ywarn-nullary-unit" , "-Ywarn-numeric-widen" ) ++ (CrossVersion.partialVersion(scalaVersion.value) match { case Some((2, 11)) => Seq( "-Yconst-opt" , "-Ywarn-infer-any" , "-Ywarn-unused" ) case _ => Seq.empty }) , publishArtifact in (Compile, packageDoc) := false , externalResolvers := Resolver.withDefaultResolvers(resolvers.value, mavenCentral = false) ) } // ---------------------------------------------------------------------------- trait Dependencies { val jodaTime = "joda-time" % "joda-time" % "2.7" val jodaTimeConvert = "org.joda" % "joda-convert" % "1.2" // Pico container feat. context val picoContainer = "org.picocontainer" % "picocontainer" % "3.0.a4" classifier "ngs" // PgScala with PostgreSQL JDBC driver val pgscala = "org.pgscala" %% "pgscala" % "0.7.29" // Logging facade val slf4j = "org.slf4j" % "slf4j-api" % "1.7.12" // Jackson module for Scala val jacksonScala = "com.fasterxml.jackson.module" %% "jackson-module-scala" % "2.4.4" val jacksonDatabind = "com.fasterxml.jackson.core" % "jackson-databind" % "2.4.4" val scalaReflect = Def.setting { "org.scala-lang" % "scala-reflect" % scalaVersion.value } //test libs lazy val scalaTest = "org.scalatest" %% "scalatest" % "2.2.4" lazy val jUnit = "junit" % "junit" % "4.12" // Logging val logback = "ch.qos.logback" % "logback-classic" % "1.1.3" % "compile->default" } // ---------------------------------------------------------------------------- object NGSBuild extends Build with Dependencies with BuildSetting { def serverProject(id: String, settings: scala.Seq[sbt.Def.Setting[_]]) = Project( id = id , base = file(id) , settings = settings ++ Seq(name := "revenj-scala-" + id.toLowerCase) ) lazy val interfaces = serverProject( "Interfaces" , scalaSettings ++ Seq( libraryDependencies ++= Seq( jodaTime , jodaTimeConvert , scalaReflect.value ) ) ) lazy val core = serverProject( "Core" , settings = scalaSettings ++ Seq( libraryDependencies ++= Seq( picoContainer , pgscala , slf4j , jacksonDatabind , jacksonScala ) , unmanagedSourceDirectories in Compile := (javaSource in Compile).value :: (scalaSource in Compile).value :: Nil ) ) dependsOn(interfaces) lazy val tests = serverProject( "Tests" , scalaSettings ++ Seq( libraryDependencies ++= Seq( scalaTest % "test" , jUnit % "test" , logback ) , unmanagedSourceDirectories in Compile := (scalaSource in Compile).value :: sourceDirectory.value / "generated" / "scala" :: Nil , unmanagedSourceDirectories in Test := (scalaSource in Test).value :: (javaSource in Test).value :: Nil ) ) dependsOn(core) lazy val root = 
project in file(".") aggregate(core, interfaces) settings (packagedArtifacts := Map.empty) }
tferega/revenj
scala/project/NGSBuild.scala
Scala
bsd-3-clause
4,275
/* * Copyright 2021 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.gov.hmrc.ct.accounts.frs102.boxes import uk.gov.hmrc.ct.box._ case class AC7200(value: Option[Boolean]) extends CtBoxIdentifier(name = "Enter Dividends note?") with CtOptionalBoolean with Input
hmrc/ct-calculations
src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC7200.scala
Scala
apache-2.0
816
package gitbucket.core.util import gitbucket.core.api.JsonFormat import gitbucket.core.controller.Context import gitbucket.core.servlet.Database import java.util.regex.Pattern.quote import javax.servlet.http.{HttpSession, HttpServletRequest} import scala.util.matching.Regex import scala.util.control.Exception._ import slick.jdbc.JdbcBackend /** * Provides some usable implicit conversions. */ object Implicits { // Convert to slick session. implicit def request2Session(implicit request: HttpServletRequest): JdbcBackend#Session = Database.getSession(request) implicit def context2ApiJsonFormatContext(implicit context: Context): JsonFormat.Context = JsonFormat.Context(context.baseUrl) implicit class RichSeq[A](seq: Seq[A]) { def splitWith(condition: (A, A) => Boolean): Seq[Seq[A]] = split(seq)(condition) @scala.annotation.tailrec private def split[A](list: Seq[A], result: Seq[Seq[A]] = Nil)(condition: (A, A) => Boolean): Seq[Seq[A]] = list match { case x :: xs => { xs.span(condition(x, _)) match { case (matched, remained) => split(remained, result :+ (x :: matched))(condition) } } case Nil => result } } implicit class RichString(value: String){ def replaceBy(regex: Regex)(replace: Regex.MatchData => Option[String]): String = { val sb = new StringBuilder() var i = 0 regex.findAllIn(value).matchData.foreach { m => sb.append(value.substring(i, m.start)) i = m.end replace(m) match { case Some(s) => sb.append(s) case None => sb.append(m.matched) } } if(i < value.length){ sb.append(value.substring(i)) } sb.toString } def toIntOpt: Option[Int] = catching(classOf[NumberFormatException]) opt { Integer.parseInt(value) } } implicit class RichRequest(request: HttpServletRequest){ def paths: Array[String] = (request.getRequestURI.substring(request.getContextPath.length + 1) match{ case path if path.startsWith("api/v3/repos/") => path.substring(13/* "/api/v3/repos".length */) case path if path.startsWith("api/v3/orgs/") => path.substring(12/* "/api/v3/orgs".length */) case path => path }).split("/") def hasQueryString: Boolean = request.getQueryString != null def hasAttribute(name: String): Boolean = request.getAttribute(name) != null def gitRepositoryPath: String = request.getRequestURI.replaceFirst("^" + quote(request.getContextPath) + "/git/", "/") def baseUrl:String = { val url = request.getRequestURL.toString val len = url.length - (request.getRequestURI.length - request.getContextPath.length) url.substring(0, len).stripSuffix("/") } } implicit class RichSession(session: HttpSession){ def getAndRemove[T](key: String): Option[T] = { val value = session.getAttribute(key).asInstanceOf[T] if(value == null){ session.removeAttribute(key) } Option(value) } } }
zhoffice/gitbucket
src/main/scala/gitbucket/core/util/Implicits.scala
Scala
apache-2.0
3,045
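The enriched collection and string methods defined in Implicits above are easiest to read next to a small usage sketch. This is illustrative only (the object name is made up); the expected outputs in the comments follow directly from the definitions above.

// Illustrative sketch of the enrichments provided by gitbucket.core.util.Implicits.
import gitbucket.core.util.Implicits._

object ImplicitsDemo extends App {
  // splitWith groups consecutive elements while the predicate holds between neighbours.
  println(Seq(1, 1, 2, 3, 3).splitWith(_ == _))   // List(List(1, 1), List(2), List(3, 3))

  // toIntOpt is a NumberFormatException-safe Integer.parseInt.
  println("42".toIntOpt)                          // Some(42)
  println("forty-two".toIntOpt)                   // None

  // replaceBy rewrites only the matches for which the callback returns Some(...).
  val masked = "user=alice id=7".replaceBy("""\d+""".r)(m => Some("*" * m.matched.length))
  println(masked)                                 // user=alice id=*
}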
/* * Copyright 2017 FOLIO Co., Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.folio_sec.reladomo.scala_api import com.gs.fw.finder.Navigation import scala.collection.JavaConverters._ /** * Represents a Scala facade of Reladomo's MithraTransactionalList. * The type must provide a MithraTransactionalList which is consistent with the Scala object's state. */ trait BiTemporalTransactionalListBase[TxObject <: TemporalTransactionalObjectBase, MithraTxObject <: com.gs.fw.common.mithra.MithraDatedTransactionalObject] extends Seq[TxObject] { self => /** * Returns consistent MithraTransactionalList */ def underlying: com.gs.fw.common.mithra.MithraDatedTransactionalList[MithraTxObject] def toScalaObject(mithraTxObject: MithraTxObject): TxObject // DomainList[MithraTxObject] def count(): Int = underlying.count() def deepFetch(navigation: Navigation[MithraTxObject]): self.type = { underlying.deepFetch(navigation) this } override def length: Int = count() override def apply(idx: Int): TxObject = toScalaObject(underlying.get(idx)) override def iterator: Iterator[TxObject] = underlying.iterator().asScala.map(toScalaObject) def newValueAppliers: Seq[() => Unit] def updateAll()(implicit tx: Transaction): Unit = { newValueAppliers.foreach(_.apply()) } // TransactionalDomainList[MithraTxObject] /** * force this list to resolve its operation. Normally, the operation is not resolved until necessary. */ def forceResolve(): Unit = underlying.forceResolve() /** * force this list to be re-read from the database. Works for both operation based and simple lists. * It has no effect on a list of detached objects. */ def forceRefresh(): Unit = underlying.forceRefresh() }
folio-sec/reladomo-scala
reladomo-scala-common/src/main/scala/com/folio_sec/reladomo/scala_api/BiTemporalTransactionalListBase.scala
Scala
apache-2.0
2,344
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs // License: http://www.gnu.org/licenses/gpl-3.0.en.html package org.ensime.fixture import akka.actor.{ ActorRef, ActorSystem } import org.scalatest._ import akka.testkit._ import org.ensime.api._ import org.ensime.core._ import scala.concurrent.duration._ // WORKAROUND http://stackoverflow.com/questions/13420809 object LoggingTestProbe { def apply()(implicit system: ActorSystem): TestProbe = { val probe = TestProbe() probe.setAutoPilot(new TestActor.AutoPilot { def run(sender: ActorRef, msg: Any) = { val other = sender.path val me = probe.ref.path system.log.debug(s"AsyncHelper $me received $msg from $other") this } }) probe } } object ProjectFixture extends Matchers { private[fixture] def startup( implicit testkit: TestKitFix, config: EnsimeConfig ): (TestActorRef[Project], TestProbe) = { import testkit._ val probe = LoggingTestProbe() probe.ignoreMsg { // these are too noisy for tests case e: SendBackgroundMessageEvent => true case e: DebugOutputEvent => true case e: DebugThreadStartEvent => true case e: DebugThreadDeathEvent => true case e: DebugVmError => true case DebugVmDisconnectEvent => true case ClearAllScalaNotesEvent => true case ClearAllJavaNotesEvent => true } val project = TestActorRef[Project](Project(probe.ref), "project") project ! ConnectionInfoReq expectMsg(ConnectionInfo()) if (config.scalaLibrary.isEmpty) probe.receiveN(2, 30.seconds) should contain only ( Broadcaster.Persist(AnalyzerReadyEvent), Broadcaster.Persist(IndexerReadyEvent) ) else probe.receiveN(3, 30.seconds) should contain only ( Broadcaster.Persist(AnalyzerReadyEvent), Broadcaster.Persist(FullTypeCheckCompleteEvent), Broadcaster.Persist(IndexerReadyEvent) ) (project, probe) } } trait ProjectFixture { /** * the project actor and a probe that receives async messages. */ def withProject( testCode: (TestActorRef[Project], TestProbe) => Any )( implicit testkit: TestKitFix, config: EnsimeConfig ): Any } trait IsolatedProjectFixture extends ProjectFixture { override def withProject(testCode: (TestActorRef[Project], TestProbe) => Any)(implicit testkit: TestKitFix, config: EnsimeConfig): Any = { val (project, probe) = ProjectFixture.startup testCode(project, probe) } } trait SharedProjectFixture extends ProjectFixture with SharedEnsimeConfigFixture with SharedTestKitFixture { private var _project: TestActorRef[Project] = _ private var _probe: TestProbe = _ override def beforeAll(): Unit = { super.beforeAll() implicit val testkit = _testkit implicit val config = _config val (project, probe) = ProjectFixture.startup _project = project _probe = probe } override def withProject(testCode: (TestActorRef[Project], TestProbe) => Any)(implicit testkit: TestKitFix, config: EnsimeConfig): Any = testCode(_project, _probe) }
pascr/ensime-server
core/src/it/scala/org/ensime/fixture/ProjectFixture.scala
Scala
gpl-3.0
3,153
package ucesoft.cbm.formats.cart import ucesoft.cbm.ChipID import ucesoft.cbm.cpu.Memory import ucesoft.cbm.formats.{Cartridge, CartridgeBuilder } import ucesoft.cbm.formats.ExpansionPortFactory.CartridgeExpansionPort import ucesoft.cbm.misc.{AMF29F040, FlashListener} import java.io.{ObjectInputStream, ObjectOutputStream} import java.util object EasyFlash { var jumper = false } class EasyFlash(crt: Cartridge, ram:Memory) extends CartridgeExpansionPort(crt,ram) { private[this] val io2mem = Array.ofDim[Int](256) private class FlashROM(low:Boolean) extends ROMMemory with FlashListener { val name = "FLASH" val startAddress = if (low) 0x8000 else 0xE000 val length = 8192 val isRom = true def isActive = true def init : Unit = {} private val amf29f040 = new AMF29F040(startAddress,low,this) val bankErasedMap = Array.ofDim[Boolean](64) updateROMBank checkErasedBanks private def checkErasedBanks : Unit = { val map = if (low) romlBanks else romhBanks for(b <- 0 to 63) { bankErasedMap(b) = !map.contains(b) } } override def eraseSector: Unit = { val bank = (if (low) romlBankIndex else romhBankIndex) & 0xF8 for(b <- bank until bank + 8) { val rom = getOrCreateBank(Some(b)).asInstanceOf[ROM] util.Arrays.fill(rom.data, 0xFF) bankErasedMap(b) = true } } private def getOrCreateBank(bankIndex:Option[Int]) : Memory = { val bankPresent = if (low) romlBanks.contains(bankIndex.getOrElse(romlBankIndex)) else romhBanks.contains(bankIndex.getOrElse(romhBankIndex)) if (!bankPresent) { val bank = if (low) bankIndex.getOrElse(romlBankIndex) else bankIndex.getOrElse(romhBankIndex) val newBank = new ROM(s"$name-${if (low) "roml" else "romh"}-$bank",if (low) 0x8000 else 0xA000,0x2000,Array.fill[Int](0x2000)(0xFF)) if (low) { romlBanks += bank -> newBank romlBankIndex = bank // force the new bank to be selected } else { romhBanks += bank -> newBank romhBankIndex = bank // force the new bank to be selected } updateROMBank newBank } else if (low) EasyFlash.super.ROML else EasyFlash.super.ROMH } override def flash(address: Int, value: Int, low: Boolean): Unit = { val rom = getOrCreateBank(None) rom.asInstanceOf[ROM].data(address) = value // flash value val bank = if (low) romlBankIndex else romhBankIndex bankErasedMap(bank) = false //println("FLASHING[%5b] bank %2d address %4X value %2X bank present=%5b".format(low,if (low) romlBankIndex else romhBankIndex,address,value,if (low) romlBanks.contains(romlBankIndex) else romhBanks.contains(romhBankIndex))) } def read(address: Int, chipID: ChipID.ID = ChipID.CPU) = amf29f040.read(address) override def writeROM(address: Int, value: Int, chipID: ChipID.ID = ChipID.CPU) : Unit = { //println("Flash write to %4X = %2X".format(address,value)) amf29f040.write(address,value) } def updateROMBank : Unit = amf29f040.setROMBank(if (low) EasyFlash.super.ROML else EasyFlash.super.ROMH) } private val flashL = new FlashROM(true) private val flashH = new FlashROM(false) override def ROML : Memory = flashL override def ROMH : Memory = flashH override def read(address: Int, chipID: ChipID.ID = ChipID.CPU) = { if (address >= 0xDF00) io2mem(address & 0xFF) else 0 } override def write(address: Int, value: Int, chipID: ChipID.ID = ChipID.CPU) : Unit = { if (address >= 0xDF00) io2mem(address & 0xFF) = value else { if ((address & 2) == 0) { val bank = value & 0x3F //println(s"Selecting bank $bank") romlBankIndex = bank romhBankIndex = bank flashL.updateROMBank flashH.updateROMBank } else { //println(s"EasyFlash Control = $value (${address.toHexString})") val gameControlledViaBit0 = (value & 4) == 4 exrom 
= (value & 2) == 0 game = if (gameControlledViaBit0) (value & 1) == 0 else EasyFlash.jumper notifyMemoryConfigurationChange } } } override def reset : Unit = { game = EasyFlash.jumper exrom = true romlBankIndex = 0 romhBankIndex = 0 notifyMemoryConfigurationChange } override def saveState(out: ObjectOutputStream): Unit = { super.saveState(out) out.writeObject(io2mem) } override def loadState(in: ObjectInputStream): Unit = { super.loadState(in) loadMemory[Int](io2mem,in) } def createCRT : Unit = { val builder = new CartridgeBuilder(crt.file,"KERNAL64 EASYFLASH",32,true,false) for(hl <- 0 to 1) { val banks = if (hl == 0) romlBanks else romhBanks val erasedMap = if (hl == 0) flashL.bankErasedMap else flashH.bankErasedMap for(b <- 0 to 63) { banks get b match { case Some(bank:ROM) => if (!erasedMap(b)) builder.addChip(bank.startAddress,2,b,bank.data) case None => } } } builder.finish } }
abbruzze/kernal64
Kernal64/src/ucesoft/cbm/formats/cart/EasyFlash.scala
Scala
mit
5,088
package scodec.protocols package pcap import scalaz.\/ import scalaz.\/.{ left, right } import scodec.{ Codec, Decoder, Err } import scodec.bits.BitVector import scodec.codecs._ import scodec.stream._ import scodec.{ Codec, Attempt, DecodeResult } import scodec.stream.decode.DecodingError import shapeless.Lazy case class CaptureFile( header: GlobalHeader, records: Vector[Record]) object CaptureFile { implicit val codec: Codec[CaptureFile] = "capture-file" | { Codec[GlobalHeader] >>:~ { hdr => vector(Record.codec(hdr.ordering)).hlist }}.as[CaptureFile] def payloadStreamDecoderPF[A](chunkSize: Int = 256)(linkDecoders: PartialFunction[LinkType, StreamDecoder[A]]): StreamDecoder[TimeStamped[A]] = payloadStreamDecoder(chunkSize)(linkDecoders.lift) def payloadStreamDecoder[A](chunkSize: Int = 256)(linkDecoders: LinkType => Option[StreamDecoder[A]]): StreamDecoder[TimeStamped[A]] = streamDecoder(chunkSize) { global => linkDecoders(global.network) match { case None => left(Err(s"unsupported link type ${global.network}")) case Some(decoder) => right { hdr => decoder map { value => TimeStamped(hdr.timestamp plus (global.thiszone * 1000L), value) } } } } def recordStreamDecoder(chunkSize: Int = 256): StreamDecoder[Record] = streamDecoder[Record](chunkSize) { global => right { hdr => decode.once(bits) map { bs => Record(hdr.copy(timestampSeconds = hdr.timestampSeconds + global.thiszone), bs) } }} def streamDecoder[A](chunkSize: Int = 256)(f: GlobalHeader => Err \/ (RecordHeader => StreamDecoder[A])): StreamDecoder[A] = for { global <- decode.once[GlobalHeader] decoderFn <- f(global).fold(decode.fail, decode.emit) recordDecoder = RecordHeader.codec(global.ordering) flatMap { header => decode.isolateBytes(header.includedLength) { decoderFn(header) }.strict } values <- decode.manyChunked(chunkSize)(Lazy(recordDecoder)).flatMap(x => decode.emitAll(x)) } yield values }
jrudnick/scodec-protocols
src/main/scala/scodec/protocols/pcap/CaptureFile.scala
Scala
bsd-3-clause
2,044
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.streaming.continuous import java.util.concurrent.{ArrayBlockingQueue, BlockingQueue} import org.mockito.Mockito._ import org.scalatest.mockito.MockitoSugar import org.apache.spark.{SparkEnv, TaskContext} import org.apache.spark.rpc.RpcEndpointRef import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeProjection, UnsafeRow} import org.apache.spark.sql.connector.read.streaming.{ContinuousPartitionReader, ContinuousStream, PartitionOffset} import org.apache.spark.sql.connector.write.streaming.StreamingWrite import org.apache.spark.sql.execution.streaming.continuous._ import org.apache.spark.sql.streaming.StreamTest import org.apache.spark.sql.types.{DataType, IntegerType, StructType} class ContinuousQueuedDataReaderSuite extends StreamTest with MockitoSugar { case class LongPartitionOffset(offset: Long) extends PartitionOffset val coordinatorId = s"${getClass.getSimpleName}-epochCoordinatorIdForUnitTest" val startEpoch = 0 var epochEndpoint: RpcEndpointRef = _ override def beforeEach(): Unit = { super.beforeEach() epochEndpoint = EpochCoordinatorRef.create( mock[StreamingWrite], mock[ContinuousStream], mock[ContinuousExecution], coordinatorId, startEpoch, spark, SparkEnv.get) EpochTracker.initializeCurrentEpoch(0) } override def afterEach(): Unit = { SparkEnv.get.rpcEnv.stop(epochEndpoint) epochEndpoint = null super.afterEach() } private val mockContext = mock[TaskContext] when(mockContext.getLocalProperty(ContinuousExecution.START_EPOCH_KEY)) .thenReturn(startEpoch.toString) when(mockContext.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY)) .thenReturn(coordinatorId) /** * Set up a ContinuousQueuedDataReader for testing. The blocking queue can be used to send * rows to the wrapped data reader. 
*/ private def setup(): (BlockingQueue[UnsafeRow], ContinuousQueuedDataReader) = { val queue = new ArrayBlockingQueue[UnsafeRow](1024) val partitionReader = new ContinuousPartitionReader[InternalRow] { var index = -1 var curr: UnsafeRow = _ override def next() = { curr = queue.take() index += 1 true } override def get = curr override def getOffset = LongPartitionOffset(index) override def close() = {} } val reader = new ContinuousQueuedDataReader( 0, partitionReader, new StructType().add("i", "int"), mockContext, dataQueueSize = sqlContext.conf.continuousStreamingExecutorQueueSize, epochPollIntervalMs = sqlContext.conf.continuousStreamingExecutorPollIntervalMs) (queue, reader) } private def unsafeRow(value: Int) = { UnsafeProjection.create(Array(IntegerType : DataType))( new GenericInternalRow(Array(value: Any))) } test("basic data read") { val (input, reader) = setup() input.add(unsafeRow(12345)) assert(reader.next().getInt(0) == 12345) } test("basic epoch marker") { val (input, reader) = setup() epochEndpoint.askSync[Long](IncrementAndGetEpoch) assert(reader.next() == null) } test("new rows after markers") { val (input, reader) = setup() epochEndpoint.askSync[Long](IncrementAndGetEpoch) epochEndpoint.askSync[Long](IncrementAndGetEpoch) epochEndpoint.askSync[Long](IncrementAndGetEpoch) assert(reader.next() == null) assert(reader.next() == null) assert(reader.next() == null) input.add(unsafeRow(11111)) input.add(unsafeRow(22222)) assert(reader.next().getInt(0) == 11111) assert(reader.next().getInt(0) == 22222) } test("new markers after rows") { val (input, reader) = setup() input.add(unsafeRow(11111)) input.add(unsafeRow(22222)) assert(reader.next().getInt(0) == 11111) assert(reader.next().getInt(0) == 22222) epochEndpoint.askSync[Long](IncrementAndGetEpoch) epochEndpoint.askSync[Long](IncrementAndGetEpoch) epochEndpoint.askSync[Long](IncrementAndGetEpoch) assert(reader.next() == null) assert(reader.next() == null) assert(reader.next() == null) } test("alternating markers and rows") { val (input, reader) = setup() input.add(unsafeRow(11111)) assert(reader.next().getInt(0) == 11111) input.add(unsafeRow(22222)) assert(reader.next().getInt(0) == 22222) epochEndpoint.askSync[Long](IncrementAndGetEpoch) assert(reader.next() == null) input.add(unsafeRow(33333)) assert(reader.next().getInt(0) == 33333) input.add(unsafeRow(44444)) assert(reader.next().getInt(0) == 44444) epochEndpoint.askSync[Long](IncrementAndGetEpoch) assert(reader.next() == null) } }
bdrillard/spark
sql/core/src/test/scala/org/apache/spark/sql/streaming/continuous/ContinuousQueuedDataReaderSuite.scala
Scala
apache-2.0
5,584
package vexriscv.demo import vexriscv.plugin._ import vexriscv.{plugin, VexRiscv, VexRiscvConfig} import spinal.core._ /** * Created by spinalvm on 15.06.17. */ object GenSmallest extends App{ def cpu() = new VexRiscv( config = VexRiscvConfig( plugins = List( new IBusSimplePlugin( resetVector = 0x80000000l, cmdForkOnSecondStage = false, cmdForkPersistence = false, prediction = NONE, catchAccessFault = false, compressedGen = false ), new DBusSimplePlugin( catchAddressMisaligned = false, catchAccessFault = false ), new CsrPlugin(CsrPluginConfig.smallest), new DecoderSimplePlugin( catchIllegalInstruction = false ), new RegFilePlugin( regFileReadyKind = plugin.SYNC, zeroBoot = false ), new IntAluPlugin, new SrcPlugin( separatedAddSub = false, executeInsertion = false ), new LightShifterPlugin, new HazardSimplePlugin( bypassExecute = false, bypassMemory = false, bypassWriteBack = false, bypassWriteBackBuffer = false, pessimisticUseSrc = false, pessimisticWriteRegFile = false, pessimisticAddressMatch = false ), new BranchPlugin( earlyBranch = false, catchAddressMisaligned = false ), new YamlPlugin("cpu0.yaml") ) ) ) SpinalVerilog(cpu()) }
SpinalHDL/VexRiscv
src/main/scala/vexriscv/demo/GenSmallest.scala
Scala
mit
1,583
package spire package math import java.lang.Math import scala.math.{ScalaNumber, ScalaNumericConversions} import spire.algebra._ import spire.syntax.field._ import spire.syntax.isReal._ import spire.syntax.nroot._ object Quaternion extends QuaternionInstances { def i[@sp(Float, Double) A](implicit f: Rig[A]): Quaternion[A] = Quaternion(f.zero, f.one, f.zero, f.zero) def j[@sp(Float, Double) A](implicit f: Rig[A]): Quaternion[A] = Quaternion(f.zero, f.zero, f.one, f.zero) def k[@sp(Float, Double) A](implicit f: Rig[A]): Quaternion[A] = Quaternion(f.zero, f.zero, f.zero, f.one) def zero[@sp(Float, Double) A](implicit f: Semiring[A]): Quaternion[A] = Quaternion(f.zero, f.zero, f.zero, f.zero) def one[@sp(Float, Double) A](implicit f: Rig[A]): Quaternion[A] = Quaternion(f.one, f.zero, f.zero, f.zero) def apply[@sp(Float, Double) A](a: A)(implicit f: Semiring[A]): Quaternion[A] = Quaternion(a, f.zero, f.zero, f.zero) def apply[@sp(Float, Double) A](c: Complex[A])(implicit f: Semiring[A]): Quaternion[A] = Quaternion(c.real, c.imag, f.zero, f.zero) } // really a skew field private[math] trait QuaternionAlgebra[A] extends Field[Quaternion[A]] with Eq[Quaternion[A]] with NRoot[Quaternion[A]] with InnerProductSpace[Quaternion[A], A] with FieldAlgebra[Quaternion[A], A] { implicit def f: Fractional[A] implicit def t: Trig[A] implicit def r: IsReal[A] def eqv(x: Quaternion[A], y: Quaternion[A]): Boolean = x == y override def neqv(x: Quaternion[A], y: Quaternion[A]): Boolean = x != y override def minus(a: Quaternion[A], b: Quaternion[A]): Quaternion[A] = a - b def negate(a: Quaternion[A]): Quaternion[A] = -a def one: Quaternion[A] = Quaternion.one[A] def plus(a: Quaternion[A], b: Quaternion[A]): Quaternion[A] = a + b override def pow(a: Quaternion[A], b: Int): Quaternion[A] = a.pow(b) override def times(a: Quaternion[A], b: Quaternion[A]): Quaternion[A] = a * b def zero: Quaternion[A] = Quaternion.zero[A] def div(a: Quaternion[A], b: Quaternion[A]): Quaternion[A] = a / b def quot(a: Quaternion[A], b: Quaternion[A]): Quaternion[A] = a /~ b def mod(a: Quaternion[A], b: Quaternion[A]): Quaternion[A] = a % b def gcd(a: Quaternion[A], b: Quaternion[A]): Quaternion[A] = { @tailrec def _gcd(a: Quaternion[A], b: Quaternion[A]): Quaternion[A] = if (b.isZero) a else _gcd(b, a - (a / b).round * b) _gcd(a, b) } def nroot(a: Quaternion[A], k: Int): Quaternion[A] = a.nroot(k) override def sqrt(a: Quaternion[A]): Quaternion[A] = a.sqrt def fpow(a: Quaternion[A], b: Quaternion[A]): Quaternion[A] = a.fpow(b.r) //FIXME def timesl(a: A, q: Quaternion[A]): Quaternion[A] = q * a def dot(x: Quaternion[A], y: Quaternion[A]): A = x.dot(y) } trait QuaternionInstances { implicit def QuaternionAlgebra[A](implicit fr: Fractional[A], tr: Trig[A], isr: IsReal[A]): QuaternionAlgebra[A] = new QuaternionAlgebra[A] { val f = fr val t = tr val r = isr def scalar = f def nroot = f } } final case class Quaternion[@sp(Float, Double) A](r: A, i: A, j: A, k: A) extends ScalaNumber with ScalaNumericConversions with Serializable { lhs => // junky ScalaNumber stuff override def byteValue: Byte = longValue.toByte override def shortValue: Short = longValue.toShort def intValue: Int = longValue.toInt override def longValue: Long = anyToLong(r) def floatValue: Float = doubleValue.toFloat def doubleValue: Double = anyToDouble(r) private[this] def sillyIsReal: Boolean = anyIsZero(i) && anyIsZero(j) && anyIsZero(k) def underlying: Object = this def isWhole: Boolean = sillyIsReal && anyIsWhole(r) override final def isValidInt: Boolean = sillyIsReal && 
anyIsValidInt(r) // important to keep in sync with Complex[_] override def hashCode: Int = if (sillyIsReal) r.## else (19 * r.##) + (41 * i.##) + (13 * j.##) + (77 * k.##) + 97 // not typesafe, so this is the best we can do :( override def equals(that: Any): Boolean = that match { case that: Quaternion[_] => this === that case that: Complex[_] => r == that.real && i == that.imag && anyIsZero(j) && anyIsZero(k) case that => sillyIsReal && r == that } def ===(that: Quaternion[_]): Boolean = r == that.r && i == that.i && j == that.j && k == that.k def =!=(that: Quaternion[_]): Boolean = !(this === that) def isZero(implicit o: IsReal[A]): Boolean = r.isSignZero && i.isSignZero && j.isSignZero && k.isSignZero def isReal(implicit o: IsReal[A]): Boolean = i.isSignZero && j.isSignZero && k.isSignZero def isPure(implicit o: IsReal[A]): Boolean = r.isSignZero def real(implicit s: Semiring[A]): Quaternion[A] = Quaternion(r) def pure(implicit s: Semiring[A]): Quaternion[A] = Quaternion(s.zero, i, j, k) def abs(implicit f: Field[A], o: IsReal[A], n: NRoot[A]): A = (r.pow(2) + i.pow(2) + j.pow(2) + k.pow(2)).sqrt def pureAbs(implicit f: Field[A], o: IsReal[A], n: NRoot[A]): A = (i.pow(2) + j.pow(2) + k.pow(2)).sqrt def eqv(rhs: Quaternion[A])(implicit o: Eq[A]): Boolean = lhs.r === rhs.r && lhs.i === rhs.i && lhs.j === rhs.j && lhs.k === rhs.k def neqv(rhs: Quaternion[A])(implicit o: Eq[A]): Boolean = lhs.r =!= rhs.r && lhs.i =!= rhs.i && lhs.j =!= rhs.j && lhs.k =!= rhs.k override def toString: String = s"($r + ${i}i + ${j}j + ${k}k)" def toComplex: Complex[A] = Complex(r, i) def signum(implicit o: IsReal[A]): Int = r.signum match { case 0 => i.signum match { case 0 => j.signum match { case 0 => k.signum case n => n } case n => n } case n => n } def quaternionSignum(implicit f: Field[A], o: IsReal[A], n: NRoot[A]): Quaternion[A] = if (isZero) this else this / abs def pureSignum(implicit f: Field[A], o: IsReal[A], n: NRoot[A]): Quaternion[A] = if (isReal) Quaternion.zero[A] else (pure / pureAbs) def unary_-(implicit s: Rng[A]): Quaternion[A] = Quaternion(-r, -i, -j, -k) def conjugate(implicit s: Rng[A]): Quaternion[A] = Quaternion(r, -i, -j, -k) def reciprocal(implicit f: Field[A]): Quaternion[A] = conjugate / (r.pow(2) + i.pow(2) + j.pow(2) + k.pow(2)) def sqrt(implicit f: Field[A], o: IsReal[A], n0: NRoot[A]): Quaternion[A] = if (!isReal) { val n = (r + abs).sqrt Quaternion(n, i / n, j / n, k / n) / f.fromInt(2).sqrt } else if (r.signum >= 0) { Quaternion(r.sqrt) } else { Quaternion(f.zero, r.abs.sqrt, f.zero, f.zero) } def nroot(m: Int)(implicit f: Field[A], o: IsReal[A], n0: NRoot[A], tr: Trig[A]): Quaternion[A] = if (m <= 0) { throw new IllegalArgumentException(s"illegal root: $m") } else if (m == 1) { this } else if (!isReal) { val s = pureAbs val n = abs val t = acos(r / n) val v = Quaternion(f.zero, i / s, j / s, k / s) val e = if (sin(t).signum >= 0) v else -v val tm = t / m (e * sin(tm) + cos(tm)) * n.nroot(m) } else if (r.signum >= 0) { Quaternion(r.nroot(m)) } else { Quaternion(Complex(r).nroot(m)) } def unit(implicit f: Field[A], o: IsReal[A], n: NRoot[A]): Quaternion[A] = Quaternion(r.pow(2), i.pow(2), j.pow(2), k.pow(2)) / abs def +(rhs: A)(implicit s: Semiring[A]): Quaternion[A] = Quaternion(r + rhs, i, j, k) def +(rhs: Complex[A])(implicit s: Semiring[A]): Quaternion[A] = Quaternion(r + rhs.real, i + rhs.imag, j, k) def +(rhs: Quaternion[A])(implicit s: Semiring[A]): Quaternion[A] = Quaternion(lhs.r + rhs.r, lhs.i + rhs.i, lhs.j + rhs.j, lhs.k + rhs.k) def -(rhs: A)(implicit s: Rng[A]): 
Quaternion[A] = Quaternion(r - rhs, i, j, k) def -(rhs: Complex[A])(implicit s: Rng[A]): Quaternion[A] = Quaternion(r - rhs.real, i - rhs.imag, j, k) def -(rhs: Quaternion[A])(implicit s: Rng[A]): Quaternion[A] = Quaternion(lhs.r - rhs.r, lhs.i - rhs.i, lhs.j - rhs.j, lhs.k - rhs.k) def *(rhs: A)(implicit s: Semiring[A]): Quaternion[A] = Quaternion(r * rhs, i * rhs, j * rhs, k * rhs) def *(rhs: Complex[A])(implicit s: Rng[A]): Quaternion[A] = Quaternion( (r * rhs.real) - (i * rhs.imag), (r * rhs.imag) + (i * rhs.real), (j * rhs.real) + (k * rhs.imag), (j * rhs.imag) + (k * rhs.real) ) def *(rhs: Quaternion[A])(implicit s: Rng[A]): Quaternion[A] = Quaternion( (lhs.r * rhs.r) - (lhs.i * rhs.i) - (lhs.j * rhs.j) - (lhs.k * rhs.k), (lhs.r * rhs.i) + (lhs.i * rhs.r) + (lhs.j * rhs.k) - (lhs.k * rhs.j), (lhs.r * rhs.j) - (lhs.i * rhs.k) + (lhs.j * rhs.r) + (lhs.k * rhs.i), (lhs.r * rhs.k) + (lhs.i * rhs.j) - (lhs.j * rhs.i) + (lhs.k * rhs.r) ) def /(rhs: A)(implicit f: Field[A]): Quaternion[A] = Quaternion(r / rhs, i / rhs, j / rhs, k / rhs) def /(rhs: Complex[A])(implicit f: Field[A]): Quaternion[A] = lhs * Quaternion(rhs).reciprocal def /(rhs: Quaternion[A])(implicit f: Field[A]): Quaternion[A] = lhs * rhs.reciprocal def pow(k: Int)(implicit s: Ring[A]): Quaternion[A] = { @tailrec def loop(p: Quaternion[A], b: Quaternion[A], e: Int): Quaternion[A] = if (e == 0) p else if ((e & 1) == 1) loop(p * b, b * b, e >>> 1) else loop(p, b * b, e >>> 1) if (k >= 0) loop(Quaternion.one[A], this, k) else throw new IllegalArgumentException(s"illegal exponent: $k") } def **(k: Int)(implicit s: Ring[A]): Quaternion[A] = pow(k) def fpow(k0: A)(implicit f: Field[A], o: IsReal[A], n0: NRoot[A], tr: Trig[A]): Quaternion[A] = if (k0.signum < 0) { Quaternion.zero } else if (k0 == f.zero) { Quaternion.one } else if (k0 == f.one) { this } else if (!isReal) { val s = (i ** 2 + j ** 2 + k ** 2).sqrt val v = Quaternion(f.zero, i / s, j / s, k / s) val n = abs val t = acos(r / n) (Quaternion(cos(t * k0)) + v * sin(t * k0)) * n.fpow(k0) } else if (r.signum >= 0) { Quaternion(r.fpow(k0)) } else { Quaternion(Complex(r).pow(Complex(k0))) } def floor(implicit o: IsReal[A]): Quaternion[A] = Quaternion(r.floor, i.floor, j.floor, k.floor) def ceil(implicit o: IsReal[A]): Quaternion[A] = Quaternion(r.ceil, i.ceil, j.ceil, k.ceil) def round(implicit o: IsReal[A]): Quaternion[A] = Quaternion(r.round, i.round, j.round, k.round) // TODO: instead of floor for /~, should be round-toward-zero def /~(rhs: A)(implicit f: Field[A], o: IsReal[A]): Quaternion[A] = (lhs / rhs).floor def /~(rhs: Complex[A])(implicit f: Field[A], o: IsReal[A]): Quaternion[A] = (lhs / rhs).floor def /~(rhs: Quaternion[A])(implicit f: Field[A], o: IsReal[A]): Quaternion[A] = (lhs / rhs).floor def %(rhs: A)(implicit f: Field[A], o: IsReal[A]): Quaternion[A] = lhs - (lhs /~ rhs) def %(rhs: Complex[A])(implicit f: Field[A], o: IsReal[A]): Quaternion[A] = lhs - (lhs /~ rhs) def %(rhs: Quaternion[A])(implicit f: Field[A], o: IsReal[A]): Quaternion[A] = lhs - (lhs /~ rhs) def /%(rhs: A)(implicit f: Field[A], o: IsReal[A]): (Quaternion[A], Quaternion[A]) = { val q = lhs /~ rhs (q, lhs - q) } def /%(rhs: Complex[A])(implicit f: Field[A], o: IsReal[A]): (Quaternion[A], Quaternion[A]) = { val q = lhs /~ rhs (q, lhs - q) } def /%(rhs: Quaternion[A])(implicit f: Field[A], o: IsReal[A]): (Quaternion[A], Quaternion[A]) = { val q = lhs /~ rhs (q, lhs - q) } def dot(rhs: Quaternion[A])(implicit f: Field[A]): A = (lhs.conjugate * rhs + rhs.conjugate * lhs).r / f.fromInt(2) }
rklaehn/spire
core/shared/src/main/scala/spire/math/Quaternion.scala
Scala
mit
11,561
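A short usage sketch for the Quaternion API above. It is illustrative only (the object name is hypothetical) and assumes spire's standard Double instances (Field, NRoot, Trig, IsReal) are brought into scope via spire.implicits._.

// Illustrative sketch of basic Quaternion arithmetic.
import spire.implicits._
import spire.math.Quaternion

object QuaternionDemo extends App {
  val q = Quaternion(1.0, 2.0, 3.0, 4.0)

  // i * j = k, per the Hamilton product implemented in `*` above.
  println(Quaternion.i[Double] * Quaternion.j[Double])

  // Multiplying by the reciprocal recovers (approximately) the identity quaternion.
  println(q * q.reciprocal)   // ~ (1.0 + 0.0i + 0.0j + 0.0k)

  // abs is the Euclidean norm: sqrt(1 + 4 + 9 + 16) ≈ 5.477.
  println(q.abs)
}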
/* * Copyright 2010 LinkedIn * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.log import java.io._ import kafka.utils._ import kafka.message._ import kafka.common._ import junit.framework.TestCase import junit.framework.Assert._ import kafka.TestUtils import kafka.server.KafkaConfig class LogManagerTest extends TestCase { val time: MockTime = new MockTime() val maxLogAge = 1000 var logDir: File = null var logManager: LogManager = null var config:KafkaConfig = null override def setUp() { val props = TestUtils.createBrokerConfig(0, -1) config = new KafkaConfig(props) { override val logFileSize = 1024 override val enableZookeeper = false } logManager = new LogManager(config, null, time, -1, maxLogAge) logManager.startup logDir = logManager.logDir } override def tearDown() { logManager.close() Utils.rm(logDir) } def testCreateLog() { val log = logManager.getOrCreateLog("kafka", 0) log.append(TestUtils.singleMessageSet("test".getBytes())) } def testCleanup() { val log = logManager.getOrCreateLog("cleanup", 0) var offset = 0L for(i <- 0 until 1000) { var set = TestUtils.singleMessageSet("test".getBytes()) log.append(set) offset += set.sizeInBytes } assertTrue("There should be more than one segment now.", log.numberOfSegments > 1) time.currentMs += maxLogAge + 10000 logManager.cleanupLogs() assertEquals("Now there should only be only one segment.", 1, log.numberOfSegments) assertEquals("Should get empty fetch off new log.", 0L, log.read(offset, 1024).sizeInBytes) try { log.read(0, 1024) fail("Should get exception from fetching earlier.") } catch { case e: OffsetOutOfRangeException => "This is good." 
} // log should still be appendable log.append(TestUtils.singleMessageSet("test".getBytes())) } def testTimeBasedFlush() { val props = TestUtils.createBrokerConfig(0, -1) logManager.close Thread.sleep(100) config = new KafkaConfig(props) { override val logFileSize = 1024 *1024 *1024 override val enableZookeeper = false override val flushSchedulerThreadRate = 50 override val flushInterval = Int.MaxValue override val flushIntervalMap = Utils.getTopicFlushIntervals("timebasedflush:100") } logManager = new LogManager(config, null, time, -1, maxLogAge) logManager.startup val log = logManager.getOrCreateLog("timebasedflush", 0) for(i <- 0 until 200) { var set = TestUtils.singleMessageSet("test".getBytes()) log.append(set) } assertTrue("The last flush time has to be within defaultflushInterval of current time ", (System.currentTimeMillis - log.getLastFlushedTime) < 100) } def testConfigurablePartitions() { val props = TestUtils.createBrokerConfig(0, -1) logManager.close Thread.sleep(100) config = new KafkaConfig(props) { override val logFileSize = 256 override val enableZookeeper = false override val topicPartitionsMap = Utils.getTopicPartitions("testPartition:2") } logManager = new LogManager(config, null, time, -1, maxLogAge) logManager.startup for(i <- 0 until 2) { val log = logManager.getOrCreateLog("testPartition", i) for(i <- 0 until 250) { var set = TestUtils.singleMessageSet("test".getBytes()) log.append(set) } } try { val log = logManager.getOrCreateLog("testPartition", 2) assertTrue("Should not come here", log != null) } catch { case _ => } } }
jinfei21/kafka
test/unit/kafka/log/LogManagerTest.scala
Scala
apache-2.0
4,327
/* __ *\\ ** ________ ___ / / ___ Scala API ** ** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL ** ** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ ** ** /____/\\___/_/ |_/____/_/ | | ** ** |/ ** \\* */ package scala package util.control /** Methods exported by this object implement tail calls via trampolining. * Tail calling methods have to return their result using `done` or call the * next method using `tailcall`. Both return a `TailRec` object. The result * of evaluating a tailcalling function can be retrieved from a `Tailrec` * value using method `result`. * Implemented as described in "Stackless Scala with Free Monads" * [[http://blog.higher-order.com/assets/trampolines.pdf]] * * Here's a usage example: * {{{ * import scala.util.control.TailCalls._ * * def isEven(xs: List[Int]): TailRec[Boolean] = * if (xs.isEmpty) done(true) else tailcall(isOdd(xs.tail)) * * def isOdd(xs: List[Int]): TailRec[Boolean] = * if (xs.isEmpty) done(false) else tailcall(isEven(xs.tail)) * * isEven((1 to 100000).toList).result * * def fib(n: Int): TailRec[Int] = * if (n < 2) done(n) else for { * x <- tailcall(fib(n - 1)) * y <- tailcall(fib(n - 2)) * } yield (x + y) * * fib(40).result * }}} */ object TailCalls { /** This class represents a tailcalling computation */ abstract class TailRec[+A] { /** Continue the computation with `f`. */ final def map[B](f: A => B): TailRec[B] = flatMap(a => Call(() => Done(f(a)))) /** Continue the computation with `f` and merge the trampolining * of this computation with that of `f`. */ final def flatMap[B](f: A => TailRec[B]): TailRec[B] = this match { case Done(a) => Call(() => f(a)) case c@Call(_) => Cont(c, f) // Take advantage of the monad associative law to optimize the size of the required stack case c: Cont[a1, b1] => Cont(c.a, (x: a1) => c.f(x) flatMap f) } /** Returns either the next step of the tailcalling computation, * or the result if there are no more steps. */ @annotation.tailrec final def resume: Either[() => TailRec[A], A] = this match { case Done(a) => Right(a) case Call(k) => Left(k) case Cont(a, f) => a match { case Done(v) => f(v).resume case Call(k) => Left(() => k().flatMap(f)) case Cont(b, g) => b.flatMap(x => g(x) flatMap f).resume } } /** Returns the result of the tailcalling computation. */ @annotation.tailrec final def result: A = this match { case Done(a) => a case Call(t) => t().result case Cont(a, f) => a match { case Done(v) => f(v).result case Call(t) => t().flatMap(f).result case Cont(b, g) => b.flatMap(x => g(x) flatMap f).result } } } /** Internal class representing a tailcall */ protected case class Call[A](rest: () => TailRec[A]) extends TailRec[A] /** Internal class representing the final result returned from a tailcalling * computation */ protected case class Done[A](value: A) extends TailRec[A] /** Internal class representing a continuation with function A => TailRec[B]. * It is needed for the flatMap to be implemented. */ protected case class Cont[A, B](a: TailRec[A], f: A => TailRec[B]) extends TailRec[B] /** Performs a tailcall * @param rest the expression to be evaluated in the tailcall * @return a `TailRec` object representing the expression `rest` */ def tailcall[A](rest: => TailRec[A]): TailRec[A] = Call(() => rest) /** Used to return final result from tailcalling computation * @param `result` the result value * @return a `TailRec` object representing a computation which immediately * returns `result` */ def done[A](result: A): TailRec[A] = Done(result) }
rorygraves/perf_tester
corpus/scala-library/src/main/scala/util/control/TailCalls.scala
Scala
apache-2.0
4,156
package lila.coach import play.api.libs.json._ final class JsonView(jsonWriters: JSONWriters) { import jsonWriters._ def opening(period: Period, color: chess.Color): Fu[JsObject] = fuccess { val stat = period.data val openings = stat.openings(color).trim Json.obj( "from" -> period.from, "to" -> period.to, "color" -> color.name, "results" -> stat.results.base, "colorResults" -> stat.colorResults(color), "openings" -> openings.m.map { case (eco, results) => eco -> Json.obj( "opening" -> EcopeningDB.allByEco.get(eco), "results" -> results) }, "openingResults" -> openings.results, "families" -> Ecopening.makeFamilies { openings.m.keys.flatMap(EcopeningDB.allByEco.get) }.values.toList.sortBy(-_.ecos.size).map { fam => Json.obj( "family" -> fam, "results" -> fam.ecos.flatMap(openings.m.get).foldLeft(Results.empty) { (res, oRes) => res merge oRes }) } ) } def move(period: Period): Fu[JsObject] = fuccess { val stat = period.data Json.obj( "from" -> period.from, "to" -> period.to, "perfs" -> (Json.obj( "perf" -> Json.obj( "key" -> "global", "name" -> "Global", "icon" -> "C"), "results" -> stat.results ) :: lila.rating.PerfType.nonPuzzle.flatMap { pt => stat.perfResults.m.get(pt) map { results => Json.obj( "perf" -> pt, "results" -> results) } }) ) } }
bjhaid/lila
modules/coach/src/main/JsonView.scala
Scala
mit
1,596
package uk.gov.gds.ier.transaction.overseas.parentsAddress

import uk.gov.gds.ier.serialiser.WithSerialiser
import uk.gov.gds.ier.step.StepTemplate
import uk.gov.gds.ier.service.WithAddressService
import uk.gov.gds.ier.model.Addresses
import uk.gov.gds.ier.transaction.overseas.{InprogressOverseas, WithOverseasControllers}
import uk.gov.gds.ier.model.PossibleAddress

trait ParentsAddressSelectMustache extends StepTemplate[InprogressOverseas] {
  self: WithAddressService
    with WithOverseasControllers
    with WithSerialiser =>

  val title = "What was your parent or guardian's last UK address?"
  val questionNumber = ""

  case class SelectModel(
      question: Question,
      lookupUrl: String,
      manualUrl: String,
      postcode: Field,
      address: Field,
      possibleJsonList: Field,
      possiblePostcode: Field,
      hasAddresses: Boolean,
      hasAuthority: Boolean
  ) extends MustacheData

  val mustache = MustacheTemplate("overseas/parentsAddressSelect") { (form, post) =>
    implicit val progressForm = form

    val selectedUprn = form(keys.parentsAddress.uprn).value
    val postcode = form(keys.parentsAddress.postcode).value.orElse {
      form(keys.possibleAddresses.postcode).value
    }

    val storedAddresses = for (
      jsonList <- form(keys.possibleAddresses.jsonList).value;
      postcode <- postcode
    ) yield {
      PossibleAddress(
        jsonList = serialiser.fromJson[Addresses](jsonList),
        postcode = postcode
      )
    }

    //IER0091 : Temp removing the storedAddresses section of the code checks to remove populating against the hidden input field
    //val maybeAddresses = storedAddresses orElse lookupAddresses(postcode)
    val maybeAddresses = lookupAddresses(postcode)

    val options = maybeAddresses.map { possibleAddress =>
      possibleAddress.jsonList.addresses
    }.getOrElse(List.empty).map { address =>
      SelectOption(
        value = address.uprn.getOrElse(""),
        text = address.addressLine.getOrElse(""),
        selected = if (address.uprn == selectedUprn) {
          "selected=\"selected\""
        } else ""
      )
    }

    val hasAddresses = maybeAddresses.exists { poss =>
      !poss.jsonList.addresses.isEmpty
    }

    val hasAuthority = hasAddresses || addressService.validAuthority(postcode)

    val addressSelect = SelectField(
      key = keys.parentsAddress.uprn,
      optionList = options,
      default = SelectOption(
        value = "",
        text = s"${options.size} addresses found"
      )
    )
    val addressSelectWithError = addressSelect.copy(
      classes = if (!hasAddresses) {
        "invalid"
      } else {
        addressSelect.classes
      }
    )

    SelectModel(
      question = Question(
        postUrl = post.url,
        number = questionNumber,
        title = title,
        errorMessages = progressForm.globalErrors.map(_.message)
      ),
      lookupUrl = overseas.ParentsAddressStep.routing.get.url,
      manualUrl = overseas.ParentsAddressManualStep.routing.get.url,
      postcode = TextField(keys.parentsAddress.postcode),
      address = addressSelectWithError,
      possibleJsonList = TextField(keys.possibleAddresses.jsonList).copy(
        value = maybeAddresses.map { poss =>
          serialiser.toJson(poss.jsonList)
        }.getOrElse("")
      ),
      possiblePostcode = TextField(keys.possibleAddresses.postcode).copy(
        value = form(keys.parentsAddress.postcode).value.getOrElse("")
      ),
      hasAddresses = hasAddresses,
      hasAuthority = hasAuthority
    )
  }

  private[parentsAddress] def lookupAddresses(
      maybePostcode: Option[String]): Option[PossibleAddress] = {
    maybePostcode.map { postcode =>
      val addresses = addressService.lookupPartialAddress(postcode)
      PossibleAddress(
        jsonList = Addresses(addresses),
        postcode = postcode
      )
    }
  }
}
michaeldfallen/ier-frontend
app/uk/gov/gds/ier/transaction/overseas/parentsAddress/ParentsAddressSelectMustache.scala
Scala
mit
3,889
/*
 * Artificial Intelligence for Humans
 * Volume 1: Fundamental Algorithms
 * Scala Version
 * http://www.aifh.org
 * http://www.jeffheaton.com
 *
 * Code repository:
 * https://github.com/jeffheaton/aifh
 * Copyright 2013 by Jeff Heaton
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For more information on Heaton Research copyrights, licenses
 * and trademarks visit:
 * http://www.heatonresearch.com/copyright
 */
package com.heatonresearch.aifh.general.data

import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions

/**
 * Support class for adding methods to our mutable and immutable vector types.
 * Importing RichData._ into a file will add methods to Vector and ArrayBuffer
 */
object RichData {
  implicit def toRichVector(v: Vector[Double]): RichVector = {
    new RichVector(v)
  }

  implicit def toRichBuffer[T](b: ArrayBuffer[T]): RichArrayBuffer[T] = new RichArrayBuffer(b)
}

class RichVector(data: Vector[Double]) {

  /**
   * Return the index that has the max value.
   *
   * @return The index.
   */
  def maxIndex: Int = {
    var result: Int = -1
    var max = Double.NegativeInfinity
    (0 until data.length) foreach { i =>
      if (data(i) > max) {
        max = data(i)
        result = i
      }
    }
    result
  }

  def swap(p1: Int, p2: Int): Vector[Double] = {
    val temp1 = data(p1)
    val temp2 = data(p2)
    data.updated(p1, temp2).updated(p2, temp1)
  }
}

class RichArrayBuffer[T](buffer: ArrayBuffer[T]) {

  def set(values: Seq[T]) {
    buffer.clear()
    buffer ++= values
  }

  def updateValues(f: (Int, T) => T) {
    for (i <- 0 until buffer.length)
      buffer(i) = f(i, buffer(i))
  }

  def setFrom(other: IndexedSeq[T], otherStart: Int, thisStart: Int, noElements: Int) {
    for (i <- 0 until noElements)
      buffer(thisStart + i) = other(otherStart + i)
  }
}
HairyFotr/aifh
vol1/scala-examples/src/main/scala/com/heatonresearch/aifh/general/data/RichData.scala
Scala
apache-2.0
2,379
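A small usage sketch for the enrichments above (not part of the AIFH sources; the numbers are arbitrary), showing what importing `RichData._` buys you:

// Illustrative only: script-style usage of the implicit conversions above.
import com.heatonresearch.aifh.general.data.RichData._
import scala.collection.mutable.ArrayBuffer

val v = Vector(0.5, 2.5, 1.0)
v.maxIndex                          // 1 -- index of the largest element
v.swap(0, 2)                        // Vector(1.0, 2.5, 0.5) -- returns a new Vector

val buf = ArrayBuffer(1, 2, 3)
buf.updateValues((_, x) => x * 10)  // buf is now ArrayBuffer(10, 20, 30)
buf.set(Seq(7, 8))                  // buf is now ArrayBuffer(7, 8)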
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ui.scope import scala.collection.mutable import org.apache.spark.SparkConf import org.apache.spark.scheduler._ import org.apache.spark.ui.SparkUI /** * A SparkListener that constructs a DAG of RDD operations. */ private[ui] class RDDOperationGraphListener(conf: SparkConf) extends SparkListener { // Note: the fate of jobs and stages are tied. This means when we clean up a job, // we always clean up all of its stages. Similarly, when we clean up a stage, we // always clean up its job (and, transitively, other stages in the same job). private[ui] val jobIdToStageIds = new mutable.HashMap[Int, Seq[Int]] private[ui] val jobIdToSkippedStageIds = new mutable.HashMap[Int, Seq[Int]] private[ui] val stageIdToJobId = new mutable.HashMap[Int, Int] private[ui] val stageIdToGraph = new mutable.HashMap[Int, RDDOperationGraph] private[ui] val completedStageIds = new mutable.HashSet[Int] // Keep track of the order in which these are inserted so we can remove old ones private[ui] val jobIds = new mutable.ArrayBuffer[Int] private[ui] val stageIds = new mutable.ArrayBuffer[Int] // How many jobs or stages to retain graph metadata for private val retainedJobs = conf.getInt("spark.ui.retainedJobs", SparkUI.DEFAULT_RETAINED_JOBS) private val retainedStages = conf.getInt("spark.ui.retainedStages", SparkUI.DEFAULT_RETAINED_STAGES) /** * Return the graph metadata for all stages in the given job. * An empty list is returned if one or more of its stages has been cleaned up. */ def getOperationGraphForJob(jobId: Int): Seq[RDDOperationGraph] = synchronized { val skippedStageIds = jobIdToSkippedStageIds.getOrElse(jobId, Seq.empty) val graphs = jobIdToStageIds.getOrElse(jobId, Seq.empty) .flatMap { sid => stageIdToGraph.get(sid) } // Mark any skipped stages as such graphs.foreach { g => val stageId = g.rootCluster.id.replaceAll(RDDOperationGraph.STAGE_CLUSTER_PREFIX, "").toInt if (skippedStageIds.contains(stageId) && !g.rootCluster.name.contains("skipped")) { g.rootCluster.setName(g.rootCluster.name + " (skipped)") } } graphs } /** Return the graph metadata for the given stage, or None if no such information exists. */ def getOperationGraphForStage(stageId: Int): Option[RDDOperationGraph] = synchronized { stageIdToGraph.get(stageId) } /** On job start, construct a RDDOperationGraph for each stage in the job for display later. 
*/ override def onJobStart(jobStart: SparkListenerJobStart): Unit = synchronized { val jobId = jobStart.jobId val stageInfos = jobStart.stageInfos jobIds += jobId jobIdToStageIds(jobId) = jobStart.stageInfos.map(_.stageId).sorted stageInfos.foreach { stageInfo => val stageId = stageInfo.stageId stageIds += stageId stageIdToJobId(stageId) = jobId stageIdToGraph(stageId) = RDDOperationGraph.makeOperationGraph(stageInfo) trimStagesIfNecessary() } trimJobsIfNecessary() } /** Keep track of stages that have completed. */ override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = synchronized { val stageId = stageCompleted.stageInfo.stageId if (stageIdToJobId.contains(stageId)) { // Note: Only do this if the stage has not already been cleaned up // Otherwise, we may never clean this stage from `completedStageIds` completedStageIds += stageCompleted.stageInfo.stageId } } /** On job end, find all stages in this job that are skipped and mark them as such. */ override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = synchronized { val jobId = jobEnd.jobId jobIdToStageIds.get(jobId).foreach { stageIds => val skippedStageIds = stageIds.filter { sid => !completedStageIds.contains(sid) } // Note: Only do this if the job has not already been cleaned up // Otherwise, we may never clean this job from `jobIdToSkippedStageIds` jobIdToSkippedStageIds(jobId) = skippedStageIds } } /** Clean metadata for old stages if we have exceeded the number to retain. */ private def trimStagesIfNecessary(): Unit = { if (stageIds.size >= retainedStages) { val toRemove = math.max(retainedStages / 10, 1) stageIds.take(toRemove).foreach { id => cleanStage(id) } stageIds.trimStart(toRemove) } } /** Clean metadata for old jobs if we have exceeded the number to retain. */ private def trimJobsIfNecessary(): Unit = { if (jobIds.size >= retainedJobs) { val toRemove = math.max(retainedJobs / 10, 1) jobIds.take(toRemove).foreach { id => cleanJob(id) } jobIds.trimStart(toRemove) } } /** Clean metadata for the given stage, its job, and all other stages that belong to the job. */ private[ui] def cleanStage(stageId: Int): Unit = { completedStageIds.remove(stageId) stageIdToGraph.remove(stageId) stageIdToJobId.remove(stageId).foreach { jobId => cleanJob(jobId) } } /** Clean metadata for the given job and all stages that belong to it. */ private[ui] def cleanJob(jobId: Int): Unit = { jobIdToSkippedStageIds.remove(jobId) jobIdToStageIds.remove(jobId).foreach { stageIds => stageIds.foreach { stageId => cleanStage(stageId) } } } }
gioenn/xSpark
core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraphListener.scala
Scala
apache-2.0
6,093
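The listener above bounds its memory by trimming old graph metadata; the snippet below is an illustrative reminder (not from the Spark sources, values arbitrary) of the two configuration keys its constructor reads:

// Keys read by RDDOperationGraphListener(conf); when absent it falls back to
// SparkUI.DEFAULT_RETAINED_JOBS and SparkUI.DEFAULT_RETAINED_STAGES.
import org.apache.spark.SparkConf

val conf = new SparkConf()
  .set("spark.ui.retainedJobs", "200")    // keep graphs for at most ~200 jobs
  .set("spark.ui.retainedStages", "500")  // keep graphs for at most ~500 stages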
/* * Copyright 2016 Coursera Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.coursera.naptime.actions import akka.stream.Materializer import akka.util.ByteString import com.typesafe.scalalogging.StrictLogging import org.coursera.naptime.model.KeyFormat import org.coursera.naptime.NaptimeActionException import org.coursera.naptime.RestError import org.coursera.naptime.access.HeaderAccessControl import play.api.http.Status import play.api.libs.json.JsArray import play.api.libs.json.JsError import play.api.libs.json.JsObject import play.api.libs.json.JsSuccess import play.api.libs.json.JsValue import play.api.libs.json.Json import play.api.libs.json.OFormat import play.api.libs.json.Reads import play.api.libs.streams.Accumulator import play.api.mvc.BodyParser import play.api.mvc.BodyParsers import play.api.mvc.RequestHeader import play.api.mvc.Result import scala.concurrent.ExecutionContext import scala.util.control.NonFatal /** * A builder that helps build Rest Actions. */ class RestActionBuilder[RACType, AuthType, BodyType, ResourceKeyType, ResourceType, ResponseType]( auth: HeaderAccessControl[AuthType], bodyParser: BodyParser[BodyType], errorHandler: PartialFunction[Throwable, RestError])( implicit keyFormat: KeyFormat[ResourceKeyType], resourceFormat: OFormat[ResourceType], ec: ExecutionContext, mat: Materializer) extends RestActionBuilderTerminators[ RACType, AuthType, BodyType, ResourceKeyType, ResourceType, ResponseType] { /** * Set the body type. */ def body[NewBodyType](bodyParser: BodyParser[NewBodyType]): DefinedBodyTypeRestActionBuilder[ RACType, AuthType, NewBodyType, ResourceKeyType, ResourceType, ResponseType] = new DefinedBodyTypeRestActionBuilder(auth, bodyParser, errorHandler) /** * Set the authentication framework. */ def auth[NewAuthType](auth: HeaderAccessControl[NewAuthType]): RestActionBuilder[ RACType, NewAuthType, BodyType, ResourceKeyType, ResourceType, ResponseType] = new RestActionBuilder(auth, bodyParser, errorHandler) /** * Adds an error handling function to allow exceptions to generate custom errors. * * Note: all of the partial functions are stacked, with later functions getting an earlier crack * at an exception to handle it. * * @param errorHandler Error handling partial function. * @return the immutable RestActionBuilder to be used to build the naptime resource action. */ def catching(errorHandler: PartialFunction[Throwable, RestError]) : RestActionBuilder[RACType, AuthType, BodyType, ResourceKeyType, ResourceType, ResponseType] = new RestActionBuilder(auth, bodyParser, errorHandler.orElse(this.errorHandler)) /** * Set the response type. * TODO: is this necessary? 
*/ def returning[NewResponseType](): RestActionBuilder[ RACType, AuthType, BodyType, ResourceKeyType, ResourceType, NewResponseType] = new RestActionBuilder(auth, bodyParser, errorHandler) def rawJsonBody(maxLength: Int = 100 * 1024) = body(BodyParsers.parse.tolerantJson(maxLength)) def jsonBody[NewBodyType](implicit reads: Reads[NewBodyType]): DefinedBodyTypeRestActionBuilder[ RACType, AuthType, NewBodyType, ResourceKeyType, ResourceType, ResponseType] = { jsonBody[NewBodyType]() } def jsonBody[NewBodyType](maxLength: Int = 100 * 1024)( implicit reads: Reads[NewBodyType]): DefinedBodyTypeRestActionBuilder[ RACType, AuthType, NewBodyType, ResourceKeyType, ResourceType, ResponseType] = { val parser: BodyParser[NewBodyType] = new BodyParser[NewBodyType] with StrictLogging { override def apply( rh: RequestHeader): Accumulator[ByteString, Either[Result, NewBodyType]] = { val innerParser = BodyParsers.parse.tolerantJson(maxLength) innerParser(rh).map(_.right.map(toJsObj).joinRight) } private[this] def toJsObj(js: JsValue): Either[Result, NewBodyType] = { try { js.validate[NewBodyType] match { case JsSuccess(obj, _) => Right(obj) case JsError(parseErrors) => val errorDetails = JsObject(for { (path, errors) <- parseErrors } yield { path.toString -> JsArray(for { error <- errors } yield { Json.obj("message" -> error.message, "args" -> error.args.map(_.toString)) }) }) val response = NaptimeActionException( httpCode = Status.BAD_REQUEST, errorCode = None, message = Some("JSON didn't validate"), details = Some(errorDetails)) Left(response.result) } } catch { case e: IllegalArgumentException => logger.info(s"Request failed validation.", e) val resp = NaptimeActionException( httpCode = Status.BAD_REQUEST, errorCode = Some("request.validation"), message = Some(e.getMessage), details = None, cause = Some(e)) Left(resp.result) case NonFatal(e) => logger.error(s"Unknown exception while parsing body of request.", e) throw e } } } body(parser) } override protected def bodyBuilder[Category, Response](): BodyBuilder[Category, Response] = { new RestActionBodyBuilder[ Category, AuthType, BodyType, ResourceKeyType, ResourceType, Response](auth, bodyParser, errorHandler) } }
coursera/naptime
naptime/src/main/scala/org/coursera/naptime/actions/RestActionBuilder.scala
Scala
apache-2.0
6,245
package io.surfkit.elasticsearch import java.io.IOException import java.net.URLEncoder import java.util.UUID import akka.Done import akka.actor.{ActorRef, ActorSystem} import akka.http.scaladsl.Http import akka.http.scaladsl.client.RequestBuilding import akka.http.scaladsl.model.HttpHeader.ParsingResult import akka.http.scaladsl.model.StatusCodes._ import akka.http.scaladsl.model._ import akka.http.scaladsl.model.ws._ import akka.http.scaladsl.unmarshalling.Unmarshal import akka.stream._ import akka.stream.scaladsl._ import play.api.libs.json._ import scala.concurrent.{Future, Await, Promise} import scala.util.{Failure, Success} import scala.concurrent.duration._ import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.duration.FiniteDuration import akka.http.scaladsl.model.headers._ /** * Created by coreyauger */ // TODO: take a look at pipelining-limit = 1 object ESClient{ /*def buildRequest(gremlin: String, bindings: Map[String,String] = Map.empty[String, String] ) = { Gremlin.Request( requestId = UUID.randomUUID, op = Gremlin.Op.eval, processor = "", args = Gremlin.Args( gremlin = gremlin, bindings = bindings, language = Gremlin.Language.`gremlin-groovy` ) ) }*/ val reportFailure:scala.PartialFunction[scala.Throwable, Unit] = { case t:Throwable => println("[ERROR]: " + t.getMessage) t.printStackTrace() } } class ESClient(host:String = "localhost", port: Int = 9200, responder:Option[ActorRef] = None, implicit val system: ActorSystem = ActorSystem()) { import ES._ private[this] val decider: Supervision.Decider = { case _ => Supervision.Resume } implicit val materializer = ActorMaterializer(ActorMaterializerSettings(system).withSupervisionStrategy(decider)) private[this] val poolClientFlow = Http().cachedHostConnectionPool[Promise[HttpResponse]](host = host, port = port) private[this] val queue = Source.queue[(HttpRequest, Promise[HttpResponse])](500000, OverflowStrategy.backpressure) .via(poolClientFlow) .toMat(Sink.foreach({ case ((Success(resp), p)) => p.success(resp) case ((Failure(e), p)) => p.failure(e) }))(Keep.left) .run def shutdown = { Http().shutdownAllConnectionPools() } def request(req: HttpRequest = HttpRequest(uri = "/")):Future[String] = { println(s"~requesting: ${req.uri}") val promise = Promise[HttpResponse] val request = req -> promise val response = queue.offer(request).flatMap(buffered => { if (buffered == QueueOfferResult.Enqueued) promise.future else Future.failed(new RuntimeException("Failed to queue elastic search request.")) }) response.flatMap{ r => r.status match { case OK => Unmarshal(r.entity).to[String] case Created => Unmarshal(r.entity).to[String] case _ => Unmarshal(r.entity).to[String].flatMap { entity => val error = s"[ERROR] - ${host}:${port} HTTP request to (${req.uri}) failed with status code (${r.status}) and entity '$entity'" println(error) Future.failed(new IOException(error)) } } } } def header(key: String, value: String): Option[HttpHeader] = HttpHeader.parse(key, value) match { case ParsingResult.Ok(header, errors) ⇒ Option(header) case _ ⇒ None } def headers(headersMap: Map[String, String]): List[HttpHeader] = headersMap.flatMap { case (key, value) ⇒ header(key, value) }.toList def mkEntity(body: String): HttpEntity.Strict = HttpEntity(ContentTypes.`application/json`, body) def mkRequest(requestBuilder: RequestBuilding#RequestBuilder, url: String, body: String = "", queryParamsMap: Map[String, String] = Map.empty, headersMap: Map[String, String] = Map.empty) = requestBuilder(url + queryString(queryParamsMap), 
mkEntity(body)).withHeaders(headersMap.map{ case (n, v) => RawHeader(n, v) }.to[collection.immutable.Seq] ) def queryString(p:Map[String, String]):String = p.headOption.map(_ => "?").getOrElse("") + p.map(x => s"${x._1}=${URLEncoder.encode(x._2,"UTF-8")}").mkString("&") def api[T <: ES.ESResponse](req: HttpRequest)(implicit fjs: Reads[T]):Future[T] = request(req).map{s => fjs.reads(Json.parse(s)) match{ case x:JsSuccess[T] => x.get case x:JsError => println(s"[ERROR] ES parse json: for request(${req}) ${s}") println(s"[ERROR] ES parse error for request(${req}): ${x.errors}") throw new RuntimeException(s"[ERROR] ES parse error: ${x.errors}") } } def health(params: Map[String, String] = Map.empty[String, String]):Future[ES.Health] = api[ES.Health](mkRequest(RequestBuilding.Get, "/_cluster/health", "", params)) def search(index: String = "", `type`: String = "", body: JsValue, params: Map[String, String] = Map.empty[String, String]):Future[ES.Search] = { val uri = List(index, `type`, "_search").mkString("/","/","") api[ES.Search](mkRequest(RequestBuilding.Post, uri, body.toString, params)) } def searchLite(index: String = "", `type`: String = "", query: String, params: Map[String, String] = Map.empty[String, String]):Future[ES.Search] = { val uri = List(index, `type`, "_search").mkString("/","/","") api[ES.Search](mkRequest(RequestBuilding.Get, uri, "", params + ("q" -> query) )) } def get(index: String = "", `type`: String = "", id: String, params: Map[String, String] = Map.empty[String, String]):Future[ES.IndexLookup] = { val uri = List(index, `type`, id).mkString("/","/","") api[ES.IndexLookup](mkRequest(RequestBuilding.Get, uri, "", params )) } // TODO: what about custom mappings... // https://www.elastic.co/guide/en/elasticsearch/guide/current/mapping-intro.html def indexJs(index: String, `type`: String, id: String, json: JsValue, params: Map[String, String] = Map.empty[String, String]):Future[ES.IndexCreate] = this.index(index, `type`, id, json.toString, params) def index(index: ES.Index, json: JsValue, params: Map[String, String]):Future[ES.IndexCreate] = this.index(index._index, index._type, index._id, json.toString, params) def index(index: String, `type`: String, id: String, json: String, params: Map[String, String] = Map.empty[String, String]):Future[ES.IndexCreate] = { val uri = List(index, `type`, id).filter(_ != "").mkString("/","/","") api[ES.IndexCreate](mkRequest(RequestBuilding.Put, uri, json, params)) } def putMappingJs(index: String, `type`: String, json: JsValue, params: Map[String, String] = Map.empty[String, String]):Future[ES.Ack] = this.putMapping(index, `type`, json.toString, params) def putMapping(index: ES.Index, json: JsValue, params: Map[String, String]):Future[ES.Ack] = this.putMapping(index._index, index._type, json.toString, params) def putMapping(index: String, `type`: String, json: String, params: Map[String, String] = Map.empty[String, String]):Future[ES.Ack] = { val uri = List(index, `type`, "_mapping").filter(_ != "").mkString("/","/","") println(s"PUT MAPPING - ${json}") api[ES.Ack](mkRequest(RequestBuilding.Put, uri, json, params)) } def putIndex(index: String):Future[ES.Ack] = { val uri = List(index).filter(_ != "").mkString("/","/","") api[ES.Ack](mkRequest(RequestBuilding.Put, uri)) } def delete(index: ES.Index, params: Map[String, String]):Future[ES.Delete] = this.delete(index._index, index._type, index._id, params) def delete(index: String, `type`: String = "", id: String = "", params: Map[String, String] = Map.empty[String, 
String]):Future[ES.Delete] = { val uri = List(index, `type`, id).filter(_ != "").mkString("/","/","") api[ES.Delete](mkRequest(RequestBuilding.Delete, uri, "", params)) } def analyze(analyzer: String = "standard", text: String, params: Map[String, String] = Map.empty[String, String]): Future[ES.Tokens] = api[ES.Tokens](mkRequest(RequestBuilding.Get, "/_analyze", "", params ++ Map("analyzer" -> analyzer, "text" -> text))) def getMapping(index: String, `type`: String, params: Map[String, String] = Map.empty[String, String]):Future[JsValue] = { val uri = List(index, `type`, "_mapping").mkString("/","/","") request(mkRequest(RequestBuilding.Get, uri, "", params)).map(x => Json.parse(x)) } def bulk(index: String = "", `type`: String, bulk: ES.Bulk.Request, params: Map[String, String] = Map.empty[String, String]):Future[JsValue] = { val uri = List(index, `type`, "_bulk").mkString("/","/","") val req = mkRequest(RequestBuilding.Post, uri, ES.Bulk.FormatJson(bulk), params) request(req).map(x => Json.parse(x)) } }
coreyauger/reactive-elastic-search
src/main/scala/io/surfkit/elasticsearch/ESClient.scala
Scala
mit
8,692
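A minimal usage sketch for the client above (not part of the original sources; the host, index, type and query string are placeholders):

import scala.concurrent.ExecutionContext.Implicits.global

// Connect to a local node using the visible constructor defaults.
val es = new ESClient(host = "localhost", port = 9200)

// Cluster health check.
es.health().foreach(h => println(s"cluster health: $h"))

// "Search lite" with a query-string query against a hypothetical index/type.
es.searchLite(index = "books", `type` = "book", query = "title:scala")
  .foreach(hits => println(hits))

es.shutdown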
/** * Licensed to Big Data Genomics (BDG) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The BDG licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.bdgenomics.adam.apis.java import org.apache.parquet.hadoop.metadata.CompressionCodecName import org.apache.spark.api.java.JavaRDD import org.bdgenomics.adam.models.{ RecordGroupDictionary, SequenceDictionary } import org.bdgenomics.adam.rdd.ADAMContext._ import org.bdgenomics.adam.rdd.ADAMSaveAnyArgs import org.bdgenomics.formats.avro._ private class JavaSaveArgs(var outputPath: String, var blockSize: Int = 128 * 1024 * 1024, var pageSize: Int = 1 * 1024 * 1024, var compressionCodec: CompressionCodecName = CompressionCodecName.GZIP, var disableDictionaryEncoding: Boolean = false, var asSingleFile: Boolean = false) extends ADAMSaveAnyArgs { var sortFastqOutput = false } class JavaAlignmentRecordRDD(val jrdd: JavaRDD[AlignmentRecord], val sd: SequenceDictionary, val rgd: RecordGroupDictionary) extends Serializable { /** * Saves this RDD to disk as a Parquet file. * * @param filePath Path to save the file at. * @param blockSize Size per block. * @param pageSize Size per page. * @param compressCodec Name of the compression codec to use. * @param disableDictionaryEncoding Whether or not to disable bit-packing. */ def adamSave( filePath: java.lang.String, blockSize: java.lang.Integer, pageSize: java.lang.Integer, compressCodec: CompressionCodecName, disableDictionaryEncoding: java.lang.Boolean) { jrdd.rdd.saveAsParquet( new JavaSaveArgs(filePath, blockSize = blockSize, pageSize = pageSize, compressionCodec = compressCodec, disableDictionaryEncoding = disableDictionaryEncoding), sd, rgd) } /** * Saves this RDD to disk as a Parquet file. * * @param filePath Path to save the file at. */ def adamSave( filePath: java.lang.String) { jrdd.rdd.saveAsParquet( new JavaSaveArgs(filePath), sd, rgd) } /** * Saves this RDD to disk as a SAM/BAM file. * * @param filePath Path to save the file at. * @param sd A dictionary describing the contigs this file is aligned against. * @param rgd A dictionary describing the read groups in this file. * @param asSam If true, saves as SAM. If false, saves as BAM. * @param asSingleFile If true, saves output as a single file. * @param isSorted If the output is sorted, this will modify the header. */ def adamSAMSave( filePath: java.lang.String, asSam: java.lang.Boolean, asSingleFile: java.lang.Boolean, isSorted: java.lang.Boolean) { jrdd.rdd.adamSAMSave(filePath, sd, rgd, asSam = asSam, asSingleFile = asSingleFile, isSorted = isSorted) } }
rnpandya/adam
adam-apis/src/main/scala/org/bdgenomics/adam/apis/java/JavaAlignmentRecordRDD.scala
Scala
apache-2.0
3,632
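A hedged sketch of calling the Java-facing wrapper above from Scala; `reads`, `sd` and `rgd` are assumed to already exist (a JavaRDD[AlignmentRecord] plus its sequence and record-group dictionaries) and the output paths are placeholders:

// Illustrative only, not part of the ADAM sources.
val alignments = new JavaAlignmentRecordRDD(reads, sd, rgd)

// Parquet with the default block/page sizes and the GZIP codec.
alignments.adamSave("hdfs:///out/reads.adam")

// Single, sorted SAM file: asSam = true, asSingleFile = true, isSorted = true.
alignments.adamSAMSave("hdfs:///out/reads.sam", true, true, true)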
package cs.ucla.edu.bwaspark.datatype class ExtParam() { var leftQs: Array[Byte] = _ var leftQlen: Int = -1 var leftRs: Array[Byte] = _ var leftRlen: Int = -1 var rightQs: Array[Byte] = _ var rightQlen: Int = -1 var rightRs: Array[Byte] = _ var rightRlen: Int = -1 var w: Int = -1 var mat: Array[Byte] = _ var oDel: Int = -1 var eDel: Int = -1 var oIns: Int = -1 var eIns: Int = -1 var penClip5: Int = -1 var penClip3: Int = -1 var zdrop: Int = -1 var h0: Int = -1 var regScore: Int = -1 var qBeg: Int = -1; //var rBeg: Long = -1l; //var qe: Int = -1; //var re: Long = -1l; var idx: Int = -1 //var rmax0: Long = -1l def display() { println("leftQlen: " + leftQlen) if (leftQlen > 0) leftQs.foreach(ele => {print(ele + " ")}) println() println("leftRlen: " + leftRlen) if (leftRlen > 0) leftRs.foreach(ele => {print(ele + " ")}) println() println("rightQlen: " + rightQlen) if (rightQlen > 0 ) rightQs.foreach(ele => {print(ele + " ")}) println() println("rightRlen: " + rightRlen) if (rightRlen > 0) rightRs.foreach(ele => {print(ele + " ")}) println() println("w: " + w) println("oDel: " + oDel) println("eDel: " + eDel) println("oIns: " + oIns) println("eIns: " + eIns) println("penClip5: " + penClip5) println("penClip3: " + penClip3) println("zdrop: " + zdrop) println("h0: " + h0) println("regScore: " + regScore) println("qBeg: " + qBeg) //println("rBeg: " + rBeg) //println("qe: " + qe) //println("re: " + re) println("idx: " + idx) //println("rmax0: " + rmax0) } } class ExtRet() { var qBeg: Int = -1 var rBeg: Long = -1 var qEnd: Int = -1 var rEnd: Long = -1 var score: Int = -1 var trueScore: Int = -1 var width: Int = -1 var idx: Int = -1 def display() { println("qBeg: " + qBeg) println("rBeg: " + rBeg) println("qEnd: " + qEnd) println("rEnd: " + rEnd) println("score: " + score) println("trueScore: " + trueScore) println("width: " + width) println("idx: " + idx) } }
peterpengwei/bwa-spark-fpga
src/main/scala/cs/ucla/edu/bwaspark/datatype/ExtensionParameters.scala
Scala
gpl-2.0
2,001
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.streaming.scala.examples import java.io.File import org.apache.commons.io.FileUtils import org.apache.flink.core.fs.FileSystem.WriteMode import org.apache.flink.streaming.api.TimeCharacteristic import org.apache.flink.streaming.api.scala._ import org.apache.flink.streaming.scala.examples.join.WindowJoin import org.apache.flink.streaming.scala.examples.join.WindowJoin.{Grade, Salary} import org.apache.flink.streaming.test.examples.join.WindowJoinData import org.apache.flink.streaming.util.StreamingMultipleProgramsTestBase import org.apache.flink.test.util.TestBaseUtils import org.junit.Test class WindowJoinITCase extends StreamingMultipleProgramsTestBase { @Test def testProgram(): Unit = { val resultPath: String = File.createTempFile("result-path", "dir").toURI().toString() try { val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment env.setStreamTimeCharacteristic(TimeCharacteristic.IngestionTime) val grades: DataStream[Grade] = env .fromCollection(WindowJoinData.GRADES_INPUT.split("\n")) .map( line => { val fields = line.split(",") Grade(fields(1), fields(2).toInt) }) val salaries: DataStream[Salary] = env .fromCollection(WindowJoinData.SALARIES_INPUT.split("\n")) .map( line => { val fields = line.split(",") Salary(fields(1), fields(2).toInt) }) WindowJoin.joinStreams(grades, salaries, 100) .writeAsText(resultPath, WriteMode.OVERWRITE) env.execute() TestBaseUtils.checkLinesAgainstRegexp(resultPath, "^Person\\([a-z]+,(\\d),(\\d)+\\)") } finally { try { FileUtils.deleteDirectory(new File(resultPath)) } catch { case _ : Throwable => } } } }
WangTaoTheTonic/flink
flink-examples/flink-examples-streaming/src/test/scala/org/apache/flink/streaming/scala/examples/WindowJoinITCase.scala
Scala
apache-2.0
2,671
package com.twitter.finagle.mysql import com.twitter.finagle._ import com.twitter.finagle.param.Stats import com.twitter.finagle.stats.StatsReceiver import com.twitter.logging.Logger import com.twitter.util.{Future, Return, Stopwatch, Throw, Time} object RollbackFactory { private val RollbackQuery = QueryRequest("ROLLBACK") private val log = Logger.get() val Role: Stack.Role = Stack.Role("RollbackFactory") private[finagle] def module: Stackable[ServiceFactory[Request, Result]] = new Stack.Module1[Stats, ServiceFactory[Request, Result]] { val role: Stack.Role = Role val description: String = "Installs a rollback factory in the stack" def make( sr: Stats, next: ServiceFactory[Request, Result] ): ServiceFactory[Request, Result] = { new RollbackFactory(next, sr.statsReceiver) } } } /** * A `ServiceFactory` that ensures a ROLLBACK statement is issued when a service is put * back into the connection pool. * * @see https://dev.mysql.com/doc/en/implicit-commit.html */ final class RollbackFactory(client: ServiceFactory[Request, Result], statsReceiver: StatsReceiver) extends ServiceFactoryProxy(client) { import RollbackFactory._ private[this] val rollbackLatencyStat = statsReceiver.stat(s"rollback_latency_ms") private[this] def wrap(underlying: Service[Request, Result]): Service[Request, Result] = new ServiceProxy[Request, Result](underlying) { override def close(deadline: Time): Future[Unit] = { val elapsed = Stopwatch.start() self(RollbackQuery).transform { result => rollbackLatencyStat.add(elapsed().inMillis) result match { case Return(_) => self.close(deadline) case Throw(_: ChannelClosedException) => // Don't log the exception on ChannelClosedExceptions because it is noisy. // We want to close the connection if we can't issue a rollback // since we assume it isn't a "clean" connection to put back into // the pool. poisonAndClose(deadline) case Throw(t) => log.warning( t, "rollback failed when putting service back into pool, closing connection" ) // We want to close the connection if we can't issue a rollback // since we assume it isn't a "clean" connection to put back into // the pool. poisonAndClose(deadline) } } } private[this] def poisonAndClose(deadline: Time): Future[Unit] = { self(PoisonConnectionRequest).transform { _ => self.close(deadline) } } } private[this] val wrapFn: Service[Request, Result] => Service[Request, Result] = { svc => wrap(svc) } override def apply(conn: ClientConnection): Future[Service[Request, Result]] = super.apply(conn).map(wrapFn) }
luciferous/finagle
finagle-mysql/src/main/scala/com/twitter/finagle/mysql/RollbackFactory.scala
Scala
apache-2.0
2,948
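An illustrative way to apply the factory above outside the stack module (not from the Finagle sources; `underlying` is an assumed, pre-built factory):

import com.twitter.finagle.ServiceFactory
import com.twitter.finagle.mysql.{Request, Result, RollbackFactory}
import com.twitter.finagle.stats.NullStatsReceiver

// `underlying` is assumed: a ServiceFactory[Request, Result] obtained elsewhere.
val withRollback: ServiceFactory[Request, Result] =
  new RollbackFactory(underlying, NullStatsReceiver)

// Every Service checked out of `withRollback` issues ROLLBACK when it is
// closed; if the rollback fails for anything other than a closed channel,
// the connection is poisoned instead of being returned to the pool.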
package com.eugene.impatience_scala

import scala.util.Random

/**
 * Created by eugene on 16/2/27.
 */
object Chap1 {
  def main(args: Array[String]) {
    val x = math.sqrt(3)
    println(3 - x * x)

    val y = BigInt(2).pow(1024)
    println(y)

    val z = BigInt.probablePrime(100, Random)
    val a = BigInt(Random.nextInt()).toString(36)
    println(a)

    val s = "hello"
    println(s(0))
    println(s.take(1))
    println(s.takeRight(1))
    println(s.reverse(0))
  }
}
Ernestyj/ScalaStudy
src/com/eugene/impatience_scala/Chap1.scala
Scala
gpl-3.0
537
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.controller import collection._ import collection.JavaConversions._ import java.util.concurrent.atomic.AtomicBoolean import kafka.common.{TopicAndPartition, StateChangeFailedException} import kafka.utils.{ZkUtils, Logging} import org.I0Itec.zkclient.IZkChildListener import org.apache.log4j.Logger import kafka.controller.Callbacks._ import kafka.utils.Utils._ /** * This class represents the state machine for replicas. It defines the states that a replica can be in, and * transitions to move the replica to another legal state. The different states that a replica can be in are - * 1. NewReplica : The controller can create new replicas during partition reassignment. In this state, a * replica can only get become follower state change request. Valid previous * state is NonExistentReplica * 2. OnlineReplica : Once a replica is started and part of the assigned replicas for its partition, it is in this * state. In this state, it can get either become leader or become follower state change requests. * Valid previous state are NewReplica, OnlineReplica or OfflineReplica * 3. OfflineReplica : If a replica dies, it moves to this state. This happens when the broker hosting the replica * is down. Valid previous state are NewReplica, OnlineReplica * 4. ReplicaDeletionStarted: If replica deletion starts, it is moved to this state. Valid previous state is OfflineReplica * 5. ReplicaDeletionSuccessful: If replica responds with no error code in response to a delete replica request, it is * moved to this state. Valid previous state is ReplicaDeletionStarted * 6. ReplicaDeletionIneligible: If replica deletion fails, it is moved to this state. Valid previous state is ReplicaDeletionStarted * 7. NonExistentReplica: If a replica is deleted successfully, it is moved to this state. Valid previous state is * ReplicaDeletionSuccessful */ class ReplicaStateMachine(controller: KafkaController) extends Logging { private val controllerContext = controller.controllerContext private val controllerId = controller.config.brokerId private val zkClient = controllerContext.zkClient var replicaState: mutable.Map[PartitionAndReplica, ReplicaState] = mutable.Map.empty val brokerRequestBatch = new ControllerBrokerRequestBatch(controller) private val hasStarted = new AtomicBoolean(false) this.logIdent = "[Replica state machine on controller " + controller.config.brokerId + "]: " private val stateChangeLogger = KafkaController.stateChangeLogger /** * Invoked on successful controller election. First registers a broker change listener since that triggers all * state transitions for replicas. Initializes the state of replicas for all partitions by reading from zookeeper. * Then triggers the OnlineReplica state change for all replicas. 
*/ def startup() { // initialize replica state initializeReplicaState() hasStarted.set(true) // move all Online replicas to Online handleStateChanges(controllerContext.allLiveReplicas(), OnlineReplica) info("Started replica state machine with initial state -> " + replicaState.toString()) } // register broker change listener def registerListeners() { registerBrokerChangeListener() } /** * Invoked on controller shutdown. */ def shutdown() { hasStarted.set(false) replicaState.clear() } /** * This API is invoked by the broker change controller callbacks and the startup API of the state machine * @param replicas The list of replicas (brokers) that need to be transitioned to the target state * @param targetState The state that the replicas should be moved to * The controller's allLeaders cache should have been updated before this */ def handleStateChanges(replicas: Set[PartitionAndReplica], targetState: ReplicaState, callbacks: Callbacks = (new CallbackBuilder).build) { if(replicas.size > 0) { info("Invoking state change to %s for replicas %s".format(targetState, replicas.mkString(","))) try { brokerRequestBatch.newBatch() replicas.foreach(r => handleStateChange(r, targetState, callbacks)) brokerRequestBatch.sendRequestsToBrokers(controller.epoch, controllerContext.correlationId.getAndIncrement) }catch { case e: Throwable => error("Error while moving some replicas to %s state".format(targetState), e) } } } /** * This API exercises the replica's state machine. It ensures that every state transition happens from a legal * previous state to the target state. Valid state transitions are: * NonExistentReplica --> NewReplica * --send LeaderAndIsr request with current leader and isr to the new replica and UpdateMetadata request for the * partition to every live broker * * NewReplica -> OnlineReplica * --add the new replica to the assigned replica list if needed * * OnlineReplica,OfflineReplica -> OnlineReplica * --send LeaderAndIsr request with current leader and isr to the new replica and UpdateMetadata request for the * partition to every live broker * * NewReplica,OnlineReplica,OfflineReplica,ReplicaDeletionIneligible -> OfflineReplica * --send StopReplicaRequest to the replica (w/o deletion) * --remove this replica from the isr and send LeaderAndIsr request (with new isr) to the leader replica and * UpdateMetadata request for the partition to every live broker. 
* * OfflineReplica -> ReplicaDeletionStarted * --send StopReplicaRequest to the replica (with deletion) * * ReplicaDeletionStarted -> ReplicaDeletionSuccessful * -- mark the state of the replica in the state machine * * ReplicaDeletionStarted -> ReplicaDeletionIneligible * -- mark the state of the replica in the state machine * * ReplicaDeletionSuccessful -> NonExistentReplica * -- remove the replica from the in memory partition replica assignment cache * @param partitionAndReplica The replica for which the state transition is invoked * @param targetState The end state that the replica should be moved to */ def handleStateChange(partitionAndReplica: PartitionAndReplica, targetState: ReplicaState, callbacks: Callbacks) { val topic = partitionAndReplica.topic val partition = partitionAndReplica.partition val replicaId = partitionAndReplica.replica val topicAndPartition = TopicAndPartition(topic, partition) if (!hasStarted.get) throw new StateChangeFailedException(("Controller %d epoch %d initiated state change of replica %d for partition %s " + "to %s failed because replica state machine has not started") .format(controllerId, controller.epoch, replicaId, topicAndPartition, targetState)) val currState = replicaState.getOrElseUpdate(partitionAndReplica, NonExistentReplica) try { val replicaAssignment = controllerContext.partitionReplicaAssignment(topicAndPartition) targetState match { case NewReplica => assertValidPreviousStates(partitionAndReplica, List(NonExistentReplica), targetState) // start replica as a follower to the current leader for its partition val leaderIsrAndControllerEpochOpt = ZkUtils.getLeaderIsrAndEpochForPartition(zkClient, topic, partition) leaderIsrAndControllerEpochOpt match { case Some(leaderIsrAndControllerEpoch) => if(leaderIsrAndControllerEpoch.leaderAndIsr.leader == replicaId) throw new StateChangeFailedException("Replica %d for partition %s cannot be moved to NewReplica" .format(replicaId, topicAndPartition) + "state as it is being requested to become leader") brokerRequestBatch.addLeaderAndIsrRequestForBrokers(List(replicaId), topic, partition, leaderIsrAndControllerEpoch, replicaAssignment) case None => // new leader request will be sent to this replica when one gets elected } replicaState.put(partitionAndReplica, NewReplica) stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s" .format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState)) case ReplicaDeletionStarted => assertValidPreviousStates(partitionAndReplica, List(OfflineReplica), targetState) replicaState.put(partitionAndReplica, ReplicaDeletionStarted) // send stop replica command brokerRequestBatch.addStopReplicaRequestForBrokers(List(replicaId), topic, partition, deletePartition = true, callbacks.stopReplicaResponseCallback) stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s" .format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState)) case ReplicaDeletionIneligible => assertValidPreviousStates(partitionAndReplica, List(ReplicaDeletionStarted), targetState) replicaState.put(partitionAndReplica, ReplicaDeletionIneligible) stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s" .format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState)) case ReplicaDeletionSuccessful => assertValidPreviousStates(partitionAndReplica, 
List(ReplicaDeletionStarted), targetState) replicaState.put(partitionAndReplica, ReplicaDeletionSuccessful) stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s" .format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState)) case NonExistentReplica => assertValidPreviousStates(partitionAndReplica, List(ReplicaDeletionSuccessful), targetState) // remove this replica from the assigned replicas list for its partition val currentAssignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition) controllerContext.partitionReplicaAssignment.put(topicAndPartition, currentAssignedReplicas.filterNot(_ == replicaId)) replicaState.remove(partitionAndReplica) stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s" .format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState)) case OnlineReplica => assertValidPreviousStates(partitionAndReplica, List(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible), targetState) replicaState(partitionAndReplica) match { case NewReplica => // add this replica to the assigned replicas list for its partition val currentAssignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition) if(!currentAssignedReplicas.contains(replicaId)) controllerContext.partitionReplicaAssignment.put(topicAndPartition, currentAssignedReplicas :+ replicaId) stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s" .format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState)) case _ => // check if the leader for this partition ever existed controllerContext.partitionLeadershipInfo.get(topicAndPartition) match { case Some(leaderIsrAndControllerEpoch) => brokerRequestBatch.addLeaderAndIsrRequestForBrokers(List(replicaId), topic, partition, leaderIsrAndControllerEpoch, replicaAssignment) replicaState.put(partitionAndReplica, OnlineReplica) stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s" .format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState)) case None => // that means the partition was never in OnlinePartition state, this means the broker never // started a log for that partition and does not have a high watermark value for this partition } } replicaState.put(partitionAndReplica, OnlineReplica) case OfflineReplica => assertValidPreviousStates(partitionAndReplica, List(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible), targetState) // send stop replica command to the replica so that it stops fetching from the leader brokerRequestBatch.addStopReplicaRequestForBrokers(List(replicaId), topic, partition, deletePartition = false) // As an optimization, the controller removes dead replicas from the ISR val leaderAndIsrIsEmpty: Boolean = controllerContext.partitionLeadershipInfo.get(topicAndPartition) match { case Some(currLeaderIsrAndControllerEpoch) => controller.removeReplicaFromIsr(topic, partition, replicaId) match { case Some(updatedLeaderIsrAndControllerEpoch) => // send the shrunk ISR state change request to all the remaining alive replicas of the partition. 
val currentAssignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition) if (!controller.deleteTopicManager.isPartitionToBeDeleted(topicAndPartition)) { brokerRequestBatch.addLeaderAndIsrRequestForBrokers(currentAssignedReplicas.filterNot(_ == replicaId), topic, partition, updatedLeaderIsrAndControllerEpoch, replicaAssignment) } replicaState.put(partitionAndReplica, OfflineReplica) stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s" .format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState)) false case None => true } case None => true } if (leaderAndIsrIsEmpty) throw new StateChangeFailedException( "Failed to change state of replica %d for partition %s since the leader and isr path in zookeeper is empty" .format(replicaId, topicAndPartition)) } } catch { case t: Throwable => stateChangeLogger.error("Controller %d epoch %d initiated state change of replica %d for partition [%s,%d] from %s to %s failed" .format(controllerId, controller.epoch, replicaId, topic, partition, currState, targetState), t) } } def areAllReplicasForTopicDeleted(topic: String): Boolean = { val replicasForTopic = controller.controllerContext.replicasForTopic(topic) val replicaStatesForTopic = replicasForTopic.map(r => (r, replicaState(r))).toMap debug("Are all replicas for topic %s deleted %s".format(topic, replicaStatesForTopic)) replicaStatesForTopic.foldLeft(true)((deletionState, r) => deletionState && r._2 == ReplicaDeletionSuccessful) } def isAtLeastOneReplicaInDeletionStartedState(topic: String): Boolean = { val replicasForTopic = controller.controllerContext.replicasForTopic(topic) val replicaStatesForTopic = replicasForTopic.map(r => (r, replicaState(r))).toMap replicaStatesForTopic.foldLeft(false)((deletionState, r) => deletionState || r._2 == ReplicaDeletionStarted) } def replicasInState(topic: String, state: ReplicaState): Set[PartitionAndReplica] = { replicaState.filter(r => r._1.topic.equals(topic) && r._2 == state).keySet } def isAnyReplicaInState(topic: String, state: ReplicaState): Boolean = { replicaState.exists(r => r._1.topic.equals(topic) && r._2 == state) } def replicasInDeletionStates(topic: String): Set[PartitionAndReplica] = { val deletionStates = Set(ReplicaDeletionStarted, ReplicaDeletionSuccessful, ReplicaDeletionIneligible) replicaState.filter(r => r._1.topic.equals(topic) && deletionStates.contains(r._2)).keySet } private def assertValidPreviousStates(partitionAndReplica: PartitionAndReplica, fromStates: Seq[ReplicaState], targetState: ReplicaState) { assert(fromStates.contains(replicaState(partitionAndReplica)), "Replica %s should be in the %s states before moving to %s state" .format(partitionAndReplica, fromStates.mkString(","), targetState) + ". 
Instead it is in %s state".format(replicaState(partitionAndReplica))) } private def registerBrokerChangeListener() = { zkClient.subscribeChildChanges(ZkUtils.BrokerIdsPath, new BrokerChangeListener()) } /** * Invoked on startup of the replica's state machine to set the initial state for replicas of all existing partitions * in zookeeper */ private def initializeReplicaState() { for((topicPartition, assignedReplicas) <- controllerContext.partitionReplicaAssignment) { val topic = topicPartition.topic val partition = topicPartition.partition assignedReplicas.foreach { replicaId => val partitionAndReplica = PartitionAndReplica(topic, partition, replicaId) controllerContext.liveBrokerIds.contains(replicaId) match { case true => replicaState.put(partitionAndReplica, OnlineReplica) case false => // mark replicas on dead brokers as failed for topic deletion, if they belong to a topic to be deleted. // This is required during controller failover since during controller failover a broker can go down, // so the replicas on that broker should be moved to ReplicaDeletionIneligible to be on the safer side. replicaState.put(partitionAndReplica, ReplicaDeletionIneligible) } } } } def partitionsAssignedToBroker(topics: Seq[String], brokerId: Int):Seq[TopicAndPartition] = { controllerContext.partitionReplicaAssignment.filter(_._2.contains(brokerId)).keySet.toSeq } /** * This is the zookeeper listener that triggers all the state transitions for a replica */ class BrokerChangeListener() extends IZkChildListener with Logging { this.logIdent = "[BrokerChangeListener on Controller " + controller.config.brokerId + "]: " def handleChildChange(parentPath : String, currentBrokerList : java.util.List[String]) { info("Broker change listener fired for path %s with children %s".format(parentPath, currentBrokerList.mkString(","))) inLock(controllerContext.controllerLock) { if (hasStarted.get) { ControllerStats.leaderElectionTimer.time { try { val curBrokerIds = currentBrokerList.map(_.toInt).toSet val newBrokerIds = curBrokerIds -- controllerContext.liveOrShuttingDownBrokerIds val newBrokerInfo = newBrokerIds.map(ZkUtils.getBrokerInfo(zkClient, _)) val newBrokers = newBrokerInfo.filter(_.isDefined).map(_.get) val deadBrokerIds = controllerContext.liveOrShuttingDownBrokerIds -- curBrokerIds controllerContext.liveBrokers = curBrokerIds.map(ZkUtils.getBrokerInfo(zkClient, _)).filter(_.isDefined).map(_.get) info("Newly added brokers: %s, deleted brokers: %s, all live brokers: %s" .format(newBrokerIds.mkString(","), deadBrokerIds.mkString(","), controllerContext.liveBrokerIds.mkString(","))) newBrokers.foreach(controllerContext.controllerChannelManager.addBroker(_)) deadBrokerIds.foreach(controllerContext.controllerChannelManager.removeBroker(_)) if(newBrokerIds.size > 0) controller.onBrokerStartup(newBrokerIds.toSeq) if(deadBrokerIds.size > 0) controller.onBrokerFailure(deadBrokerIds.toSeq) } catch { case e: Throwable => error("Error while handling broker changes", e) } } } } } } } sealed trait ReplicaState { def state: Byte } case object NewReplica extends ReplicaState { val state: Byte = 1 } case object OnlineReplica extends ReplicaState { val state: Byte = 2 } case object OfflineReplica extends ReplicaState { val state: Byte = 3 } case object ReplicaDeletionStarted extends ReplicaState { val state: Byte = 4} case object ReplicaDeletionSuccessful extends ReplicaState { val state: Byte = 5} case object ReplicaDeletionIneligible extends ReplicaState { val state: Byte = 6} case object NonExistentReplica extends ReplicaState { 
val state: Byte = 7 }
stealthly/kafka
core/src/main/scala/kafka/controller/ReplicaStateMachine.scala
Scala
apache-2.0
22,109
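The legal transitions spelled out in the scaladoc and enforced by `handleStateChange` can be restated as a small lookup table; the object below is an illustrative summary, not part of the Kafka sources (it assumes the ReplicaState definitions above are in scope):

object ReplicaTransitionTable {
  // target state -> states a replica may legally come from
  val validPreviousStates: Map[ReplicaState, Set[ReplicaState]] = Map(
    NewReplica                -> Set(NonExistentReplica),
    OnlineReplica             -> Set(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible),
    OfflineReplica            -> Set(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible),
    ReplicaDeletionStarted    -> Set(OfflineReplica),
    ReplicaDeletionSuccessful -> Set(ReplicaDeletionStarted),
    ReplicaDeletionIneligible -> Set(ReplicaDeletionStarted),
    NonExistentReplica        -> Set(ReplicaDeletionSuccessful)
  )

  def isLegal(from: ReplicaState, to: ReplicaState): Boolean =
    validPreviousStates.getOrElse(to, Set.empty).contains(from)
}

// e.g. ReplicaTransitionTable.isLegal(OfflineReplica, ReplicaDeletionStarted) == true
//      ReplicaTransitionTable.isLegal(OnlineReplica, ReplicaDeletionStarted)  == false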
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.deploy.kubernetes.integrationtest.backend.GCE

import io.fabric8.kubernetes.client.{ConfigBuilder, DefaultKubernetesClient}

import org.apache.spark.deploy.kubernetes.config.resolveK8sMaster
import org.apache.spark.deploy.kubernetes.integrationtest.backend.IntegrationTestBackend
import org.apache.spark.deploy.kubernetes.integrationtest.constants.GCE_TEST_BACKEND

private[spark] class GCETestBackend(val master: String) extends IntegrationTestBackend {
  private var defaultClient: DefaultKubernetesClient = _

  override def initialize(): Unit = {
    var k8ConfBuilder = new ConfigBuilder()
      .withApiVersion("v1")
      .withMasterUrl(resolveK8sMaster(master))
    defaultClient = new DefaultKubernetesClient(k8ConfBuilder.build)
  }

  override def getKubernetesClient(): DefaultKubernetesClient = {
    defaultClient
  }

  override def name(): String = GCE_TEST_BACKEND
}
kimoonkim/spark
resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/kubernetes/integrationtest/backend/GCE/GCETestBackend.scala
Scala
apache-2.0
1,710
package com.rocketfuel.sdbc.postgresql import org.scalatest._ import com.rocketfuel.sdbc.PostgreSql._ class QSeqSetterSpec extends FunSuite { test("implicit Seq[Int] conversion works") { assertCompiles("Seq(1,2,3): ParameterValue") } test("implicit Seq[Option[Int]] conversion works") { assertCompiles("Seq(1,2,3).map(Some.apply): ParameterValue") } test("implicit Seq[Seq[Int]] conversion works") { assertCompiles("Seq(Seq(1),Seq(2),Seq(3)): ParameterValue") } test("implicit Seq[Option[Seq[Int]]] conversion works") { assertCompiles("Seq(Some(Seq(1)),None,Some(Seq(3))): ParameterValue") } test("implicit Seq[Seq[Option[Int]]] conversion works") { assertCompiles("Seq(Seq(Some(1), Some(2)), Seq(None)): ParameterValue") } }
rocketfuel/sdbc
postgresql/src/test/scala/com/rocketfuel/sdbc/postgresql/QSeqSetterSpec.scala
Scala
bsd-3-clause
774
package model import model.impl.PlayerNameEnum.PlayerNameEnum import model.impl.Tile import model.impl.TileNameEnum.TileNameEnum import util.position.Position trait FieldTrait { def isOccupied(pos: Position): Boolean def changeTilePos(player: PlayerNameEnum, posOld: Position, posNew: Position): Boolean def getTileName(player: PlayerNameEnum, pos: Position): TileNameEnum def getPlayerTiles(player: PlayerNameEnum): Set[Tile] def getStrongerTilesWhoAround(playerAround: PlayerNameEnum, pos: Position, playerPos: PlayerNameEnum): List[Position] def isSurroundByOwnTile(player: PlayerNameEnum, posFrom_Ignore: Position, posTo_Observe: Position): Boolean def getPlayerName(pos: Position): PlayerNameEnum def removeTile(pos: Position): Boolean def addTile(playerName: PlayerNameEnum, tileName: TileNameEnum, pos: Position): Boolean var actualPlayerName: PlayerNameEnum def changePlayer(): PlayerNameEnum var winPlayerName: PlayerNameEnum def hasNoRabbits(playerName: PlayerNameEnum): Boolean def hasRabbitOnOtherSide(playerName: PlayerNameEnum): Boolean }
MartinLei/Arimaa
src/main/scala/model/FieldTrait.scala
Scala
mit
1,096
package org.jetbrains.plugins.scala.packagesearch.configuration import com.intellij.ide.ui.search.{SearchableOptionContributor, SearchableOptionProcessor} import com.jetbrains.packagesearch.intellij.plugin.configuration.ui.PackageSearchGeneralConfigurable class SbtSearchableOptionContributor extends SearchableOptionContributor { override def processOptions(processor: SearchableOptionProcessor): Unit = { addSearchConfigurationMap(processor, "sbt", "configuration") } def addSearchConfigurationMap(processor: SearchableOptionProcessor, entries: String*):Unit = { entries.foreach(entry => { processor.addOptions(entry, null, entry, PackageSearchGeneralConfigurable.ID, null, false) }) } }
JetBrains/intellij-scala
scala/integration/packagesearch/src/org/jetbrains/plugins/scala/packagesearch/configuration/SbtSearchableOptionContributor.scala
Scala
apache-2.0
719
package com.robot.core

import scalaz.\/

/**
 * Board having rows & columns
 **/
final class Board private(val rows: Int, val columns: Int) {

  def contains(p: Point): Boolean = p match {
    case Point(x, y) => !(x < 0 || x > columns - 1 || y < 0 || y > rows - 1)
  }

  def getAt(x: Int, y: Int): Option[Point] = {
    val p = Point(x, y)
    if (contains(p)) Some(p) else None
  }

  override def equals(o: Any) = o match {
    case that: Board => that.rows == this.rows && that.columns == this.columns
    case _ => false
  }

  override def hashCode = 41 * (41 + rows) + columns
}

object Board {
  def apply(rows: Int, columns: Int): \/[String, Board] =
    if (rows > 0 && columns > 0) \/.right[String, Board](new Board(rows, columns))
    else \/.left[String, Board]("Rows/Columns should be >0")
}
ratheeshmohan/robotappscala
src/main/scala/com/robot/core/board.scala
Scala
apache-2.0
810
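// Editorial usage sketch for the Board factory above; it is not part of the robotappscala file.
// It assumes Point is the case class from the same com.robot.core package that Board already
// references; the object name BoardUsageSketch and the printed messages are illustrative only.
package com.robot.core

import scalaz.\/

object BoardUsageSketch extends App {
  // Valid dimensions: the disjunction is right-biased, so foreach runs on the success side.
  val ok: \/[String, Board] = Board(5, 5)
  ok.foreach(b => println(s"board contains (2,3): ${b.contains(Point(2, 3))}"))

  // Invalid dimensions: swap exposes the left-hand error message.
  Board(0, 5).swap.foreach(err => println(s"rejected: $err"))
}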
// Copyright 2014 Commonwealth Bank of Australia // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package au.com.cba.omnia.ebenezer package cli import au.com.cba.omnia.ebenezer.introspect._ import au.com.cba.omnia.ebenezer.fs.Glob import org.apache.hadoop.fs.Path import org.apache.hadoop.fs.FileSystem import org.apache.hadoop.conf.Configuration object Cat { def run(patterns: List[String]): Unit = { val conf = new Configuration val paths = Glob.patterns(conf, patterns) val records = paths.foldLeft(Iterator[Record]())((iter, path) => iter ++ ParquetIntrospectTools.iteratorFromPath(conf, path)) records.foreach(println) } def main(args: Array[String]): Unit = run(args.toList) }
shyam334/ebenezer
tools/src/main/scala/au/com/cba/omnia/ebenezer/cli/Cat.scala
Scala
apache-2.0
1,247
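// Editorial usage sketch for the Cat tool above; it is not part of the ebenezer repo.
// The glob pattern is an illustrative assumption: run expands it against the configured
// filesystem, introspects every matching Parquet file and prints each record.
package au.com.cba.omnia.ebenezer.cli

object CatSketch {
  def main(args: Array[String]): Unit =
    Cat.run(List("hdfs:///user/example/warehouse/table/*.parquet"))
}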
/** * Copyright (c) 2012 Petr Kozelek <[email protected]> * * The full copyright and license information is presented * in the file LICENSE that was distributed with this source code. */ package mql.model.semantic import org.scalatest.FlatSpec import mql.EqualitySpec import org.scalatest.matchers.ShouldMatchers._ import TableSpec._ import ColumnSpec._ object ColumnSpec { val col1Tab1 = new Column(tab1, "C1") val col1Tab2 = new Column(tab1, "C2") val col2Tab1 = new Column(tab2, "C1") } class ColumnSpec extends FlatSpec with EqualitySpec { val col12Tab1 = new Column(tab1, "C1") val col13Tab1 = new Column(tab1, "C1") val col21tab1 = new Column(tab1, "C2") "Equal columns (in different tables)" should "not be equal" in { col1Tab1 should not equal col2Tab1 } basicSymmetry((col1Tab1, col12Tab1), col21tab1) "Equal columns" should behave like transitiveObjects(col1Tab1, col12Tab1, col13Tab1) }
footcha/MQL
test/main/scala/mql/model/semantic/ColumnSpec.scala
Scala
bsd-3-clause
937
package com.datastax.examples.meetup.websocket import com.datastax.examples.meetup.model._ import org.apache.spark.storage.StorageLevel import scalawebsocket.WebSocket import org.apache.spark.streaming.receiver.Receiver import org.apache.spark.Logging import org.json4s._ import org.json4s.jackson.JsonMethods._ class WebSocketReceiver(url: String, storageLevel: StorageLevel) extends Receiver[MeetupRsvp](storageLevel) with Logging { @volatile private var webSocket: WebSocket = _ def onStart() { try{ logInfo("Connecting to WebSocket: " + url) val newWebSocket = WebSocket().open(url).onTextMessage({ msg: String => parseJson(msg) }) setWebSocket(newWebSocket) logInfo("Connected to: WebSocket" + url) } catch { case e: Exception => restart("Error starting WebSocket stream", e) } } def onStop() { setWebSocket(null) logInfo("WebSocket receiver stopped") } private def setWebSocket(newWebSocket: WebSocket) = synchronized { if (webSocket != null) { webSocket.shutdown() } webSocket = newWebSocket } private def parseJson(jsonStr: String): Unit = { implicit lazy val formats = DefaultFormats try { val json = parse(jsonStr) val rsvp = json.extract[MeetupRsvp] store(rsvp) } catch { case e: MappingException => logError("Unable to map JSON message to MeetupRsvp object:" + e.msg) case e: Exception => logError("Unable to map JSON message to MeetupRsvp object") } } }
rstml/datastax-spark-streaming-demo
src/main/scala/com/datastax/examples/meetup/websocket/WebSocketReceiver.scala
Scala
apache-2.0
1,513
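// Editorial usage sketch for WebSocketReceiver above; it is not part of the demo repo.
// It wires the custom receiver into a local StreamingContext; the endpoint URL, batch
// interval and object name are illustrative assumptions only.
package com.datastax.examples.meetup.websocket

import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}

object WebSocketReceiverSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("WebSocketReceiverSketch").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Seconds(5))

    // receiverStream plugs any custom Receiver[T] into the DStream graph.
    val rsvps = ssc.receiverStream(
      new WebSocketReceiver("ws://stream.meetup.com/2/rsvps", StorageLevel.MEMORY_ONLY))
    rsvps.count().print() // number of MeetupRsvp objects received per 5-second batch

    ssc.start()
    ssc.awaitTermination()
  }
}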
/* * Copyright 2001-2014 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalactic.anyvals import org.scalatest._ import org.scalatest.prop.PropertyChecks // SKIP-SCALATESTJS,NATIVE-START import scala.collection.immutable.NumericRange // SKIP-SCALATESTJS,NATIVE-END import OptionValues._ import scala.collection.mutable.WrappedArray //import org.scalactic.StrictCheckedEquality import Double.NaN import org.scalactic.Equality import org.scalactic.{Pass, Fail} import org.scalactic.{Good, Bad} import scala.util.{Try, Success, Failure} trait PosZDoubleSpecSupport { implicit val doubleEquality: Equality[Double] = new Equality[Double] { override def areEqual(a: Double, b: Any): Boolean = (a, b) match { case (a, bDouble: Double) if a.isNaN && bDouble.isNaN => true case _ => a == b } } implicit val floatEquality: Equality[Float] = new Equality[Float] { override def areEqual(a: Float, b: Any): Boolean = (a, b) match { case (a, bFloat: Float) if a.isNaN && bFloat.isNaN => true case _ => a == b } } implicit def tryEquality[T]: Equality[Try[T]] = new Equality[Try[T]] { override def areEqual(a: Try[T], b: Any): Boolean = a match { case Success(double: Double) if double.isNaN => // This is because in scala.js x/0 results to NaN not ArithmetricException like in jvm, and we need to make sure Success(NaN) == Success(NaN) is true to pass the test. 
b match { case Success(bDouble: Double) if bDouble.isNaN => true case _ => false } case _: Success[_] => a == b case Failure(ex) => b match { case _: Success[_] => false case Failure(otherEx) => ex.getClass == otherEx.getClass && ex.getMessage == otherEx.getMessage case _ => false } } } } class PosZDoubleSpec extends funspec.AnyFunSpec with matchers.should.Matchers with PropertyChecks with PosZDoubleSpecSupport { describe("A PosZDouble") { describe("should offer a from factory method that") { it("returns Some[PosZDouble] if the passed Double is greater than or equal to 0") { PosZDouble.from(0.0).value.value shouldBe 0.0 PosZDouble.from(50.23).value.value shouldBe 50.23 PosZDouble.from(100.0).value.value shouldBe 100.0 } it("returns None if the passed Double is NOT greater than or equal to 0") { PosZDouble.from(-0.00001) shouldBe None PosZDouble.from(-99.9) shouldBe None } } describe("should offer an ensuringValid factory method that") { it("returns PosZDouble if the passed Double is greater than or equal to 0") { PosZDouble.ensuringValid(0.0).value shouldBe 0.0 PosZDouble.ensuringValid(50.23).value shouldBe 50.23 PosZDouble.ensuringValid(100.0).value shouldBe 100.0 PosZDouble.ensuringValid(Double.PositiveInfinity).value shouldBe Double.PositiveInfinity } it("throws AssertionError if the passed Double is NOT greater than or equal to 0") { an [AssertionError] should be thrownBy PosZDouble.ensuringValid(-0.00001) an [AssertionError] should be thrownBy PosZDouble.ensuringValid(-99.9) an [AssertionError] should be thrownBy PosZDouble.ensuringValid(Double.NegativeInfinity) // SKIP-DOTTY-START // https://github.com/lampepfl/dotty/issues/6710 an [AssertionError] should be thrownBy PosZDouble.ensuringValid(Double.NaN) // SKIP-DOTTY-END } } describe("should offer a tryingValid factory method that") { import TryValues._ it("returns a PosZDouble wrapped in a Success if the passed Double is greater than or equal 0") { PosZDouble.tryingValid(0.0).success.value.value shouldBe 0.0 PosZDouble.tryingValid(50.0).success.value.value shouldBe 50.0 PosZDouble.tryingValid(100.0f).success.value.value shouldBe 100.0 } it("returns an AssertionError wrapped in a Failure if the passed Double is lesser than 0") { PosZDouble.tryingValid(-1.0).failure.exception shouldBe an [AssertionError] PosZDouble.tryingValid(-99.0).failure.exception shouldBe an [AssertionError] } } describe("should offer a passOrElse factory method that") { it("returns a Pass if the given Double is greater than or equal 0") { PosZDouble.passOrElse(0.0)(i => i) shouldBe Pass PosZDouble.passOrElse(50.0)(i => i) shouldBe Pass PosZDouble.passOrElse(100.0)(i => i) shouldBe Pass } it("returns an error value produced by passing the given Double to the given function if the passed Double is lesser than 0, wrapped in a Fail") { PosZDouble.passOrElse(-1.0)(i => i) shouldBe Fail(-1.0) PosZDouble.passOrElse(-99.0)(i => i + 3.0) shouldBe Fail(-96.0) } } describe("should offer a goodOrElse factory method that") { it("returns a PosZDouble wrapped in a Good if the given Double is greater than or equal 0") { PosZDouble.goodOrElse(0.0)(i => i) shouldBe Good(PosZDouble(0.0)) PosZDouble.goodOrElse(50.0)(i => i) shouldBe Good(PosZDouble(50.0)) PosZDouble.goodOrElse(100.0)(i => i) shouldBe Good(PosZDouble(100.0)) } it("returns an error value produced by passing the given Double to the given function if the passed Double is lesser than 0, wrapped in a Bad") { PosZDouble.goodOrElse(-1.0)(i => i) shouldBe Bad(-1.0) PosZDouble.goodOrElse(-99.0)(i => i + 3.0f) shouldBe 
Bad(-96.0) } } describe("should offer a rightOrElse factory method that") { it("returns a PosZDouble wrapped in a Right if the given Double is greater than or equal 0") { PosZDouble.rightOrElse(0.0)(i => i) shouldBe Right(PosZDouble(0.0)) PosZDouble.rightOrElse(50.0)(i => i) shouldBe Right(PosZDouble(50.0)) PosZDouble.rightOrElse(100.0)(i => i) shouldBe Right(PosZDouble(100.0)) } it("returns an error value produced by passing the given Double to the given function if the passed Double is lesser than 0, wrapped in a Left") { PosZDouble.rightOrElse(-1.0)(i => i) shouldBe Left(-1.0) PosZDouble.rightOrElse(-99.0)(i => i + 3.0f) shouldBe Left(-96.0) } } describe("should offer an isValid predicate method that") { it("returns true if the passed Double is greater than or equal to 0") { PosZDouble.isValid(50.23) shouldBe true PosZDouble.isValid(100.0) shouldBe true PosZDouble.isValid(0.0) shouldBe true PosZDouble.isValid(-0.0) shouldBe true PosZDouble.isValid(-0.00001) shouldBe false PosZDouble.isValid(-99.9) shouldBe false } } describe("should offer a fromOrElse factory method that") { it("returns a PosZDouble if the passed Double is greater than or equal to 0") { PosZDouble.fromOrElse(50.23, PosZDouble(42.0)).value shouldBe 50.23 PosZDouble.fromOrElse(100.0, PosZDouble(42.0)).value shouldBe 100.0 PosZDouble.fromOrElse(0.0, PosZDouble(42.0)).value shouldBe 0.0 } it("returns a given default if the passed Double is NOT greater than or equal to 0") { PosZDouble.fromOrElse(-0.00001, PosZDouble(42.0)).value shouldBe 42.0 PosZDouble.fromOrElse(-99.9, PosZDouble(42.0)).value shouldBe 42.0 } } it("should offer MaxValue, MinValue, and MinPositiveValue factory methods") { PosZDouble.MaxValue shouldEqual PosZDouble.from(Double.MaxValue).get PosZDouble.MinValue shouldEqual PosZDouble(0.0) PosZDouble.MinPositiveValue shouldEqual PosZDouble.from(Double.MinPositiveValue).get } it("should offer a PositiveInfinity factory method") { PosZDouble.PositiveInfinity shouldEqual PosZDouble.ensuringValid(Double.PositiveInfinity) } it("should not offer a PositiveInfinity factory method") { "PosZDouble.NegativeInfinity" shouldNot compile } it("should offer a isPosInfinity method that returns true if the instance is PositiveInfinity") { PosZDouble.ensuringValid(Float.PositiveInfinity).isPosInfinity shouldBe true PosZDouble(1.0f).isPosInfinity shouldBe false } it("should not offer a isNegInfinity method") { "PosZDouble(1.0f).isNegInfinity" shouldNot compile } it("should be sortable") { val xs = List(PosZDouble(2.2), PosZDouble(0.0), PosZDouble(1.1), PosZDouble(3.3)) xs.sorted shouldEqual List(PosZDouble(0.0), PosZDouble(1.1), PosZDouble(2.2), PosZDouble(3.3)) } describe("when created with apply method") { it("should compile when 8 is passed in") { "PosZDouble(8)" should compile PosZDouble(8).value shouldEqual 8.0 "PosZDouble(8L)" should compile PosZDouble(8L).value shouldEqual 8.0 "PosZDouble(8.0F)" should compile PosZDouble(8.0F).value shouldEqual 8.0 "PosZDouble(8.0)" should compile PosZDouble(8.0).value shouldEqual 8.0 } it("should compile when 0 is passed in") { "PosZDouble(0)" should compile PosZDouble(0).value shouldEqual 0.0 "PosZDouble(0L)" should compile PosZDouble(0L).value shouldEqual 0.0 "PosZDouble(0.0F)" should compile PosZDouble(0.0F).value shouldEqual 0.0 "PosZDouble(0.0)" should compile PosZDouble(0.0).value shouldEqual 0.0 } it("should not compile when -8 is passed in") { "PosZDouble(-8)" shouldNot compile "PosZDouble(-8L)" shouldNot compile "PosZDouble(-8.0F)" shouldNot compile "PosZDouble(-8.0)" shouldNot 
compile } it("should not compile when x is passed in") { val a: Int = -8 "PosZDouble(a)" shouldNot compile val b: Long = -8L "PosZDouble(b)" shouldNot compile val c: Float = -8.0F "PosZDouble(c)" shouldNot compile val d: Double = -8.0 "PosZDouble(d)" shouldNot compile } } describe("when specified as a plain-old Double") { def takesPosZDouble(poz: PosZDouble): Double = poz.value it("should compile when 8 is passed in") { "takesPosZDouble(8)" should compile takesPosZDouble(8) shouldEqual 8.0 "takesPosZDouble(8L)" should compile takesPosZDouble(8L) shouldEqual 8.0 "takesPosZDouble(8.0F)" should compile takesPosZDouble(8.0F) shouldEqual 8.0 "takesPosZDouble(8.0)" should compile takesPosZDouble(8.0) shouldEqual 8.0 } it("should compile when 0 is passed in") { "takesPosZDouble(0)" should compile takesPosZDouble(0) shouldEqual 0.0 "takesPosZDouble(0L)" should compile takesPosZDouble(0L) shouldEqual 0.0 "takesPosZDouble(0.0F)" should compile takesPosZDouble(0.0F) shouldEqual 0.0 "takesPosZDouble(0.0)" should compile takesPosZDouble(0.0) shouldEqual 0.0 } it("should not compile when -8 is passed in") { "takesPosZDouble(-8)" shouldNot compile "takesPosZDouble(-8L)" shouldNot compile "takesPosZDouble(-8.0F)" shouldNot compile "takesPosZDouble(-8.0)" shouldNot compile } it("should not compile when x is passed in") { val x: Int = -8 "takesPosZDouble(x)" shouldNot compile val b: Long = -8L "takesPosZDouble(b)" shouldNot compile val c: Float = -8.0F "takesPosZDouble(c)" shouldNot compile val d: Double = -8.0 "takesPosZDouble(d)" shouldNot compile } } it("should offer a unary + method that is consistent with Double") { forAll { (p: PosZDouble) => (+p).toDouble shouldEqual (+(p.toDouble)) } } it("should offer a unary - method that returns NegZDouble") { forAll { (p: PosZDouble) => (-p) shouldEqual (NegZDouble.ensuringValid(-(p.toDouble))) } } it("should offer a 'plus' method that takes a PosZDouble and returns a PosDouble") { forAll { (posZDouble1: PosZDouble, posZDouble2: PosZDouble) => (posZDouble1 plus posZDouble2) should === (PosZDouble.ensuringValid(posZDouble1.toDouble + posZDouble2.toDouble)) } val examples = Table( ( "posZDouble1", "posZDouble2" ), ( PosZDouble.MinValue, PosZDouble.MinValue ), ( PosZDouble.MinValue, PosZDouble.MinPositiveValue ), ( PosZDouble.MinValue, PosZDouble.MaxValue ), ( PosZDouble.MinValue, PosZDouble.PositiveInfinity ), ( PosZDouble.MaxValue, PosZDouble.MinValue ), ( PosZDouble.MaxValue, PosZDouble.MinPositiveValue ), ( PosZDouble.MaxValue, PosZDouble.MaxValue ), ( PosZDouble.MaxValue, PosZDouble.PositiveInfinity ), ( PosZDouble.PositiveInfinity, PosZDouble.MinValue ), ( PosZDouble.PositiveInfinity, PosZDouble.MinPositiveValue ), ( PosZDouble.PositiveInfinity, PosZDouble.MaxValue ), ( PosZDouble.PositiveInfinity, PosZDouble.PositiveInfinity ) ) forAll (examples) { (a, b) => (a plus b).value should be >= 0.0 } // Sanity check that implicit widening conversions work too. // Here a PosDouble gets widened to a PosZDouble. 
PosZDouble(1.0) plus PosDouble(2.0) should === (PosZDouble(3.0)) } it("should offer 'min' and 'max' methods that are consistent with Double") { forAll { (pzdouble1: PosZDouble, pzdouble2: PosZDouble) => pzdouble1.max(pzdouble2).toDouble shouldEqual pzdouble1.toDouble.max(pzdouble2.toDouble) pzdouble1.min(pzdouble2).toDouble shouldEqual pzdouble1.toDouble.min(pzdouble2.toDouble) } } it("should offer an 'isWhole' method that is consistent with Double") { forAll { (pzdouble: PosZDouble) => pzdouble.isWhole shouldEqual pzdouble.toDouble.isWhole } } it("should offer 'round', 'ceil', and 'floor' methods that are consistent with Double") { forAll { (pzdouble: PosZDouble) => pzdouble.round.toDouble shouldEqual pzdouble.toDouble.round pzdouble.ceil.toDouble shouldEqual pzdouble.toDouble.ceil pzdouble.floor.toDouble shouldEqual pzdouble.toDouble.floor } } it("should offer 'toRadians' and 'toDegrees' methods that are consistent with Double") { forAll { (pzdouble: PosZDouble) => pzdouble.toRadians shouldEqual pzdouble.toDouble.toRadians } } it("should offer widening methods for basic types that are consistent with Double") { forAll { (pzdouble: PosZDouble) => def widen(value: Double): Double = value widen(pzdouble) shouldEqual widen(pzdouble.toDouble) } } it("should offer an ensuringValid method that takes a Double => Double, throwing AssertionError if the result is invalid") { PosZDouble(33.0).ensuringValid(_ + 1.0) shouldEqual PosZDouble(34.0) PosZDouble(33.0).ensuringValid(_ => Double.PositiveInfinity) shouldEqual PosZDouble.ensuringValid(Double.PositiveInfinity) an [AssertionError] should be thrownBy { PosZDouble.MaxValue.ensuringValid(_ - PosZDouble.MaxValue - 1) } an [AssertionError] should be thrownBy { PosZDouble.MaxValue.ensuringValid(_ => Double.NegativeInfinity) } // SKIP-DOTTY-START // https://github.com/lampepfl/dotty/issues/6710 an [AssertionError] should be thrownBy { PosZDouble.MaxValue.ensuringValid(_ => Double.NaN) } // SKIP-DOTTY-END } it("should offer an isFinite method that returns true if the value does not represent infinity") { forAll { (n: PosZFiniteDouble) => (n: PosZDouble).isFinite should be (true) PosZDouble.PositiveInfinity.isFinite should be (false) } } } }
scalatest/scalatest
jvm/scalactic-test/src/test/scala/org/scalactic/anyvals/PosZDoubleSpec.scala
Scala
apache-2.0
16,473
/* * Copyright 2018 Analytics Zoo Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.zoo.pipeline.api.keras2.layers import com.intel.analytics.bigdl.nn.Graph.ModuleNode import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric import com.intel.analytics.bigdl.utils.Shape import com.intel.analytics.zoo.pipeline.api.Net import com.intel.analytics.zoo.pipeline.api.keras.layers.Merge import scala.reflect.ClassTag /** * Layer that computes the minimum (element-wise) a list of inputs. * * It takes as input a list of nodes, * all of the same shape, and returns * a single node (also of the same shape). */ class Minimum[T: ClassTag]( override val inputShape: Shape = null)(implicit ev: TensorNumeric[T]) extends Merge[T](layers = null, mode = "min", inputShape = inputShape) with Net { } object Minimum { def apply[@specialized(Float, Double) T: ClassTag](inputShape: Shape = null) (implicit ev: TensorNumeric[T]): Minimum[T] = { new Minimum[T](inputShape) } def minimum[@specialized(Float, Double) T: ClassTag](inputs: List[ModuleNode[T]]) (implicit ev: TensorNumeric[T]): ModuleNode[T] = { val layer = new Minimum[T]() layer.inputs(inputs.toArray) } }
intel-analytics/analytics-zoo
zoo/src/main/scala/com/intel/analytics/zoo/pipeline/api/keras2/layers/Minimum.scala
Scala
apache-2.0
1,765
package opennlp.scalabha.cluster

/**
 * A confusion matrix for comparing gold clusters to some predicted clusters.
 *
 * @param goldLabels the set of labels from the gold labeled data
 * @param predictedOutcomes a set of (usually autogenerated) labels for the predicted clusters
 * @param counts the matrix, where each cell is the number of data points that had a given gold label and predicted label
 */
class ClusterConfusionMatrix(
  goldLabels: IndexedSeq[String],
  predictedOutcomes: IndexedSeq[String],
  counts: IndexedSeq[IndexedSeq[Int]]) {

  // Create a string representation. Be lazy so that we only do it once.
  lazy val stringRep = {
    val lengthOfRow = counts(0).mkString.length + counts(0).length * 7
    val tableString = counts.zip(goldLabels).map {
      case (goldLine, goldLabel) =>
        (goldLine.mkString("\t") + "\t|\t" + goldLine.sum + "\t[" + goldLabel + "]")
    }.mkString("\n")

    ("-" * 80 + "\n" +
      "Confusion matrix.\n" +
      "Columns give predicted counts. Rows give gold counts.\n" +
      "-" * 80 + "\n" +
      tableString + "\n" +
      "-" * lengthOfRow + "\n" +
      counts.transpose.map(_.sum).mkString("\t") + "\n" +
      predictedOutcomes.map("[" + _ + "]").mkString("\t") + "\n")
  }

  // Get the string representation.
  override def toString = stringRep
}

/**
 * A companion object for constructing ClusterConfusionMatrices.
 */
object ClusterConfusionMatrix {

  /**
   * Construct a confusion matrix for comparing gold clusters
   * to some predicted clusters.
   *
   * @param goldClusterIds a sequence of cluster ids for each data point
   * @param numPredictedClusters the number of clusters predicted
   * @param predictedClusterIndices sequence of cluster indices (0 to numPredictedClusters-1) for each data point
   * @return a ClusterConfusionMatrix with the relevant comparisons
   */
  def apply(
    goldClusterIds: IndexedSeq[String],
    numPredictedClusters: Int,
    predictedClusterIndices: IndexedSeq[Int]) = {
    val goldLabels = goldClusterIds.toSet.toIndexedSeq
    val goldIndices = goldLabels.zipWithIndex.toMap
    val numGoldClusters = goldIndices.size
    val counts = Array.fill(numGoldClusters, numPredictedClusters)(0)
    goldClusterIds.zip(predictedClusterIndices).map {
      case (goldId, predIndex) => counts(goldIndices(goldId))(predIndex) += 1
    }
    new ClusterConfusionMatrix(
      goldLabels,
      (0 until numPredictedClusters).map(_.toString),
      counts.map(_.toIndexedSeq).toIndexedSeq)
  }
}
eponvert/Scalabha
src/main/scala/opennlp/scalabha/cluster/Evaluation.scala
Scala
apache-2.0
2,534
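// Editorial usage sketch for ClusterConfusionMatrix above; it is not part of the Scalabha file.
// The toy gold labels and predicted indices below are illustrative assumptions only.
package opennlp.scalabha.cluster

object ClusterConfusionMatrixSketch extends App {
  val goldClusterIds = IndexedSeq("A", "A", "B", "B", "B")
  val predictedClusterIndices = IndexedSeq(0, 0, 1, 1, 0)

  // Declare three predicted clusters even though only indices 0 and 1 occur,
  // so the rendered table also shows an empty column.
  val matrix = ClusterConfusionMatrix(goldClusterIds, 3, predictedClusterIndices)

  // toString renders the tab-separated table with per-row and per-column totals.
  println(matrix)
}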
/* * Copyright (c) 2002-2018 "Neo Technology," * Network Engine for Objects in Lund AB [http://neotechnology.com] * * This file is part of Neo4j. * * Neo4j is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.neo4j.cypher.internal.frontend.v2_3.ast.functions import org.neo4j.cypher.internal.frontend.v2_3.ast.{Function, SimpleTypedFunction} import org.neo4j.cypher.internal.frontend.v2_3.symbols._ case object Sin extends Function with SimpleTypedFunction { def name = "sin" val signatures = Vector( Signature(argumentTypes = Vector(CTFloat), outputType = CTFloat) ) }
HuangLS/neo4j
community/cypher/frontend-2.3/src/main/scala/org/neo4j/cypher/internal/frontend/v2_3/ast/functions/Sin.scala
Scala
apache-2.0
1,182
def tuple = (1, Map(1 -> 2)) println(/*start*/tuple._2(1)/*end*/) //Int
ilinum/intellij-scala
testdata/typeInference/bugs5/SCL3278A.scala
Scala
apache-2.0
71
package com.google.protobuf import com.google.protobuf.DescriptorProtos._ object Descriptors { class EnumValueDescriptor { def getName(): String = ??? def getNumber(): Int = ??? } class EnumDescriptor { def getName(): String = ??? def getContainingType(): Descriptor = ??? def getValues(): java.util.List[EnumValueDescriptor] = ??? def findValueByName(name: String): EnumValueDescriptor = ??? def findValueByNumber(number: Int): EnumValueDescriptor = ??? } class FieldDescriptor { def getName(): String = ??? def getContainingType(): Descriptor = ??? def getNumber(): Int = ??? def getType(): FieldDescriptor.Type = ??? def isOptional(): Boolean = ??? def isRepeated(): Boolean = ??? def isRequired(): Boolean = ??? } object FieldDescriptor { sealed trait Type object Type { case object DOUBLE extends Type case object FLOAT extends Type case object INT64 extends Type case object UINT64 extends Type case object INT32 extends Type case object FIXED64 extends Type case object FIXED32 extends Type case object BOOL extends Type case object STRING extends Type case object GROUP extends Type case object MESSAGE extends Type case object BYTES extends Type case object UINT32 extends Type case object ENUM extends Type case object SFIXED32 extends Type case object SFIXED64 extends Type case object SINT32 extends Type case object SINT64 extends Type } } class ServiceDescriptor { class MethodDescriptorList { def get(i: Int): MethodDescriptor = new MethodDescriptor() } def getMethods(): MethodDescriptorList = new MethodDescriptorList } class MethodDescriptor {} class FileDescriptor { def getMessageTypes(): java.util.List[Descriptor] = ??? def getEnumTypes(): java.util.List[EnumDescriptor] = ??? class ServiceDescriptorList() { def get(i: Int): ServiceDescriptor = new ServiceDescriptor() } def getServices(): ServiceDescriptorList = new ServiceDescriptorList() } object FileDescriptor { def buildFrom( p: FileDescriptorProto, deps: Array[FileDescriptor] ): FileDescriptor = new FileDescriptor } class Descriptor { def getContainingType(): Descriptor = ??? def getFields(): java.util.List[FieldDescriptor] = ??? def getNestedTypes(): java.util.List[Descriptor] = ??? def getEnumTypes(): java.util.List[EnumDescriptor] = ??? def getFullName(): String = ??? } }
trueaccord/protobuf-scala-runtime
shared/src/main/scala/com/google/protobuf/Descriptors.scala
Scala
apache-2.0
2,590
package nl.malienkolders.htm.battle package comet import snippet._ import _root_.net.liftweb._ import http._ import common._ import actor._ import util._ import Helpers._ import _root_.scala.xml.{ NodeSeq, Text } import _root_.java.util.Date import nl.malienkolders.htm.lib.model._ import dispatch._ import Http._ import net.liftweb.json._ import nl.malienkolders.htm.lib._ import scala.concurrent._ import ExecutionContext.Implicits.global object BattleServer extends LiftActor with ListenerManager { private var currentRound: Box[MarshalledRound] = Empty private var currentPool: Box[MarshalledPoolSummary] = Empty private var currentFight: Box[MarshalledFight] = Empty private var timer: Long = 0 private var lastTimerStart: Long = -1 private var timerRunning = false private var fightStartedAt: Long = -1 private var scores: List[Score] = List() private var breakTimeReached = false implicit val formats = Serialization.formats(NoTypeHints) def currentTime = timer + (if (timerRunning) System.currentTimeMillis - lastTimerStart else 0) def currentScore = scores.foldLeft(TotalScore(0, 0, 0, 0, 0, 0, 0, 0)) { case (TotalScore(a, aa, b, ba, d, as, bs, x), s) => TotalScore(a + s.a, aa + s.aAfter, b + s.b, ba + s.bAfter, d + s.double, as, bs, x + (if (s.isExchange) 1 else 0)) } def exchangeCount = scores.count(_.isExchange) override def lowPriority = { case RequestCurrentPool => reply(currentPool) case SubscribePool(pool) => currentPool = Full(pool) updateListeners case UnsubscribePool(pool) => currentPool = Empty updateListeners case PoolSubscription(pool) => reply(currentPool.isDefined && currentPool.get == pool) case RequestCurrentFight => reply((currentRound, currentPool, currentFight)) case SetCurrentFight(pool) => { println("SET CURRENT FIGHT") currentPool = Full(pool) println("POOL: " + pool.toString) val roundReq = :/(ShowAdminConnection.adminHost) / "api" / "round" / pool.round.id.toString val round = Http(roundReq OK as.String).fold[Option[MarshalledRound]]( _ => None, success => Some(parse(success).extract[MarshalledRound])).apply println("ROUND: " + round) if (round.isDefined) { currentRound = Full(round.get) val req = :/(ShowAdminConnection.adminHost) / "api" / "pool" / pool.id.toString / "fight" / "pop" currentFight = Http(req OK as.String).fold[Box[MarshalledFight]]( _ => Empty, success => Full(Serialization.read[MarshalledFight](success))).apply } timer = 0 timerRunning = false breakTimeReached = false println("INIT VIEWER") ViewerServer ! InitFight(currentRound, currentPool, currentFight) ViewerServer ! StopTimer(timer) ViewerServer ! UpdateScores(currentScore) ViewerServer ! ShowView(new FightView) if (currentFight.isDefined) { println("PEEK NEXT FIGHT") try { val req = :/(ShowAdminConnection.adminHost) / "api" / "pool" / pool.id.toString / "fight" / "peek" val nextFight = Http(req OK as.String).fold[Box[MarshalledFight]]( _ => Empty, success => success match { case "false" => Empty case _ => Full(Serialization.read[MarshalledFight](success)) }).apply ViewerServer ! ShowMessage( nextFight match { case Full(f) => "Next up: %s (red) vs %s (blue)" format (f.fighterA.shortName, f.fighterB.shortName) case _ => "" }, -1) } catch { case _: Throwable => ViewerServer ! 
ShowMessage("", -1) } } println("UPDATE LISTENERS") updateListeners reply((currentRound, currentPool, currentFight)) } case UpdateTimer => { if (!breakTimeReached && (currentRound.get.breakInFightAt > 0) && (currentTime > currentRound.get.breakInFightAt)) { breakTimeReached = true timerRunning = false timer = currentRound.get.breakInFightAt ViewerServer ! StopTimer(timer) ViewerServer ! UpdateScores(currentScore) } if ((currentRound.get.timeLimitOfFight > 0) && (currentTime > currentRound.get.timeLimitOfFight)) { timerRunning = false timer = currentRound.get.timeLimitOfFight ViewerServer ! StopTimer(timer) ViewerServer ! UpdateScores(currentScore) } if (timerRunning) { Schedule.schedule(this, UpdateTimer, 1 second) updateListeners(TimerUpdate(currentTime)) } else { updateListeners } } case Start => { if ((currentRound.get.exchangeLimit > 0) && (exchangeCount >= currentRound.get.exchangeLimit)) { S.notice("Exchange limit reached!") } else if ((currentRound.get.timeLimitOfFight > 0) && (timer >= currentRound.get.timeLimitOfFight)) { S.notice("Time limit reached!") } else { ViewerServer ! UpdateScores(currentScore) if (fightStartedAt < 0) fightStartedAt = new Date().getTime() if (!timerRunning) { lastTimerStart = System.currentTimeMillis() timerRunning = true ViewerServer ! StartTimer(timer) } this ! UpdateTimer } } case Stop => { if (timerRunning) { timerRunning = false timer += System.currentTimeMillis - lastTimerStart } ViewerServer ! StopTimer(timer) ViewerServer ! UpdateScores(currentScore) updateListeners } case s: Score => { if (!s.isExchange || currentRound.get.exchangeLimit == 0 || exchangeCount < currentRound.get.exchangeLimit) { scores = s :: scores ViewerServer ! UpdateScores(currentScore) updateListeners(ScoreUpdate(scores)) } } case Undo => { scores = scores.drop(1) ViewerServer ! UpdateScores(currentScore) updateListeners(ScoreUpdate(scores)) } case Cancel => { if (timerRunning) { timerRunning = false timer += System.currentTimeMillis - lastTimerStart } val req = :/(ShowAdminConnection.adminHost) / "api" / "fight" / "cancel" <:< Map("Content-Type" -> "application/json") val f = currentFight.get val result = Http(req.POST << Serialization.write(MarshalledFight( f.id, f.pool, f.round, f.order, f.fighterA, f.fighterB, fightStartedAt, new Date().getTime(), timer, List()))).fold[Boolean](_ => false, resp => resp.getResponseBody().toBoolean).apply if (result) { currentFight = Empty timer = 0 timerRunning = false scores = List() ViewerServer ! ShowMessage("", -1) BattleServer ! UpdateViewer(new PoolOverview) ViewerServer ! ShowView(new PoolOverview) } updateListeners } case Finish => { if (timerRunning) { timerRunning = false timer += System.currentTimeMillis - lastTimerStart } val req = :/(ShowAdminConnection.adminHost) / "api" / "fight" / "confirm" <:< Map("Content-Type" -> "application/json") val f = currentFight.get val result = Http(req.POST << Serialization.write(MarshalledFight( f.id, f.pool, f.round, f.order, f.fighterA, f.fighterB, fightStartedAt, new Date().getTime(), timer, scores.map(s => MarshalledScore( s.timeInFight, s.timeInWorld, s.a, s.b, s.aAfter, s.bAfter, s.double, s.remark, s.isSpecial, s.isExchange))))).fold[Boolean](_ => false, resp => resp.getResponseBody().toBoolean).apply if (result) { currentFight = Empty timer = 0 timerRunning = false scores = List() ViewerServer ! ShowMessage("", -1) BattleServer ! UpdateViewer(new PoolOverview) ViewerServer ! 
ShowView(new PoolOverview) } updateListeners } case UpdateViewer(v) => v match { case _: PoolOverview => ViewerServer ! InitPoolOverview( currentPool.map { cp => val req = :/(ShowAdminConnection.adminHost) / "api" / "pool" / cp.id.toString / "viewer" Http(req OK as.String).fold( _ => Empty, success => Full(Serialization.read[MarshalledViewerPool](success))).apply }.getOrElse(Empty)) case _: PoolRanking => ViewerServer ! InitPoolRanking( currentPool.map { cp => val req = :/(ShowAdminConnection.adminHost) / "api" / "pool" / cp.id.toString / "ranking" Http(req OK as.String).fold( _ => Empty, success => Full(Serialization.read[MarshalledPoolRanking](success))).apply }.getOrElse(Empty)) case _: FightView => ViewerServer ! InitFight(currentRound, currentPool, currentFight) ViewerServer ! UpdateScores(currentScore) case _ => // ignore } case msg => println(msg.toString) } def peek(p: MarshalledPoolSummary) = { val req = :/(ShowAdminConnection.adminHost) / "api" / "pool" / p.id.toString / "fight" / "peek" Http(req OK as.String).fold[Box[MarshalledFight]]( _ => Empty, success => success match { case "false" => Empty case _ => Full(Serialization.read[MarshalledFight](success)) }).apply } def nextFight = currentPool.map(p => peek(p)).getOrElse(Empty) def createUpdate = BattleServerUpdate(currentRound, currentPool, currentFight, nextFight, timer, scores) } case class BattleServerUpdate(currentRound: Box[MarshalledRound], currentPool: Box[MarshalledPoolSummary], currentFight: Box[MarshalledFight], nextFight: Box[MarshalledFight], currentTime: Long, scores: List[Score]) case object RequestCurrentPool case class SubscribePool(pool: MarshalledPoolSummary) case class UnsubscribePool(pool: MarshalledPoolSummary) case class PoolSubscription(pool: MarshalledPoolSummary) case object RequestCurrentFight case class SetCurrentFight(pool: MarshalledPoolSummary) case class UpdateViewer(v: View) case object UpdateTimer case object Start case object Stop case object Undo case object Finish case object Cancel case class TimerUpdate(time: Long) case class ScoreUpdate(scores: List[Score]) case class Score(a: Int, aAfter: Int, b: Int, bAfter: Int, double: Int, timeInWorld: Long, timeInFight: Long, remark: String, isSpecial: Boolean, isExchange: Boolean) case class ScorePoints(a: Int, aAfter: Int, b: Int, bAfter: Int, double: Int, remark: String, isSpecial: Boolean, isExchange: Boolean)
hema-tournament-manager/htm
htm-battle/src/main/scala/nl/malienkolders/htm/battle/comet/BattleServer.scala
Scala
apache-2.0
11,075
package mesosphere.marathon /** * The accepted resource roles default behavior defines whether which values are selected for the accepted resource roles by default * * - [[AcceptedResourceRolesDefaultBehavior.Any]] indicates that either reserved or unreserved resources will be accepted. (default) * - [[AcceptedResourceRolesDefaultBehavior.Unreserved]] Only unreserved ( offers with role '*' ) will be accepted. * - [[AcceptedResourceRolesDefaultBehavior.Reserved]] Only reserved (offers with the role of the starting task) will be accepted. * */ sealed trait AcceptedResourceRolesDefaultBehavior { val name: String override def toString: String = name } object AcceptedResourceRolesDefaultBehavior { case object Any extends AcceptedResourceRolesDefaultBehavior { override val name: String = "Any" } case object Unreserved extends AcceptedResourceRolesDefaultBehavior { override val name: String = "Unreserved" } case object Reserved extends AcceptedResourceRolesDefaultBehavior { override val name: String = "Reserved" } val all = Seq(Any, Unreserved, Reserved) def fromString(s: String): Option[AcceptedResourceRolesDefaultBehavior] = { s.toLowerCase match { case "any" => Some(AcceptedResourceRolesDefaultBehavior.Any) case "unreserved" => Some(AcceptedResourceRolesDefaultBehavior.Unreserved) case "reserved" => Some(AcceptedResourceRolesDefaultBehavior.Reserved) case _ => None } } }
mesosphere/marathon
src/main/scala/mesosphere/marathon/AcceptedResourceRolesDefaultBehavior.scala
Scala
apache-2.0
1,479
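// Editorial usage sketch for AcceptedResourceRolesDefaultBehavior above; it is not part of Marathon.
// fromString matches case-insensitively and yields None for unknown input; the object name is illustrative.
package mesosphere.marathon

object AcceptedResourceRolesSketch extends App {
  println(AcceptedResourceRolesDefaultBehavior.fromString("unreserved")) // Some(Unreserved)
  println(AcceptedResourceRolesDefaultBehavior.fromString("RESERVED"))   // Some(Reserved)
  println(AcceptedResourceRolesDefaultBehavior.fromString("bogus"))      // None
  println(AcceptedResourceRolesDefaultBehavior.all.map(_.name))          // List(Any, Unreserved, Reserved)
}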
package org.example1.usage import org.example1.declaration.data.A import org.example1.declaration.{X => X_Renamed} trait Usage_WithRename_OfMovedClass_OtherImportCommon1 { val a: A = ??? val x: X_Renamed = ??? }
JetBrains/intellij-scala
scala/scala-impl/testdata/move/allInOne/before/org/example1/usage/Usage_WithRename_OfMovedClass_OtherImportCommon1.scala
Scala
apache-2.0
218
/*********************************************************************** * Copyright (c) 2013-2020 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. ***********************************************************************/ package org.locationtech.geomesa.spark.jts.util import org.locationtech.jts.geom.Geometry import org.locationtech.jts.io._ trait WKTUtils { private[this] val readerPool = new ThreadLocal[WKTReader]{ override def initialValue = new WKTReader } private[this] val writerPool = new ThreadLocal[WKTWriter]{ override def initialValue = new WKTWriter } def read(s: String): Geometry = readerPool.get.read(s) def write(g: Geometry): String = writerPool.get.write(g) } trait WKBUtils { private[this] val readerPool = new ThreadLocal[WKBReader]{ override def initialValue = new WKBReader } private[this] val writer2dPool = new ThreadLocal[WKBWriter]{ override def initialValue = new WKBWriter(2) } private[this] val writer3dPool = new ThreadLocal[WKBWriter]{ override def initialValue = new WKBWriter(3) } def read(s: String): Geometry = read(s.getBytes) def read(b: Array[Byte]): Geometry = readerPool.get.read(b) def write(g: Geometry): Array[Byte] = { val writer = if (is2d(g)) { writer2dPool } else { writer3dPool } writer.get.write(g) } private def is2d(geometry: Geometry): Boolean = { // don't trust coord.getDimensions - it always returns 3 in jts // instead, check for NaN for the z dimension // note that we only check the first coordinate - if a geometry is written with different // dimensions in each coordinate, some information may be lost if (geometry == null) { true } else { val coord = geometry.getCoordinate // check for dimensions - use NaN != NaN to verify z coordinate // TODO check for M coordinate when added to JTS coord == null || java.lang.Double.isNaN(coord.getZ) } } } object WKTUtils extends WKTUtils object WKBUtils extends WKBUtils
aheyne/geomesa
geomesa-spark/geomesa-spark-jts/src/main/scala/org/locationtech/geomesa/spark/jts/util/WKUtils.scala
Scala
apache-2.0
2,243
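// Editorial usage sketch for WKTUtils and WKBUtils above; it is not part of the GeoMesa file.
// It round-trips a 2D point through WKT and WKB; the coordinates are illustrative only.
package org.locationtech.geomesa.spark.jts.util

object WKUtilsSketch extends App {
  val geom = WKTUtils.read("POINT (10 20)")
  println(WKTUtils.write(geom))                   // POINT (10 20)

  // A 2D geometry (z is NaN) is routed to the 2-dimensional WKB writer.
  val bytes = WKBUtils.write(geom)
  println(WKBUtils.read(bytes).equalsExact(geom)) // true
}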
import scala.language.experimental.macros import scala.reflect.macros.Context object Macros { def impl(c: Context) = { import c.universe._ c.Expr[Unit](q"""println("Hello World")""") } def hello: Unit = macro impl }
hbutani/hive-lineage
macros/src/main/scala/Macros.scala
Scala
apache-2.0
232
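// Editorial usage sketch for the Macros object above; it is not part of the hive-lineage repo.
// Macro implementations must be compiled before their call sites, so an object like this would
// live in a module that depends on the macros project; the object name is illustrative.
object MacrosSketch extends App {
  Macros.hello // expands at compile time into println("Hello World")
}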
package net.debasishg.sw15.algebra
package domain.trade
package algebra

import java.util.{ Date, Calendar }

import scalaz._
import Scalaz._
import \/._

/*
trait Trade {this: RefModel =>
  case class TradeModel (account: Account, instrument: Instrument,
    refNo: String, market: Market,
    unitPrice: BigDecimal, quantity: BigDecimal,
    tradeDate: Date = today, valueDate: Option[Date] = None,
    taxFees: Option[List[(TaxFeeId, BigDecimal)]] = None,
    netAmount: Option[BigDecimal] = None)
debasishg/scalaworld15
src/main/scala/sw15/algebra/algebra/trade.scala
Scala
apache-2.0
499
package lila.puzzle import play.api.data._ import play.api.data.Forms._ object DataForm { val round = Form(single( "win" -> number )) val vote = Form(single( "vote" -> number )) }
clarkerubber/lila
modules/puzzle/src/main/DataForm.scala
Scala
agpl-3.0
200
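// Editorial usage sketch for DataForm above; it is not part of the lila module.
// It binds the vote form from a plain parameter map, as a controller would after a POST;
// the sample values and the object name are illustrative assumptions only.
package lila.puzzle

object DataFormSketch extends App {
  DataForm.vote.bind(Map("vote" -> "1")).fold(
    formWithErrors => println("invalid vote: " + formWithErrors.errors),
    vote           => println("vote value: " + vote) // 1
  )

  DataForm.round.bind(Map.empty[String, String]).fold(
    formWithErrors => println("missing field: " + formWithErrors.errors.map(_.key)),
    win            => println("win value: " + win)
  )
}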
package edu.mit.csail.sdg.hsqldb.data.access.query.setOps import edu.mit.csail.sdg.hsqldb.data.access.QueryExpr /** * Created by IntelliJ IDEA. * User: Dwayne * Date: 7/9/11 * Time: 9:00 AM * To change this template use File | Settings | File Templates. */ case class Union(left: QueryExpr, right: QueryExpr) extends QueryExpr{ def toSql = "%s UNION %s" format (left.toSql, right.toSql) } object Union { def unions(xs: QueryExpr*) = if (xs.size < 2) xs.head else xs reduceLeft (Union(_, _)) } case class UnionAll(left: QueryExpr, right: QueryExpr) extends QueryExpr{ def toSql = "%s UNION ALL %s" format (left.toSql, right.toSql) }
dlreeves/ormolu
src/edu/mit/csail/sdg/hsqldb/data/access/query/setOps/Union.scala
Scala
mit
675
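// Editorial usage sketch for Union/UnionAll above; it is not part of the ormolu repo.
// RawQuery is a stand-in that assumes QueryExpr only requires a toSql member; all names here
// are illustrative assumptions.
package edu.mit.csail.sdg.hsqldb.data.access.query.setOps

import edu.mit.csail.sdg.hsqldb.data.access.QueryExpr

object UnionSketch extends App {
  case class RawQuery(sql: String) extends QueryExpr { def toSql = sql }

  val q1 = RawQuery("SELECT a FROM t1")
  val q2 = RawQuery("SELECT a FROM t2")
  val q3 = RawQuery("SELECT a FROM t3")

  println(Union(q1, q2).toSql)            // SELECT a FROM t1 UNION SELECT a FROM t2
  println(Union.unions(q1, q2, q3).toSql) // left-associated: (t1 UNION t2) UNION t3
}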
package gitbucket.core.service import gitbucket.core.model.Profile._ import profile.simple._ import gitbucket.core.util.StringUtil._ import gitbucket.core.util.Implicits._ import gitbucket.core.model._ import scala.slick.jdbc.{StaticQuery => Q} import Q.interpolation trait IssuesService { import IssuesService._ def getIssue(owner: String, repository: String, issueId: String)(implicit s: Session) = if (issueId forall (_.isDigit)) Issues filter (_.byPrimaryKey(owner, repository, issueId.toInt)) firstOption else None def getComments(owner: String, repository: String, issueId: Int)(implicit s: Session) = IssueComments filter (_.byIssue(owner, repository, issueId)) list /** @return IssueComment and commentedUser and Issue */ def getCommentsForApi(owner: String, repository: String, issueId: Int)(implicit s: Session): List[(IssueComment, Account, Issue)] = IssueComments.filter(_.byIssue(owner, repository, issueId)) .filter(_.action inSetBind Set("comment" , "close_comment", "reopen_comment")) .innerJoin(Accounts).on( (t1, t2) => t1.commentedUserName === t2.userName ) .innerJoin(Issues).on{ case ((t1, t2), t3) => t3.byIssue(t1.userName, t1.repositoryName, t1.issueId) } .map{ case ((t1, t2), t3) => (t1, t2, t3) } .list def getComment(owner: String, repository: String, commentId: String)(implicit s: Session) = if (commentId forall (_.isDigit)) IssueComments filter { t => t.byPrimaryKey(commentId.toInt) && t.byRepository(owner, repository) } firstOption else None def getIssueLabels(owner: String, repository: String, issueId: Int)(implicit s: Session) = IssueLabels .innerJoin(Labels).on { (t1, t2) => t1.byLabel(t2.userName, t2.repositoryName, t2.labelId) } .filter ( _._1.byIssue(owner, repository, issueId) ) .map ( _._2 ) .list def getIssueLabel(owner: String, repository: String, issueId: Int, labelId: Int)(implicit s: Session) = IssueLabels filter (_.byPrimaryKey(owner, repository, issueId, labelId)) firstOption /** * Returns the count of the search result against issues. * * @param condition the search condition * @param onlyPullRequest if true then counts only pull request, false then counts both of issue and pull request. * @param repos Tuple of the repository owner and the repository name * @return the count of the search result */ def countIssue(condition: IssueSearchCondition, onlyPullRequest: Boolean, repos: (String, String)*)(implicit s: Session): Int = Query(searchIssueQuery(repos, condition, onlyPullRequest).length).first /** * Returns the Map which contains issue count for each labels. 
* * @param owner the repository owner * @param repository the repository name * @param condition the search condition * @return the Map which contains issue count for each labels (key is label name, value is issue count) */ def countIssueGroupByLabels(owner: String, repository: String, condition: IssueSearchCondition, filterUser: Map[String, String])(implicit s: Session): Map[String, Int] = { searchIssueQuery(Seq(owner -> repository), condition.copy(labels = Set.empty), false) .innerJoin(IssueLabels).on { (t1, t2) => t1.byIssue(t2.userName, t2.repositoryName, t2.issueId) } .innerJoin(Labels).on { case ((t1, t2), t3) => t2.byLabel(t3.userName, t3.repositoryName, t3.labelId) } .groupBy { case ((t1, t2), t3) => t3.labelName } .map { case (labelName, t) => labelName -> t.length } .toMap } def getCommitStatues(issueList:Seq[(String, String, Int)])(implicit s: Session) :Map[(String, String, Int), CommitStatusInfo] ={ if(issueList.isEmpty){ Map.empty } else { import scala.slick.jdbc._ val issueIdQuery = issueList.map(i => "(PR.USER_NAME=? AND PR.REPOSITORY_NAME=? AND PR.ISSUE_ID=?)").mkString(" OR ") implicit val qset = SetParameter[Seq[(String, String, Int)]] { case (seq, pp) => for (a <- seq) { pp.setString(a._1) pp.setString(a._2) pp.setInt(a._3) } } import gitbucket.core.model.Profile.commitStateColumnType val query = Q.query[Seq[(String, String, Int)], (String, String, Int, Int, Int, Option[String], Option[CommitState], Option[String], Option[String])](s""" SELECT SUMM.USER_NAME, SUMM.REPOSITORY_NAME, SUMM.ISSUE_ID, CS_ALL, CS_SUCCESS , CSD.CONTEXT, CSD.STATE, CSD.TARGET_URL, CSD.DESCRIPTION FROM (SELECT PR.USER_NAME , PR.REPOSITORY_NAME , PR.ISSUE_ID , COUNT(CS.STATE) AS CS_ALL , SUM(CS.STATE='success') AS CS_SUCCESS , PR.COMMIT_ID_TO AS COMMIT_ID FROM PULL_REQUEST PR JOIN COMMIT_STATUS CS ON PR.USER_NAME=CS.USER_NAME AND PR.REPOSITORY_NAME=CS.REPOSITORY_NAME AND PR.COMMIT_ID_TO=CS.COMMIT_ID WHERE $issueIdQuery GROUP BY PR.USER_NAME, PR.REPOSITORY_NAME, PR.ISSUE_ID) as SUMM LEFT OUTER JOIN COMMIT_STATUS CSD ON SUMM.CS_ALL = 1 AND SUMM.COMMIT_ID = CSD.COMMIT_ID"""); query(issueList).list.map{ case(userName, repositoryName, issueId, count, successCount, context, state, targetUrl, description) => (userName, repositoryName, issueId) -> CommitStatusInfo(count, successCount, context, state, targetUrl, description) }.toMap } } /** * Returns the search result against issues. * * @param condition the search condition * @param pullRequest if true then returns only pull requests, false then returns only issues. * @param offset the offset for pagination * @param limit the limit for pagination * @param repos Tuple of the repository owner and the repository name * @return the search result (list of tuples which contain issue, labels and comment count) */ def searchIssue(condition: IssueSearchCondition, pullRequest: Boolean, offset: Int, limit: Int, repos: (String, String)*) (implicit s: Session): List[IssueInfo] = { // get issues and comment count and labels val result = searchIssueQueryBase(condition, pullRequest, offset, limit, repos) .leftJoin (IssueLabels) .on { case ((t1, t2), t3) => t1.byIssue(t3.userName, t3.repositoryName, t3.issueId) } .leftJoin (Labels) .on { case (((t1, t2), t3), t4) => t3.byLabel(t4.userName, t4.repositoryName, t4.labelId) } .leftJoin (Milestones) .on { case ((((t1, t2), t3), t4), t5) => t1.byMilestone(t5.userName, t5.repositoryName, t5.milestoneId) } .map { case ((((t1, t2), t3), t4), t5) => (t1, t2.commentCount, t4.labelId.?, t4.labelName.?, t4.color.?, t5.title.?) 
      }
      .list
      .splitWith { (c1, c2) =>
        c1._1.userName == c2._1.userName &&
        c1._1.repositoryName == c2._1.repositoryName &&
        c1._1.issueId == c2._1.issueId
      }

    val status = getCommitStatues(result.map(_.head._1).map(is => (is.userName, is.repositoryName, is.issueId)))

    result.map { issues =>
      issues.head match {
        case (issue, commentCount, _, _, _, milestone) =>
          IssueInfo(issue,
            issues.flatMap { t =>
              t._3.map ( Label(issue.userName, issue.repositoryName, _, t._4.get, t._5.get) )
            } toList,
            milestone,
            commentCount,
            status.get(issue.userName, issue.repositoryName, issue.issueId))
      }
    } toList
  }

  /** for api
   * @return (issue, issueUser, commentCount, pullRequest, headRepo, headOwner)
   */
  def searchPullRequestByApi(condition: IssueSearchCondition, offset: Int, limit: Int, repos: (String, String)*)
                            (implicit s: Session): List[(Issue, Account, Int, PullRequest, Repository, Account)] = {
    // get issues and comment count and labels
    searchIssueQueryBase(condition, true, offset, limit, repos)
      .innerJoin(PullRequests).on { case ((t1, t2), t3) => t3.byPrimaryKey(t1.userName, t1.repositoryName, t1.issueId) }
      .innerJoin(Repositories).on { case (((t1, t2), t3), t4) => t4.byRepository(t1.userName, t1.repositoryName) }
      .innerJoin(Accounts).on { case ((((t1, t2), t3), t4), t5) => t5.userName === t1.openedUserName }
      .innerJoin(Accounts).on { case (((((t1, t2), t3), t4), t5), t6) => t6.userName === t4.userName }
      .map { case (((((t1, t2), t3), t4), t5), t6) =>
        (t1, t5, t2.commentCount, t3, t4, t6)
      }
      .list
  }

  private def searchIssueQueryBase(condition: IssueSearchCondition, pullRequest: Boolean, offset: Int, limit: Int, repos: Seq[(String, String)])
                                  (implicit s: Session) =
    searchIssueQuery(repos, condition, pullRequest)
      .innerJoin(IssueOutline).on { (t1, t2) => t1.byIssue(t2.userName, t2.repositoryName, t2.issueId) }
      .sortBy { case (t1, t2) =>
        (condition.sort match {
          case "created"  => t1.registeredDate
          case "comments" => t2.commentCount
          case "updated"  => t1.updatedDate
        }) match {
          case sort => condition.direction match {
            case "asc"  => sort asc
            case "desc" => sort desc
          }
        }
      }
      .drop(offset).take(limit)

  /**
   * Assembles query for conditional issue searching.
   */
  private def searchIssueQuery(repos: Seq[(String, String)], condition: IssueSearchCondition, pullRequest: Boolean)(implicit s: Session) =
    Issues filter { t1 =>
      repos
        .map { case (owner, repository) => t1.byRepository(owner, repository) }
        .foldLeft[Column[Boolean]](false) ( _ || _ ) &&
      (t1.closed === (condition.state == "closed").bind) &&
      //(t1.milestoneId === condition.milestoneId.get.get.bind, condition.milestoneId.flatten.isDefined) &&
      (t1.milestoneId.? isEmpty, condition.milestone == Some(None)) &&
      (t1.assignedUserName === condition.assigned.get.bind, condition.assigned.isDefined) &&
      (t1.openedUserName === condition.author.get.bind, condition.author.isDefined) &&
      (t1.pullRequest === pullRequest.bind) &&
      // Milestone filter
      (Milestones filter { t2 =>
        (t2.byPrimaryKey(t1.userName, t1.repositoryName, t1.milestoneId)) &&
        (t2.title === condition.milestone.get.get.bind)
      } exists, condition.milestone.flatten.isDefined) &&
      // Label filter
      (IssueLabels filter { t2 =>
        (t2.byIssue(t1.userName, t1.repositoryName, t1.issueId)) &&
        (t2.labelId in (Labels filter { t3 =>
          (t3.byRepository(t1.userName, t1.repositoryName)) &&
          (t3.labelName inSetBind condition.labels)
        } map(_.labelId)))
      } exists, condition.labels.nonEmpty) &&
      // Visibility filter
      (Repositories filter { t2 =>
        (t2.byRepository(t1.userName, t1.repositoryName)) &&
        (t2.isPrivate === (condition.visibility == Some("private")).bind)
      } exists, condition.visibility.nonEmpty) &&
      // Organization (group) filter
      (t1.userName inSetBind condition.groups, condition.groups.nonEmpty) &&
      // Mentioned filter
      ((t1.openedUserName === condition.mentioned.get.bind) || t1.assignedUserName === condition.mentioned.get.bind ||
        (IssueComments filter { t2 =>
          (t2.byIssue(t1.userName, t1.repositoryName, t1.issueId)) &&
          (t2.commentedUserName === condition.mentioned.get.bind)
        } exists), condition.mentioned.isDefined)
    }

  def createIssue(owner: String, repository: String, loginUser: String, title: String, content: Option[String],
                  assignedUserName: Option[String], milestoneId: Option[Int],
                  isPullRequest: Boolean = false)(implicit s: Session) =
    // next id number
    sql"SELECT ISSUE_ID + 1 FROM ISSUE_ID WHERE USER_NAME = $owner AND REPOSITORY_NAME = $repository FOR UPDATE".as[Int]
      .firstOption.filter { id =>
        Issues insert Issue(
          owner,
          repository,
          id,
          loginUser,
          milestoneId,
          assignedUserName,
          title,
          content,
          false,
          currentDate,
          currentDate,
          isPullRequest)

        // increment issue id
        IssueId
          .filter (_.byPrimaryKey(owner, repository))
          .map (_.issueId)
          .update (id) > 0
      } get

  def registerIssueLabel(owner: String, repository: String, issueId: Int, labelId: Int)(implicit s: Session) =
    IssueLabels insert IssueLabel(owner, repository, issueId, labelId)

  def deleteIssueLabel(owner: String, repository: String, issueId: Int, labelId: Int)(implicit s: Session) =
    IssueLabels filter(_.byPrimaryKey(owner, repository, issueId, labelId)) delete

  def createComment(owner: String, repository: String, loginUser: String, issueId: Int, content: String, action: String)(implicit s: Session): Int =
    IssueComments.autoInc insert IssueComment(
      userName          = owner,
      repositoryName    = repository,
      issueId           = issueId,
      action            = action,
      commentedUserName = loginUser,
      content           = content,
      registeredDate    = currentDate,
      updatedDate       = currentDate)

  def updateIssue(owner: String, repository: String, issueId: Int, title: String, content: Option[String])(implicit s: Session) =
    Issues
      .filter (_.byPrimaryKey(owner, repository, issueId))
      .map { t => (t.title, t.content.?, t.updatedDate) }
      .update (title, content, currentDate)

  def updateAssignedUserName(owner: String, repository: String, issueId: Int, assignedUserName: Option[String])(implicit s: Session) =
    Issues.filter (_.byPrimaryKey(owner, repository, issueId)).map(_.assignedUserName?).update (assignedUserName)

  def updateMilestoneId(owner: String, repository: String, issueId: Int, milestoneId: Option[Int])(implicit s: Session) =
    Issues.filter (_.byPrimaryKey(owner, repository, issueId)).map(_.milestoneId?).update (milestoneId)

  def updateComment(commentId: Int, content: String)(implicit s: Session) =
    IssueComments
      .filter (_.byPrimaryKey(commentId))
      .map { t => t.content -> t.updatedDate }
      .update (content, currentDate)

  def deleteComment(commentId: Int)(implicit s: Session) =
    IssueComments filter (_.byPrimaryKey(commentId)) delete

  def updateClosed(owner: String, repository: String, issueId: Int, closed: Boolean)(implicit s: Session) =
    Issues
      .filter (_.byPrimaryKey(owner, repository, issueId))
      .map { t => t.closed -> t.updatedDate }
      .update (closed, currentDate)

  /**
   * Search issues by keyword.
   *
   * @param owner the repository owner
   * @param repository the repository name
   * @param query the keywords separated by whitespace.
   * @return issues with comment count and matched content of issue or comment
   */
  def searchIssuesByKeyword(owner: String, repository: String, query: String)
                           (implicit s: Session): List[(Issue, Int, String)] = {
    import slick.driver.JdbcDriver.likeEncode
    val keywords = splitWords(query.toLowerCase)

    // Search Issue
    val issues = Issues
      .filter(_.byRepository(owner, repository))
      .innerJoin(IssueOutline).on { case (t1, t2) =>
        t1.byIssue(t2.userName, t2.repositoryName, t2.issueId)
      }
      .filter { case (t1, t2) =>
        keywords.map { keyword =>
          (t1.title.toLowerCase like (s"%${likeEncode(keyword)}%", '^')) ||
          (t1.content.toLowerCase like (s"%${likeEncode(keyword)}%", '^'))
        }.reduceLeft(_ && _)
      }
      .map { case (t1, t2) =>
        (t1, 0, t1.content.?, t2.commentCount)
      }

    // Search IssueComment
    val comments = IssueComments
      .filter(_.byRepository(owner, repository))
      .innerJoin(Issues).on { case (t1, t2) =>
        t1.byIssue(t2.userName, t2.repositoryName, t2.issueId)
      }
      .innerJoin(IssueOutline).on { case ((t1, t2), t3) =>
        t2.byIssue(t3.userName, t3.repositoryName, t3.issueId)
      }
      .filter { case ((t1, t2), t3) =>
        keywords.map { query =>
          t1.content.toLowerCase like (s"%${likeEncode(query)}%", '^')
        }.reduceLeft(_ && _)
      }
      .map { case ((t1, t2), t3) =>
        (t2, t1.commentId, t1.content.?, t3.commentCount)
      }

    issues.union(comments).sortBy { case (issue, commentId, _, _) =>
      issue.issueId -> commentId
    }.list.splitWith { case ((issue1, _, _, _), (issue2, _, _, _)) =>
      issue1.issueId == issue2.issueId
    }.map { _.head match {
      case (issue, _, content, commentCount) => (issue, commentCount, content.getOrElse(""))
    }}.toList
  }

  def closeIssuesFromMessage(message: String, userName: String, owner: String, repository: String)(implicit s: Session) = {
    extractCloseId(message).foreach { issueId =>
      for(issue <- getIssue(owner, repository, issueId) if !issue.closed){
        createComment(owner, repository, userName, issue.issueId, "Close", "close")
        updateClosed(owner, repository, issue.issueId, true)
      }
    }
  }
}

object IssuesService {
  import javax.servlet.http.HttpServletRequest

  val IssueLimit = 25

  case class IssueSearchCondition(
      labels: Set[String] = Set.empty,
      milestone: Option[Option[String]] = None,
      author: Option[String] = None,
      assigned: Option[String] = None,
      mentioned: Option[String] = None,
      state: String = "open",
      sort: String = "created",
      direction: String = "desc",
      visibility: Option[String] = None,
      groups: Set[String] = Set.empty){

    def isEmpty: Boolean = {
      labels.isEmpty && milestone.isEmpty && author.isEmpty && assigned.isEmpty &&
        state == "open" && sort == "created" && direction == "desc" && visibility.isEmpty
    }

    def nonEmpty: Boolean = !isEmpty

    def toFilterString: String = (
      List(
        Some(s"is:${state}"),
        author.map(author => s"author:${author}"),
        assigned.map(assignee => s"assignee:${assignee}"),
        mentioned.map(mentioned => s"mentions:${mentioned}")
      ).flatten ++
      labels.map(label => s"label:${label}") ++
      List(
        milestone.map { _ match {
          case Some(x) => s"milestone:${x}"
          case None    => "no:milestone"
        }},
        (sort, direction) match {
          case ("created" , "desc") => None
          case ("created" , "asc" ) => Some("sort:created-asc")
          case ("comments", "desc") => Some("sort:comments-desc")
          case ("comments", "asc" ) => Some("sort:comments-asc")
          case ("updated" , "desc") => Some("sort:updated-desc")
          case ("updated" , "asc" ) => Some("sort:updated-asc")
        },
        visibility.map(visibility => s"visibility:${visibility}")
      ).flatten ++
      groups.map(group => s"group:${group}")
    ).mkString(" ")

    def toURL: String =
      "?" + List(
        if(labels.isEmpty) None else Some("labels=" + urlEncode(labels.mkString(","))),
        milestone.map { _ match {
          case Some(x) => "milestone=" + urlEncode(x)
          case None    => "milestone=none"
        }},
        author   .map(x => "author="    + urlEncode(x)),
        assigned .map(x => "assigned="  + urlEncode(x)),
        mentioned.map(x => "mentioned=" + urlEncode(x)),
        Some("state="     + urlEncode(state)),
        Some("sort="      + urlEncode(sort)),
        Some("direction=" + urlEncode(direction)),
        visibility.map(x => "visibility=" + urlEncode(x)),
        if(groups.isEmpty) None else Some("groups=" + urlEncode(groups.mkString(",")))
      ).flatten.mkString("&")

  }

  object IssueSearchCondition {

    private def param(request: HttpServletRequest, name: String, allow: Seq[String] = Nil): Option[String] = {
      val value = request.getParameter(name)
      if(value == null || value.isEmpty || (allow.nonEmpty && !allow.contains(value))) None else Some(value)
    }

    /**
     * Restores IssueSearchCondition instance from filter query.
     */
    def apply(filter: String, milestones: Map[String, Int]): IssueSearchCondition = {
      val conditions = filter.split("[  \\t]+").flatMap { x =>
        x.split(":") match {
          case Array(key, value) => Some((key, value))
          case _                 => None
        }
      }.groupBy(_._1).map { case (key, values) => key -> values.map(_._2).toSeq }

      val (sort, direction) = conditions.get("sort").flatMap(_.headOption).getOrElse("created-desc") match {
        case "created-asc"   => ("created" , "asc" )
        case "comments-desc" => ("comments", "desc")
        case "comments-asc"  => ("comments", "asc" )
        case "updated-desc"  => ("updated" , "desc")
        case "updated-asc"   => ("updated" , "asc" )
        case _               => ("created" , "desc")
      }

      IssueSearchCondition(
        conditions.get("label").map(_.toSet).getOrElse(Set.empty),
        conditions.get("milestone").flatMap(_.headOption) match {
          case None         => None
          case Some("none") => Some(None)
          case Some(x)      => Some(Some(x)) //milestones.get(x).map(x => Some(x))
        },
        conditions.get("author").flatMap(_.headOption),
        conditions.get("assignee").flatMap(_.headOption),
        conditions.get("mentions").flatMap(_.headOption),
        conditions.get("is").getOrElse(Seq.empty).find(x => x == "open" || x == "closed").getOrElse("open"),
        sort,
        direction,
        conditions.get("visibility").flatMap(_.headOption),
        conditions.get("group").map(_.toSet).getOrElse(Set.empty)
      )
    }

    /**
     * Restores IssueSearchCondition instance from request parameters.
     */
    def apply(request: HttpServletRequest): IssueSearchCondition =
      IssueSearchCondition(
        param(request, "labels").map(_.split(",").toSet).getOrElse(Set.empty),
        param(request, "milestone").map {
          case "none" => None
          case x      => Some(x)
        },
        param(request, "author"),
        param(request, "assigned"),
        param(request, "mentioned"),
        param(request, "state", Seq("open", "closed")).getOrElse("open"),
        param(request, "sort", Seq("created", "comments", "updated")).getOrElse("created"),
        param(request, "direction", Seq("asc", "desc")).getOrElse("desc"),
        param(request, "visibility"),
        param(request, "groups").map(_.split(",").toSet).getOrElse(Set.empty)
      )

    def page(request: HttpServletRequest) = try {
      val i = param(request, "page").getOrElse("1").toInt
      if(i <= 0) 1 else i
    } catch {
      case e: NumberFormatException => 1
    }
  }

  case class CommitStatusInfo(count: Int, successCount: Int, context: Option[String], state: Option[CommitState],
                              targetUrl: Option[String], description: Option[String])

  case class IssueInfo(issue: Issue, labels: List[Label], milestone: Option[String], commentCount: Int,
                       status: Option[CommitStatusInfo])

}
marklacroix/gitbucket
src/main/scala/gitbucket/core/service/IssuesService.scala
Scala
apache-2.0
23,268
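A minimal, hypothetical usage sketch for the IssueSearchCondition helper defined in the file above. It assumes the gitbucket.core.service package implied by the file path is on the classpath; the label, milestone, and example values are invented, and the printed strings are only approximate.

import gitbucket.core.service.IssuesService.IssueSearchCondition

object IssueSearchConditionExample extends App {
  // build a condition the same way the issues controller would from query parameters
  val condition = IssueSearchCondition(
    labels    = Set("bug"),
    milestone = Some(Some("v1.0")),
    state     = "closed",
    sort      = "comments",
    direction = "asc"
  )

  // round-trip helpers defined on the case class above
  println(condition.toFilterString) // roughly: "is:closed label:bug milestone:v1.0 sort:comments-asc"
  println(condition.toURL)          // query string starting with "?labels=bug&milestone=v1.0&state=closed..."
}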
package lila.round

import akka.stream.OverflowStrategy
import akka.stream.scaladsl._
import chess.Color
import chess.format.Forsyth
import chess.{ Centis, Replay }
import play.api.libs.json._
import scala.concurrent.ExecutionContext

import lila.common.Bus
import lila.game.actorApi.FinishGame
import lila.game.actorApi.MoveGameEvent
import lila.game.{ Game, GameRepo }

final class ApiMoveStream(gameRepo: GameRepo, gameJsonView: lila.game.JsonView)(implicit
    ec: ExecutionContext
) {

  def apply(game: Game, delayMoves: Boolean): Source[JsObject, _] =
    Source futureSource {
      val hasMoveDelay         = delayMoves && game.hasClock
      val delayMovesBy         = hasMoveDelay ?? 3
      val delayKeepsFirstMoves = hasMoveDelay ?? 5
      gameRepo.initialFen(game) map { initialFen =>
        val buffer = scala.collection.mutable.Queue.empty[JsObject]
        var moves  = 0
        Source(List(gameJsonView(game, initialFen))) concat
          Source
            .queue[JsObject](16, akka.stream.OverflowStrategy.dropHead)
            .statefulMapConcat { () => js =>
              moves += 1
              if (game.finished || moves <= delayKeepsFirstMoves) List(js)
              else {
                buffer.enqueue(js)
                (buffer.size > delayMovesBy) ?? List(buffer.dequeue())
              }
            }
            .mapMaterializedValue { queue =>
              val clocks = ~(for {
                clk   <- game.clock
                times <- game.bothClockStates
              } yield Vector(clk.config.initTime, clk.config.initTime) ++ times)
              val clockOffset = game.startColor.fold(0, 1)
              Replay.situations(game.pgnMoves, initialFen, game.variant) foreach {
                _.zipWithIndex foreach { case (s, index) =>
                  val clk = for {
                    white <- clocks.lift(index + 1 - clockOffset)
                    black <- clocks.lift(index - clockOffset)
                  } yield (white, black)
                  queue offer toJson(
                    Forsyth exportBoard s.board,
                    s.color,
                    s.board.history.lastMove.map(_.uci),
                    clk
                  )
                }
              }
              if (game.finished) {
                queue offer gameJsonView(game, initialFen)
                queue.complete()
              } else {
                val chans = List(MoveGameEvent makeChan game.id, "finishGame")
                val sub = Bus.subscribeFun(chans: _*) {
                  case MoveGameEvent(g, fen, move) =>
                    queue.offer(toJson(g, fen, move.some)).unit
                  case FinishGame(g, _, _) if g.id == game.id =>
                    queue offer gameJsonView(g, initialFen)
                    (1 to buffer.size) foreach { _ => queue.offer(Json.obj()) } // push buffer content out
                    queue.complete()
                }
                queue.watchCompletion() dforeach { _ =>
                  Bus.unsubscribe(sub, chans)
                }
              }
            }
      }
    }

  private def toJson(game: Game, fen: String, lastMoveUci: Option[String]): JsObject =
    toJson(
      fen,
      game.turnColor,
      lastMoveUci,
      game.clock.map { clk =>
        (clk.remainingTime(chess.White), clk.remainingTime(chess.Black))
      }
    )

  private def toJson(
      boardFen: String,
      turnColor: Color,
      lastMoveUci: Option[String],
      clock: Option[(Centis, Centis)]
  ): JsObject =
    clock.foldLeft(
      Json
        .obj("fen" -> s"$boardFen ${turnColor.letter}")
        .add("lm" -> lastMoveUci)
    ) { case (js, clk) =>
      js ++ Json.obj("wc" -> clk._1.roundSeconds, "bc" -> clk._2.roundSeconds)
    }
}
luanlv/lila
modules/round/src/main/ApiMoveStream.scala
Scala
mit
3,758
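A hedged sketch of consuming the Source produced by ApiMoveStream above. The ActorSystem wiring and the ??? placeholders are assumptions (the stream instance and game are normally provided by lila's dependency injection), and the implicit Materializer derivation assumes Akka 2.6+.

import akka.actor.ActorSystem
import play.api.libs.json.Json
import scala.concurrent.ExecutionContext

object ApiMoveStreamExample {
  implicit val system: ActorSystem  = ActorSystem("example")
  implicit val ec: ExecutionContext = system.dispatcher

  // both values come from lila's wiring in practice; left abstract here
  val moveStream: lila.round.ApiMoveStream = ???
  val game: lila.game.Game                 = ???

  def run(): Unit =
    moveStream(game, delayMoves = true) // Source[JsObject, _]
      .map(Json.stringify)              // one JSON object per board state
      .runForeach(println)              // Materializer derived from the implicit ActorSystem
}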
/*
 * Copyright 2012-2014 Comcast Cable Communications Management, LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.comcast.xfinity.sirius.uberstore.data

import com.comcast.xfinity.sirius.api.impl.OrderedEvent
import com.comcast.xfinity.sirius.uberstore.common.Fnv1aChecksummer

import scala.annotation.tailrec

object UberDataFile {

  /**
   * Create a fully wired UberDataFile
   *
   * Uses:
   * - UberStoreBinaryFileOps for file operations
   * - Fnv1aChecksummer for checksums
   * - BinaryEventCodec for encoding events
   *
   * @param dataFileName the data file name, this will
   *          be created if it does not exist
   *
   * @return fully constructed UberDataFile
   */
  def apply(dataFileName: String, fileHandleFactory: UberDataFileHandleFactory): UberDataFile = {
    val fileOps = new UberStoreBinaryFileOps with Fnv1aChecksummer
    val codec = new BinaryEventCodec
    new UberDataFile(dataFileName, fileHandleFactory, fileOps, codec)
  }
}

/**
 * Lower level file access for UberStore data files.
 *
 * @param fileHandleFactory the UberDataFile.UberFileDesc to provide handles
 *          to the underlying file. Extracted out for testing.
 * @param fileOps service class providing low level file operations
 * @param codec OrderedEventCodec for transforming OrderedEvents
 */
// TODO: use trait to hide this constructor but keep type visible?
private[uberstore] class UberDataFile(dataFileName: String,
                                      fileHandleFactory: UberDataFileHandleFactory,
                                      fileOps: UberStoreFileOps,
                                      codec: OrderedEventCodec) {

  val writeHandle: UberDataFileWriteHandle = fileHandleFactory.createWriteHandle(dataFileName)

  var isClosed = false

  /**
   * Write an event to this file
   *
   * @param event the OrderedEvent to persist
   */
  def writeEvent(event: OrderedEvent): Long = {
    if (isClosed) {
      throw new IllegalStateException("Attempting to write to closed UberDataFile")
    }

    fileOps.put(writeHandle, codec.serialize(event))
  }

  /**
   * Fold left over this entire file
   *
   * @param acc0 initial accumulator value
   * @param foldFun fold function
   */
  def foldLeft[T](acc0: T)(foldFun: (T, Long, OrderedEvent) => T): T = {
    foldLeftRange(0, Long.MaxValue)(acc0)(foldFun)
  }

  /**
   * Fold left starting at baseOffset, until the file pointer is at or beyond endOff.
   *
   * The caller is expected to put in a sane baseOff which corresponds with the start
   * of an actual event.
   *
   * This is a low low level API function that should not be taken lightly
   *
   * @param baseOff starting offset in the file to start at
   * @param endOff offset at or after which the operation should conclude, inclusive
   * @param acc0 initial accumulator value
   * @param foldFun the fold function
   *
   * @return T the final accumulator value
   */
  def foldLeftRange[T](baseOff: Long, endOff: Long)(acc0: T)(foldFun: (T, Long, OrderedEvent) => T): T = {
    val readHandle = fileHandleFactory.createReadHandle(dataFileName, baseOff)
    try {
      foldLeftUntil(readHandle, endOff, acc0, foldFun)
    } finally {
      readHandle.close()
    }
  }

  // private low low low level fold left
  @tailrec
  private def foldLeftUntil[T](readHandle: UberDataFileReadHandle, maxOffset: Long, acc: T, foldFun: (T, Long, OrderedEvent) => T): T = {
    val offset = readHandle.offset()
    if (offset > maxOffset) {
      acc
    } else {
      fileOps.readNext(readHandle) match {
        case None => acc
        case Some(bytes) =>
          val accNew = foldFun(acc, offset, codec.deserialize(bytes))
          foldLeftUntil(readHandle, maxOffset, accNew, foldFun)
      }
    }
  }

  /**
   * Close open file handles. Only touching writeHandle here, since readHandles are opened and then
   * closed in a finally of the same block. This UberDataFile should not be used after close is called.
   */
  def close(): Unit = {
    if (!isClosed) {
      writeHandle.close()
      isClosed = true
    }
  }

  override def finalize(): Unit = {
    close()
  }
}
Comcast/sirius
src/main/scala/com/comcast/xfinity/sirius/uberstore/data/UberDataFile.scala
Scala
apache-2.0
4,665
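A hedged usage sketch for UberDataFile above. The handle factory is left abstract because its construction is configuration-specific; the /tmp path is invented, and the OrderedEvent argument order (sequence, timestamp, request) and the Delete request type are assumptions about the Sirius API rather than facts taken from this file.

import com.comcast.xfinity.sirius.api.impl.{ Delete, OrderedEvent }
import com.comcast.xfinity.sirius.uberstore.data.{ UberDataFile, UberDataFileHandleFactory }

object UberDataFileExample {
  // obtained from Sirius configuration in practice; left abstract here
  val handleFactory: UberDataFileHandleFactory = ???

  def run(): Unit = {
    val dataFile = UberDataFile("/tmp/example.data", handleFactory)
    try {
      // append a single event; writeEvent returns the offset it was written at
      dataFile.writeEvent(OrderedEvent(1L, System.currentTimeMillis(), Delete("some-key")))

      // count every event in the file with the foldLeft defined above
      val eventCount = dataFile.foldLeft(0) { (acc, _, _) => acc + 1 }
      println(s"events on disk: $eventCount")
    } finally {
      dataFile.close()
    }
  }
}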
package org.apache.mesos.chronos.scheduler.jobs

import org.apache.mesos.chronos.scheduler.graph.JobGraph
import org.apache.mesos.chronos.scheduler.state.PersistenceStore
import org.joda.time._
import org.specs2.mock._
import org.specs2.mutable._

class JobSchedulerSpec extends SpecificationWithJUnit with Mockito {

  import MockJobUtils._

  //TODO(FL): Write more specs for the REST framework.
  "JobScheduler" should {
    "Construct a task for a given time when the schedule is within epsilon" in {
      val epsilon = Minutes.minutes(1).toPeriod
      val jobName = "FOO"
      val schedule = Schedule.parse("R1/2012-01-01T00:00:01.000Z/PT1M").get
      val job1 = new InternalScheduleBasedJob(scheduleData = schedule, name = jobName, command = "", epsilon = epsilon)

      val singleJobStream = new ScheduleStream(jobName, schedule)

      val mockGraph = mock[JobGraph]
      mockGraph.lookupVertex(jobName).returns(Some(job1))

      val horizon = Minutes.minutes(10).toPeriod
      //TODO(FL): Mock the DispatchQueue here and below
      val schedules = mockScheduler(horizon, null, mockGraph)

      val (task1, stream1) = schedules.next(new DateTime("2012-01-01T00:00:00.000Z"), singleJobStream)
      task1.get.due must beEqualTo(DateTime.parse("2012-01-01T00:00:01.000Z"))

      val (task2, stream2) = schedules.next(new DateTime("2012-01-01T00:01:00.000Z"), stream1.get)
      task2 must beNone
      stream2 must beNone
    }

    "Ignore a task that has been due past epsilon" in {
      val epsilon = Minutes.minutes(1).toPeriod
      val jobName = "FOO"
      val schedule = Schedule.parse("R1/2012-01-01T00:00:01.000Z/PT1M").get
      val job1 = new InternalScheduleBasedJob(scheduleData = schedule, name = jobName, command = "", epsilon = epsilon)

      val singleJobStream = new ScheduleStream(jobName, schedule)

      val mockGraph = mock[JobGraph]
      mockGraph.lookupVertex(jobName).returns(Some(job1))

      val horizon = Minutes.minutes(10).toPeriod
      val schedules = mockScheduler(horizon, null, mockGraph)

      val (task1, stream1) = schedules.next(new DateTime("2012-01-01T00:01:01.000Z"), singleJobStream)
      task1 must beNone
      stream1 must beNone
    }

    "Get an empty stream if no-op schedule is given" in {
      val epsilon = Minutes.minutes(1).toPeriod
      val jobName = "FOO"
      val schedule: Schedule = Schedule.parse("R0/2012-01-01T00:00:01.000Z/PT1M").get
      val job1 = new InternalScheduleBasedJob(scheduleData = schedule, name = jobName, command = "", epsilon = epsilon)

      val singleJobStream = new ScheduleStream(jobName, schedule)

      val mockGraph = mock[JobGraph]
      mockGraph.lookupVertex(jobName).returns(Some(job1))

      val horizon = Minutes.minutes(10).toPeriod
      val schedules = mockScheduler(horizon, null, mockGraph)

      val (task1, stream1) = schedules.next(new DateTime("2012-01-01T00:01:01.000Z"), singleJobStream)
      task1 must beNone
      stream1 must beNone
    }

    "Old schedule stream is removed" in {
      val epsilon = Minutes.minutes(1).toPeriod
      val jobName = "FOO"
      val schedule: Schedule = Schedule.parse("R1/2012-01-01T00:00:01.000Z/PT1M").get
      val job1 = new InternalScheduleBasedJob(scheduleData = schedule, name = jobName, command = "", epsilon = epsilon)

      val mockGraph = mock[JobGraph]
      mockGraph.lookupVertex(jobName).returns(Some(job1))

      val singleJobStream = new ScheduleStream(jobName, schedule)

      val horizon = Minutes.minutes(10).toPeriod
      val scheduler = mockScheduler(horizon, null, mockGraph)

      val newScheduleStreams = scheduler.iteration(DateTime.parse("2012-01-02T00:01:01.000Z"), List(singleJobStream))
      newScheduleStreams.size must_== 0
    }

    //"This is really not a unit test but an integration test!
    "Old schedule streams are removed but newer ones are kept" in {
      val epsilon = Seconds.seconds(20).toPeriod
      val jobName = "FOO"
      val schedule: Schedule = Schedule.parse("R10/2012-01-01T00:00:00.000Z/PT1M").get
      val job1 = new InternalScheduleBasedJob(scheduleData = schedule, name = jobName, command = "", epsilon = epsilon)

      //2012-01-01T00:00:00.000 -> 2012-01-01T00:09:00.000
      val jobStream = new ScheduleStream(jobName, schedule)

      // 1st planned invocation @ 2012-01-01T00:00:00.000Z (missed)
      // 2nd planned invocation @ 2012-01-01T00:01:00.000Z (executed)
      // 3rd planned invocation @ 2012-01-01T00:02:00.000Z (scheduled)
      // 4th planned invocation @ 2012-01-01T00:03:00.000Z (scheduled)
      // 5th planned invocation @ 2012-01-01T00:04:00.000Z (scheduled)
      // 6th planned invocation @ 2012-01-01T00:05:00.000Z (scheduled)
      // 7th planned invocation @ 2012-01-01T00:06:00.000Z (scheduled)
      // 8th planned invocation @ 2012-01-01T00:07:00.000Z (ahead of schedule horizon)
      val horizon = Minutes.minutes(5).toPeriod

      val mockTaskManager = mock[TaskManager]
      val mockGraph = mock[JobGraph]
      val mockPersistenceStore = mock[PersistenceStore]

      mockGraph.lookupVertex(jobName).returns(Some(job1))

      val scheduler = mockScheduler(horizon, mockTaskManager, mockGraph)

      // First one passed, next invocation is 01:01 (b/c of 20 second epsilon)
      // Horizon is 5 minutes, so lookforward until 00:06:01.000Z
      val newScheduleStreams = scheduler.iteration(DateTime.parse("2012-01-01T00:01:01.000Z"), List(jobStream))

      val (newJobName, newSchedule) = newScheduleStreams.head.head
      newSchedule.invocationTime must_== DateTime.parse("2012-01-01T00:07:00.000Z")

      there were 6.times(mockTaskManager).scheduleDelayedTask(any[ScheduledTask], anyLong, any[Boolean])
    }

    "Future task beyond time-horizon should not be scheduled" in {
      val epsilon = Seconds.seconds(60).toPeriod
      val jobName = "FOO"
      val schedule: Schedule = Schedule.parse("R10/2012-01-10T00:00:00.000Z/PT1M").get
      val job1 = new InternalScheduleBasedJob(scheduleData = schedule, name = jobName, command = "", epsilon = epsilon)

      val jobStream = new ScheduleStream(jobName, schedule)

      val mockGraph = mock[JobGraph]
      mockGraph.lookupVertex(jobName).returns(Some(job1))

      val horizon = Minutes.minutes(60).toPeriod
      val scheduler = mockScheduler(horizon, null, mockGraph)

      val newScheduleStreams = scheduler.iteration(DateTime.parse("2012-01-01T00:01:01.000Z"), List(jobStream))

      val (newJobName, newSchedule) = newScheduleStreams.head.head
      newSchedule.invocationTime must_== DateTime.parse("2012-01-10T00:00:00.000Z")
    }

    "Multiple tasks must be scheduled if they're within epsilon and before time-horizon" in {
      val epsilon = Minutes.minutes(5).toPeriod
      val jobName = "FOO"
      val schedule: Schedule = Schedule.parse("R60/2012-01-01T00:00:00.000Z/PT1S").get
      val job1 = new InternalScheduleBasedJob(scheduleData = schedule, name = jobName, command = "", epsilon = epsilon)

      val jobStream = new ScheduleStream(jobName, schedule)

      val mockGraph = mock[JobGraph]
      mockGraph.lookupVertex(jobName).returns(Some(job1))

      val horizon = Minutes.minutes(1).toPeriod
      val mockTaskManager = mock[TaskManager]
      val scheduler = mockScheduler(horizon, mockTaskManager, mockGraph)

      val newScheduleStreams = scheduler.iteration(DateTime.parse("2012-01-01T00:01:00.000Z"), List(jobStream))
      newScheduleStreams.size must beEqualTo(0)

      there were 60.times(mockTaskManager).scheduleDelayedTask(any[ScheduledTask], anyLong, any[Boolean])
    }

    "Infinite task must be scheduled" in {
      val epsilon = Seconds.seconds(60).toPeriod
      val jobName = "FOO"
      val schedule: Schedule = Schedule.parse("R/2012-01-01T00:00:00.000Z/PT1M").get
      val job1 = new InternalScheduleBasedJob(scheduleData = schedule, name = jobName, command = "", epsilon = epsilon)

      val jobStream = new ScheduleStream(jobName, schedule)

      val horizon = Seconds.seconds(30).toPeriod
      val mockTaskManager = mock[TaskManager]

      val mockGraph = mock[JobGraph]
      mockGraph.lookupVertex(jobName).returns(Some(job1))

      val scheduler = mockScheduler(horizon, mockTaskManager, mockGraph)

      val newScheduleStreams = scheduler.iteration(DateTime.parse("2012-01-01T00:01:01.000Z"), List(jobStream))

      there was one(mockTaskManager).scheduleDelayedTask(any[ScheduledTask], any[Long], any[Boolean])
    }

    //TODO(FL): Write test that ensures that other tasks don't cause a stackoverflow
    "Job scheduler sans streams is empty" in {
      val scheduler = mockScheduler(Period.hours(1), null, null)
      val newScheduleStreams = scheduler.iteration(DateTime.parse("2012-01-01T00:01:00.000Z"), List())
      newScheduleStreams.size must beEqualTo(0)
    }

    // TODO(FL): The interfaces have changed, this unit test needs to be added back in!
    //    "New schedules can be added to JobScheduler" in {
    //      val horizon = Seconds.seconds(30).toPeriod
    //      val queue = mock[DispatchQueue]
    //      val mockTaskManager = mock[TaskManager]
    //      val jobGraph = mock[JobGraph]
    //      val scheduler = new JobScheduler(horizon, queue, mockTaskManager, jobGraph, mock[PersistenceStore])
    //      val newScheduleStreams = scheduler.(DateTime.parse("2012-01-01T00:00:00.000Z"), List())
    //      newScheduleStreams.size must beEqualTo(0)
    //      val newScheduler1 = scheduler.addSchedule("R1/2012-01-01T01:00:00.000Z/PT1M", new BaseJob("foo", Period.minutes(5)))
    //      val newScheduler2 = newScheduler1.addSchedule("R1/2012-01-01T02:00:00.000Z/PT1M", new BaseJob("bar", Period.minutes(5)))
    //      val updatedScheduleStreams = newScheduler2.checkAndSchedule(DateTime.parse("2012-01-01T00:01:00.000Z"))
    //      updatedScheduleStreams.size must beEqualTo(0)
    //      //TODO(FL): Implement a test verifying that the jobs have launched
    //      //newScheduler2.numberOfScheduledJobs.get() must_== 2
    //    }
  }

  "Removing tasks must also remove the streams" in {
    val epsilon = Seconds.seconds(60).toPeriod
    val schedule = Schedule.parse("R/2012-01-01T00:00:00.000Z/PT1M").get
    val job1 = new InternalScheduleBasedJob(schedule, "FOO", "CMD", epsilon)
    val job2 = new InternalScheduleBasedJob(schedule, "BAR", "CMD", epsilon)

    val horizon = Seconds.seconds(30).toPeriod
    val mockTaskManager = mock[TaskManager]

    val jobGraph = mock[JobGraph]
    val store = mock[PersistenceStore]
    store.getTaskIds(Some(anyString)).returns(List())

    jobGraph.lookupVertex(job1.name).returns(Some(job1))
    jobGraph.lookupVertex(job2.name).returns(Some(job2))
    jobGraph.getChildren(job2.name).returns(List())

    val scheduler = mockScheduler(horizon, mockTaskManager, jobGraph, store)
    scheduler.leader.set(true)

    scheduler.registerJob(job1, persist = false, DateTime.parse("2012-01-01T00:00:05.000Z"))
    scheduler.registerJob(job2, persist = false, DateTime.parse("2012-01-01T00:00:10.000Z"))

    val res1: List[ScheduleStream] = scheduler.iteration(DateTime.parse("2012-01-01T00:00:00.000Z"), scheduler.streams)

    scheduler.deregisterJob(job2, persist = false)

    val res2: List[ScheduleStream] = scheduler.iteration(DateTime.parse("2012-01-01T00:05:00.000Z"), scheduler.streams)
    res2.size must_== 1
    res2.head.jobName must_== job1.name
  }

  "Job scheduler persists job state after runs" in {
    val store = mock[PersistenceStore]
    val epsilon = Seconds.seconds(1).toPeriod
    val jobName = "FOO"
    val jobCmd = "BARCMD"
    val schedule = Schedule.parse("R/2012-01-01T00:00:00.000Z/PT1S").get
    val job1 = new InternalScheduleBasedJob(schedule, jobName, jobCmd, epsilon)

    val mockGraph = mock[JobGraph]
    mockGraph.lookupVertex(job1.name).returns(Some(job1))

    val jobStream = new ScheduleStream(jobName, Schedule.parse("R/2012-01-01T00:00:00.000Z/PT1S").get)

    val scheduler = mockScheduler(Period.hours(1), mock[TaskManager], mockGraph, store)
    scheduler.leader.set(true)

    val startTime = DateTime.parse("2012-01-01T00:00:00.000Z")
    var t: DateTime = startTime
    var stream = scheduler.iteration(startTime, List(jobStream))
    t = t.plus(Period.millis(1).toPeriod)
    stream = scheduler.iteration(t, stream)

    val nextSchedule: Schedule = schedule.copy(invocationTime = schedule.invocationTime.plus(schedule.period), offset = schedule.offset + 1)
    val job2 = new InternalScheduleBasedJob(nextSchedule, jobName, jobCmd, epsilon, 0)

    there was one(store).persistJob(job2)
    there was one(mockGraph).replaceVertex(job1, job2)
  }

  "Missed executions have to be skipped" in {
    val epsilon = Seconds.seconds(60).toPeriod
    val job1 = new InternalScheduleBasedJob(Schedule.parse("R5/2012-01-01T00:00:00.000Z/P1D").get, "job1", "CMD", epsilon)

    val mockTaskManager = mock[TaskManager]
    val jobGraph = new JobGraph
    val mockPersistenceStore = mock[PersistenceStore]

    val scheduler = mockScheduler(epsilon, mockTaskManager, jobGraph, mockPersistenceStore)

    val startTime = DateTime.parse("2012-01-03T00:00:00.000Z")
    scheduler.leader.set(true)

    scheduler.registerJob(job1, persist = true, startTime)
    val newStreams = scheduler.iteration(startTime, scheduler.streams)
    newStreams.head.schedule.invocationTime must_== Schedule.parse("R2/2012-01-04T00:00:00.000Z/P1D").get.invocationTime
  }
}
BoopBoopBeepBoop/chronos
src/test/scala/org/apache/mesos/chronos/scheduler/jobs/JobSchedulerSpec.scala
Scala
apache-2.0
13,451
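A small, hypothetical sketch of the ISO-8601 repeating-interval schedules the spec above exercises. The import path is inferred from the spec's own package, and the invocationTime/period fields are the ones the spec itself references; this is an illustration, not part of the Chronos test suite.

import org.apache.mesos.chronos.scheduler.jobs.Schedule

object ScheduleExample {
  // "repeat 5 times, starting 2012-01-01T00:00Z, once per day" -- the same format used throughout the spec
  val schedule: Schedule = Schedule.parse("R5/2012-01-01T00:00:00.000Z/P1D").get

  // fields the spec relies on when rolling a schedule forward
  val firstRun = schedule.invocationTime                       // 2012-01-01T00:00:00.000Z
  val nextRun  = schedule.invocationTime.plus(schedule.period) // one period later, as done via schedule.copy in the spec
}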
/*
 * The MIT License (MIT)
 * <p>
 * Copyright (c) 2020
 * <p>
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * <p>
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * <p>
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
package io.techcode.streamy.util.monitor

import akka.actor.{PoisonPill, Props}
import akka.testkit.TestProbe
import io.techcode.streamy.StreamyTestSystem
import io.techcode.streamy.config.StreamyConfig
import io.techcode.streamy.event.MonitorEvent
import io.techcode.streamy.event.MonitorEvent.Jvm.{BufferPool, GarbageCollector}
import io.techcode.streamy.util.monitor.GarbageCollectorMonitorSpec.GarbageCollectorMonitorImpl

import scala.concurrent.duration._
import scala.language.postfixOps

/**
 * Garbage collector monitoring spec.
 */
class GarbageCollectorMonitorSpec extends StreamyTestSystem {

  "Garbage collector monitoring" can {
    "be started and stopped" in {
      val garbageCollectorMonitor = system.actorOf(Props(classOf[GarbageCollectorMonitor], StreamyConfig.GarbageCollectorMonitor(
        enabled = true,
        10 minutes,
        50,
        25,
        10
      )))
      val probe = TestProbe()
      probe watch garbageCollectorMonitor
      garbageCollectorMonitor ! PoisonPill
      probe.expectTerminated(garbageCollectorMonitor)
    }

    "monitor correctly process" in {
      system.eventStream.subscribe(testActor, classOf[MonitorEvent.GarbageCollectorOverhead])
      val garbageCollectorMonitor = system.actorOf(Props(classOf[GarbageCollectorMonitorImpl], StreamyConfig.GarbageCollectorMonitor(
        enabled = true,
        0.second,
        50,
        25,
        10
      )))
      garbageCollectorMonitor ! GarbageCollectorMonitorSpec.Heartbeat
      expectMsg(GarbageCollectorMonitorSpec.Heartbeat)
      system.eventStream.publish(MonitorEvent.Jvm(
        timestamp = System.currentTimeMillis(),
        uptime = 986,
        memHeapUsed = 1126816,
        memHeapCommitted = 264241152,
        memHeapMax = 4188012544L,
        memNonHeapCommitted = 34865152,
        memNonHeapUsed = 30531400,
        thread = 11,
        threadPeak = 11,
        classLoaded = 4019,
        classLoadedTotal = 4019,
        classUnloaded = 0,
        bufferPools = Seq(
          BufferPool("mapped", 0, 0, 0),
          BufferPool("direct", 1, 8192, 8192)
        ),
        garbageCollectors = Seq(
          GarbageCollector("G1 Young Generation", 2, 11),
          GarbageCollector("G1 Old Generation", 0, 0)
        )
      ))
      system.eventStream.publish(MonitorEvent.Jvm(
        timestamp = System.currentTimeMillis() + 100,
        uptime = 986,
        memHeapUsed = 1126816,
        memHeapCommitted = 264241152,
        memHeapMax = 4188012544L,
        memNonHeapCommitted = 34865152,
        memNonHeapUsed = 30531400,
        thread = 11,
        threadPeak = 11,
        classLoaded = 4019,
        classLoadedTotal = 4019,
        classUnloaded = 0,
        bufferPools = Seq(
          BufferPool("mapped", 0, 0, 0),
          BufferPool("direct", 1, 8192, 8192)
        ),
        garbageCollectors = Seq(
          GarbageCollector("G1 Young Generation", 3, 12),
          GarbageCollector("G1 Old Generation", 0, 0)
        )
      ))
      expectMsgClass(classOf[MonitorEvent.GarbageCollectorOverhead])
    }
  }

}

/**
 * Garbage collector monitor spec.
 */
object GarbageCollectorMonitorSpec {

  case object Heartbeat

  // Implement heartbeat
  class GarbageCollectorMonitorImpl(
    conf: StreamyConfig.GarbageCollectorMonitor
  ) extends GarbageCollectorMonitor(conf) {
    override def receive: Receive = {
      case _: Heartbeat.type => sender() ! Heartbeat
      case v => super.receive(v)
    }
  }

}
amannocci/streamy
core/src/test/scala/io/techcode/streamy/util/monitor/GarbageCollectorMonitorSpec.scala
Scala
mit
4,603
package scorex.network

import akka.actor.Props
import scorex.ActorTestingCommons
import scorex.block.Block._
import scorex.network.BlockchainSynchronizer.{InnerId, InnerIds}
import scorex.settings.SettingsMock
import scorex.transaction.History

import scala.language.implicitConversions

class HistoryReplierSpecification extends ActorTestingCommons {

  private implicit def toInnerIds(ids: Seq[Int]): InnerIds = ids.map { i => InnerId(toBlockId(i)) }

  private def mockHistory(blockIds: InnerIds): History = {
    val history = mock[History]
    history.lookForward _ expects (*,*) onCall {
      (parentSignature, howMany) =>
        blockIds.dropWhile(_ != InnerId(parentSignature)).slice(1, howMany + 1).map(_.blockId)
    } anyNumberOfTimes()
    history
  }

  private object TestSettings extends SettingsMock {
    override lazy val maxChain = 5
  }

  private val lastHistoryBlockId = 20
  private val h = mockHistory(1 to lastHistoryBlockId)

  private trait App extends ApplicationMock {
    override lazy val settings = TestSettings
    override lazy val history: History = h
  }

  private val app = stub[App]

  import app.basicMessagesSpecsRepo._

  // according to the protocol ids come in reverse order!
  private def sendSignatures(lastBlockId: Int, blockId: Int): Unit =
    dataFromNetwork(GetSignaturesSpec, (blockId to lastBlockId).reverse: BlockIds)

  private def expectedSignaturesSpec(blockIds: Seq[Int]): Unit = expectNetworkMessage(SignaturesSpec, blockIds)

  override protected val actorRef = system.actorOf(Props(classOf[HistoryReplier], app))

  testSafely {

    "return block signatures" in {
      val last = 10
      sendSignatures(last, 8) // according to the protocol ids come in reverse order!
      expectedSignaturesSpec(last to last + TestSettings.maxChain)
    }

    "history contains less block signatures than requested" in {
      sendSignatures(18, 16)
      expectedSignaturesSpec(18 to lastHistoryBlockId)
    }

    "last two block ids" in {
      sendSignatures(lastHistoryBlockId, lastHistoryBlockId - 1)
      expectedSignaturesSpec(lastHistoryBlockId - 1 to lastHistoryBlockId)
    }

    "no reply in case of last or non-existing block id" in {
      sendSignatures(lastHistoryBlockId, lastHistoryBlockId)
      sendSignatures(lastHistoryBlockId + 10, lastHistoryBlockId + 5)

      networkController.expectNoMsg(testDuration)
    }
  }
}
B83YPoj/Waves
src/test/scala/scorex/network/HistoryReplierSpecification.scala
Scala
apache-2.0
2,395
package play.core.server.websocket

import org.jboss.netty.buffer.ChannelBuffer
import org.jboss.netty.channel.Channel
import java.nio.charset.Charset
import org.jboss.netty.util.CharsetUtil
import org.jboss.netty.handler.codec.http.websocketx.WebSocketFrame
import org.jboss.netty.handler.codec.http.websocketx.TextWebSocketFrame
import org.jboss.netty.handler.codec.http.websocketx.BinaryWebSocketFrame

case class FrameFormatter[A](toFrame: A => WebSocketFrame, fromFrame: PartialFunction[WebSocketFrame, A]) extends play.api.mvc.WebSocket.FrameFormatter[A] {

  def transform[B](fba: B => A, fab: A => B): FrameFormatter[B] = {
    FrameFormatter[B](
      toFrame.compose(fba),
      fromFrame.andThen(fab))
  }

}

object Frames {

  val textFrame = FrameFormatter[String](
    str => new TextWebSocketFrame(true, 0, str),
    { case frame: TextWebSocketFrame => frame.getText })

  val binaryFrame = FrameFormatter[Array[Byte]](
    bytes => new BinaryWebSocketFrame(true, 0, org.jboss.netty.buffer.ChannelBuffers.wrappedBuffer(bytes)),
    { case frame: BinaryWebSocketFrame => frame.getBinaryData().array() })

  val mixedFrame = FrameFormatter[Either[String, Array[Byte]]](
    stringOrBytes => {
      stringOrBytes.fold(
        str => new TextWebSocketFrame(true, 0, str),
        bytes => new BinaryWebSocketFrame(true, 0, org.jboss.netty.buffer.ChannelBuffers.wrappedBuffer(bytes))
      )
    },
    {
      case frame: TextWebSocketFrame => Left(frame.getText)
      case frame: BinaryWebSocketFrame => Right(frame.getBinaryData.array)
    }
  )

}
michaelahlers/team-awesome-wedding
vendor/play-2.2.1/framework/src/play/src/main/scala/play/core/server/websocket/Frame.scala
Scala
mit
1,567
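A brief sketch of deriving a JSON frame formatter from the textFrame formatter above via transform. The jsonFrame name is illustrative; this mirrors, but is not claimed to be identical to, how Play wires its built-in JSON frame formatter.

import play.api.libs.json.{ JsValue, Json }
import play.core.server.websocket.Frames

object JsonFrameExample {
  // String <-> JsValue adapters reuse the Netty text-frame handling defined in Frames.textFrame
  val jsonFrame = Frames.textFrame.transform[JsValue](
    Json.stringify, // JsValue => String, applied before building the TextWebSocketFrame
    Json.parse      // String => JsValue, applied after extracting frame.getText
  )
}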
package chrome.webRequest.bindings

import scala.scalajs.js

@js.native
trait WebRequestBodyDetails extends WebRequestDetails {

  /**
   * Contains the HTTP request body data. Only provided if extraInfoSpec contains 'requestBody'.
   * @since Chrome 23.
   */
  val requestBody: WebRequestBody = js.native
}
lucidd/scala-js-chrome
bindings/src/main/scala/chrome/webRequest/bindings/WebRequestBodyDetails.scala
Scala
mit
311
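A tiny Scala.js sketch of reading the field declared above; the helper name is hypothetical, and the undefined-check reflects the doc comment's note that requestBody is only present when extraInfoSpec asks for it.

import scala.scalajs.js
import chrome.webRequest.bindings.WebRequestBodyDetails

object RequestBodyInspector {
  // requestBody is absent unless the listener was registered with extraInfoSpec including "requestBody"
  def hasBody(details: WebRequestBodyDetails): Boolean =
    !js.isUndefined(details.requestBody)
}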
/***********************************************************************
 * Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.locationtech.geomesa.filter.expression

import java.util.Date

import org.locationtech.geomesa.utils.geotools.converters.FastConverter
import org.opengis.filter.FilterVisitor
import org.opengis.filter.MultiValuedFilter.MatchAction
import org.opengis.filter.expression.{Expression, Literal}
import org.opengis.filter.temporal.{After, Before, BinaryTemporalOperator, During}
import org.opengis.temporal.Period

/**
 * Fast temporal filters that avoid repeatedly evaluating literals
 */
object FastTemporalOperator {

  def after(exp1: Expression, exp2: Literal): After = new FastAfterLiteral(exp1, exp2)
  def after(exp1: Literal, exp2: Expression): After = new FastAfterExpression(exp1, exp2)

  def before(exp1: Expression, exp2: Literal): Before = new FastBeforeLiteral(exp1, exp2)
  def before(exp1: Literal, exp2: Expression): Before = new FastBeforeExpression(exp1, exp2)

  def during(exp1: Expression, exp2: Literal): During = new FastDuring(exp1, exp2)

  /**
   * After filter comparing an expression (e.g. property or function) to a literal
   *
   * @param exp1 expression
   * @param exp2 literal
   */
  private final class FastAfterLiteral(exp1: Expression, exp2: Literal)
      extends FastTemporalOperator(exp1, exp2, After.NAME) with After {

    private val lit = FastConverter.convert(exp2.evaluate(null), classOf[Date])

    override def evaluate(obj: AnyRef): Boolean = {
      val date = exp1.evaluate(obj).asInstanceOf[Date]
      date != null && date.after(lit)
    }

    override def accept(visitor: FilterVisitor, extraData: AnyRef): AnyRef = visitor.visit(this, extraData)
  }

  /**
   * After filter comparing a literal to an expression (e.g. property or function)
   *
   * @param exp1 literal
   * @param exp2 expression
   */
  private final class FastAfterExpression(exp1: Literal, exp2: Expression)
      extends FastTemporalOperator(exp1, exp2, After.NAME) with After {

    private val lit = FastConverter.convert(exp1.evaluate(null), classOf[Date])

    override def evaluate(obj: AnyRef): Boolean = {
      val date = exp2.evaluate(obj).asInstanceOf[Date]
      date != null && lit.after(date)
    }

    override def accept(visitor: FilterVisitor, extraData: AnyRef): AnyRef = visitor.visit(this, extraData)
  }

  /**
   * Before filter comparing an expression (e.g. property or function) to a literal
   *
   * @param exp1 expression
   * @param exp2 literal
   */
  private final class FastBeforeLiteral(exp1: Expression, exp2: Literal)
      extends FastTemporalOperator(exp1, exp2, Before.NAME) with Before {

    private val lit = FastConverter.convert(exp2.evaluate(null), classOf[Date])

    override def evaluate(obj: AnyRef): Boolean = {
      val date = exp1.evaluate(obj).asInstanceOf[Date]
      date != null && date.before(lit)
    }

    override def accept(visitor: FilterVisitor, extraData: AnyRef): AnyRef = visitor.visit(this, extraData)
  }

  /**
   * Before filter comparing a literal to an expression (e.g. property or function)
   *
   * @param exp1 literal
   * @param exp2 expression
   */
  private final class FastBeforeExpression(exp1: Literal, exp2: Expression)
      extends FastTemporalOperator(exp1, exp2, Before.NAME) with Before {

    private val lit = FastConverter.convert(exp1.evaluate(null), classOf[Date])

    override def evaluate(obj: AnyRef): Boolean = {
      // evaluate the non-literal expression against the input; evaluating exp1 (the literal) here
      // would always compare the cached literal with itself
      val date = exp2.evaluate(obj).asInstanceOf[Date]
      date != null && lit.before(date)
    }

    override def accept(visitor: FilterVisitor, extraData: AnyRef): AnyRef = visitor.visit(this, extraData)
  }

  /**
   * During filter comparing an expression (e.g. property or function) to a literal
   *
   * @param exp1 expression
   * @param exp2 literal
   */
  private final class FastDuring(exp1: Expression, exp2: Literal)
      extends FastTemporalOperator(exp1, exp2, During.NAME) with During {

    private val lit = FastConverter.convert(exp2.evaluate(null), classOf[Period])
    private val beg = lit.getBeginning.getPosition.getDate
    private val end = lit.getEnding.getPosition.getDate

    override def evaluate(obj: AnyRef): Boolean = {
      val date = exp1.evaluate(obj).asInstanceOf[Date]
      date != null && date.after(beg) && date.before(end)
    }

    override def accept(visitor: FilterVisitor, extraData: AnyRef): AnyRef = visitor.visit(this, extraData)
  }
}

sealed private abstract class FastTemporalOperator(exp1: Expression, exp2: Expression, private val op: String)
    extends BinaryTemporalOperator {

  override def getExpression1: Expression = exp1
  override def getExpression2: Expression = exp2
  override def getMatchAction: MatchAction = MatchAction.ANY

  override def toString: String = s"[ $exp1 $op $exp2 ]"

  def canEqual(other: Any): Boolean = other.isInstanceOf[FastTemporalOperator]

  override def equals(other: Any): Boolean = other match {
    case that: FastTemporalOperator =>
      (that canEqual this) && exp1 == that.getExpression1 && exp2 == that.getExpression2 && op == that.op
    case _ => false
  }

  override def hashCode(): Int =
    Seq(exp1, exp2, op).map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
aheyne/geomesa
geomesa-filter/src/main/scala/org/locationtech/geomesa/filter/expression/FastTemporalOperator.scala
Scala
apache-2.0
5,627
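A hedged sketch of building one of the fast filters above with GeoTools expressions. The CommonFactoryFinder import path can differ between GeoTools versions, and the "dtg" property name plus the cutoff date are made-up values for illustration.

import java.util.Date

import org.geotools.factory.CommonFactoryFinder
import org.locationtech.geomesa.filter.expression.FastTemporalOperator

object FastAfterExample {
  private val ff = CommonFactoryFinder.getFilterFactory2

  // the literal is converted to a Date once, in the filter constructor, instead of on every evaluate call
  val cutoff = ff.literal(new Date(1577836800000L)) // 2020-01-01T00:00:00Z
  val after  = FastTemporalOperator.after(ff.property("dtg"), cutoff)

  def matches(feature: AnyRef): Boolean = after.evaluate(feature)
}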
package isabelle.eclipse.ui.views.outline

import org.eclipse.jface.resource.ResourceManager
import org.eclipse.jface.viewers.LabelProvider
import org.eclipse.swt.graphics.Image

import isabelle.Library
import isabelle.Thy_Syntax.Structure
import isabelle.eclipse.ui.internal.IsabelleImages

/**
 * Label provider for Thy_Syntax.Structure entries.
 *
 * @author Andrius Velykis
 */
/* Adapted from Isabelle_Sidekick_Structure */
class TheoryStructureLabelProvider(resourceManager: ResourceManager) extends LabelProvider {

  private val cmdImages = Map(
    "qed" -> IsabelleImages.SUCCESS,
    "done" -> IsabelleImages.SUCCESS,
    "by" -> IsabelleImages.SUCCESS,
    "apply" -> IsabelleImages.COMMAND_APPLY,
    "proof" -> IsabelleImages.COMMAND_PROOF
  ).withDefaultValue(IsabelleImages.ISABELLE_ITEM)

  override def getImage(obj: AnyRef): Image = {
    val imgDesc = obj match {
      case TheoryStructureEntry(_, Structure.Block(nameText, _), _, _) => {
        val name = nameText.trim
        if (name.startsWith("lemma") || name.startsWith("theorem")) {
          IsabelleImages.LEMMA
        } else if (name.startsWith("text")) {
          IsabelleImages.TEXT
        } else if (name.startsWith("theory")) {
          IsabelleImages.ISABELLE_FILE
        } else {
          IsabelleImages.HEADING
        }
      }

      case TheoryStructureEntry(_, Structure.Atom(command), _, _) => cmdImages(command.name)

      case _ => IsabelleImages.ISABELLE_ITEM
    }

    resourceManager.createImageWithDefault(imgDesc)
  }

  override def getText(obj: AnyRef): String = obj match {
    case entry: TheoryStructureEntry => entry.entry match {

      case Structure.Block(nameText, body) => {
        val name = flattenTrim(nameText)
        if (name.startsWith("text")) {
          // get just the text
          textContents(name)
        } else {
          name
        }
      }

      case Structure.Atom(command) => command.name
    }

    case _ => super.getText(obj)
  }

  /**
   * Trims the text of whitespace and compacts/replaces all consecutive whitespace (including linebreaks)
   * with single space - flattens the text.
   */
  private def flattenTrim(text: String) = text.trim.replaceAll("\\s+", " ")

  /**
   * Extracts contents of `text {* *}` command
   */
  private def textContents(text: String): String = {
    // get just the text
    val offset = (text.indexOf("{*") + 2) max 4
    val endOffset = {
      val end = text.lastIndexOf("*}")
      if (end < 0) text.length else end
    }
    val textContents = text.substring(offset, endOffset)
    val trimmed = textContents.trim

    // replace text that is too long with ellipsis
    val limit = 50
    if (trimmed.length > limit) {
      trimmed.substring(0, limit - 3) + "..."
    } else {
      trimmed
    }
  }

}
andriusvelykis/isabelle-eclipse
isabelle.eclipse.ui/src/isabelle/eclipse/ui/views/outline/TheoryStructureLabelProvider.scala
Scala
epl-1.0
2,830
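A standalone illustration (plain Scala, no Eclipse or Isabelle dependencies) of the whitespace flattening and `text {* ... *}` extraction performed by flattenTrim and textContents above; the sample string is invented.

object TheoryLabelTextExample extends App {
  val raw = "text {*  An   introductory\n   comment  *}"

  // flattenTrim: collapse any run of whitespace (including line breaks) into a single space
  val flattened = raw.trim.replaceAll("\\s+", " ") // "text {* An introductory comment *}"

  // textContents: keep only what sits between the {* and *} delimiters
  val contents = flattened
    .substring(flattened.indexOf("{*") + 2, flattened.lastIndexOf("*}"))
    .trim

  println(contents) // prints: An introductory comment
}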