code | repo_name | path | language | license | size
---|---|---|---|---|---|
package org.hibernate.cache.rediscala.tests
import java.util.Properties
import com.zaxxer.hikari.{HikariConfig, HikariDataSource}
import org.hibernate.SessionFactory
import org.hibernate.cache.rediscala.SingletonRedisRegionFactory
import org.hibernate.cache.rediscala.tests.domain.Account
import org.hibernate.cfg.AvailableSettings
import org.hibernate.engine.transaction.internal.jdbc.JdbcTransactionFactory
import org.springframework.context.annotation.{Bean, Configuration}
import org.springframework.dao.annotation.PersistenceExceptionTranslationPostProcessor
import org.springframework.orm.hibernate4.{HibernateExceptionTranslator, HibernateTransactionManager, LocalSessionFactoryBean}
import org.springframework.transaction.annotation.EnableTransactionManagement
/**
* org.hibernate.cache.rediscala.tests.HibernateRedisConfiguration
*
* @author 배성혁 [email protected]
* @since 2014. 2. 21. 4:50 PM
*/
@Configuration
@EnableTransactionManagement
class HibernateRedisConfiguration {
def databaseName: String = "hibernate"
def getMappedPackageNames: Array[String] = Array(classOf[Account].getPackage.getName)
def hibernateProperties: Properties = {
val props = new Properties()
props.setProperty(AvailableSettings.HBM2DDL_AUTO, "create")
props.setProperty(AvailableSettings.FORMAT_SQL, "true")
props.setProperty(AvailableSettings.SHOW_SQL, "false")
props.setProperty(AvailableSettings.POOL_SIZE, "100")
// Secondary Cache
props.setProperty(AvailableSettings.USE_SECOND_LEVEL_CACHE, "true")
props.setProperty(AvailableSettings.USE_QUERY_CACHE, "true")
props.setProperty(AvailableSettings.CACHE_REGION_FACTORY, classOf[SingletonRedisRegionFactory].getName)
props.setProperty(AvailableSettings.CACHE_REGION_PREFIX, "hibernate")
props.setProperty(AvailableSettings.GENERATE_STATISTICS, "true")
props.setProperty(AvailableSettings.USE_STRUCTURED_CACHE, "true")
props.setProperty(AvailableSettings.TRANSACTION_STRATEGY, classOf[JdbcTransactionFactory].getName)
props.setProperty(AvailableSettings.CACHE_PROVIDER_CONFIG, "hibernate-redis.properties")
props
}
@Bean
def dataSource = {
val config = new HikariConfig()
config.setDataSourceClassName("org.h2.jdbcx.JdbcDataSource")
config.addDataSourceProperty("url", "jdbc:h2:mem:test;MVCC=true")
config.addDataSourceProperty("user", "sa")
config.addDataSourceProperty("password", "")
config.setInitializationFailFast(true)
config.setMaximumPoolSize(100)
config.setMinimumIdle(2)
new HikariDataSource(config)
}
@Bean
def sessionFactory: SessionFactory = {
val factoryBean = new LocalSessionFactoryBean()
factoryBean.setPackagesToScan(getMappedPackageNames: _*)
factoryBean.setDataSource(dataSource)
factoryBean.setHibernateProperties(hibernateProperties)
factoryBean.afterPropertiesSet()
factoryBean.getObject
}
@Bean
def transactionManager =
new HibernateTransactionManager(sessionFactory)
@Bean
def hibernateExceptionTranslator =
new HibernateExceptionTranslator()
@Bean
def exceptionTranslation =
new PersistenceExceptionTranslationPostProcessor()
}
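// Illustrative bootstrap sketch (added for clarity; not part of the original test sources):
// a Spring JavaConfig class like the one above is typically loaded through
// AnnotationConfigApplicationContext, e.g.
//   val ctx = new AnnotationConfigApplicationContext(classOf[HibernateRedisConfiguration])
//   val sessionFactory = ctx.getBean(classOf[SessionFactory])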
| debop/hibernate-rediscala | src/test/scala/org/hibernate/cache/rediscala/tests/HibernateRedisConfiguration.scala | Scala | apache-2.0 | 3,201 |
/**
* Created by Brent on 11/23/2016.
*/
import breeze.numerics.{atan, pow}
import scala.collection.mutable
import scala.collection.mutable.HashMap
import scala.io.Source
import scala.util.Try
object DataPipeline {
def indexColumnsAux(s: Array[String], i: Int): HashMap[Int, String] = {
if (i < s.length) {
(new HashMap[Int, String] += (i -> s(i))) ++: indexColumnsAux(s, i + 1)
}//if
else new HashMap[Int, String]()
}
def indexColumns(s: String): HashMap[Int, String] = {
val split = s.split(",")
indexColumnsAux(split, 0)
}
def hashInstanceAux(s: Array[String], i: Int, indexes: HashMap[Int, String]): HashMap[String, Double] = {
if (i < s.length && Try(s(i).toDouble).isSuccess) {
(new HashMap[String, Double]() += (indexes(i) -> s(i).toDouble)) ++: hashInstanceAux(s, i+1, indexes)
}
else if (i < s.length && s(i).equals("")) {
(new HashMap[String, Double]() += (indexes(i) -> 0.0)) ++: hashInstanceAux(s, i+1, indexes)
}
else if (i < s.length) hashInstanceAux(s, i+1, indexes)
else new HashMap[String, Double]()
}
def hashInstance(s: String, indexes: HashMap[Int, String]): HashMap[String, Double] = {
val split = s.split(",")
hashInstanceAux(split, 0, indexes)
}
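  // Worked example (column names taken from addGroundTruth below; the values are illustrative):
  //   val header = indexColumns("temp_air_2m_C,rh_2m_pct")   // HashMap(0 -> "temp_air_2m_C", 1 -> "rh_2m_pct")
  //   hashInstance("21.5,63.0", header)                      // HashMap("temp_air_2m_C" -> 21.5, "rh_2m_pct" -> 63.0)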
def calculateWetBulb(temp: Double, humidity: Double): Double = {
temp * atan(0.151977 * pow(humidity + 8.313659, 0.5)) + atan(temp + humidity) -
  atan(humidity - 1.676331) + 0.00391838 * pow(humidity, 1.5) * atan(0.023101 * humidity) -
  4.686035
}
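// The expression in calculateWetBulb above follows Stull's (2011) empirical wet-bulb
// approximation, with temp in deg C and humidity in % relative humidity.
// Quick sanity check (illustrative values only): calculateWetBulb(20.0, 50.0) is roughly 13.7.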
def addGroundTruth(inst: HashMap[String, Double]): HashMap[String, Double] = {
if (inst.contains("temp_air_2m_C") && inst.contains("rh_2m_pct"))
inst += ("wet-bulb" -> calculateWetBulb(inst("temp_air_2m_C"), inst("rh_2m_pct")))
else new mutable.HashMap[String, Double]()
}
def readFile(filePath: String): List[HashMap[String, Double]] = {
val src = Source.fromFile(filePath)
val fileList = src.getLines().toList
val indexes = indexColumns(fileList.head)
val instances = fileList.drop(1)
instances.map( s => addGroundTruth(hashInstance(s, indexes))).filter( (inst) => inst.nonEmpty && inst.contains("wet-bulb") )
}
}
| snakes-in-the-box/galapagos | src/main/scala/DataPipeline.scala | Scala | mit | 2,182 |
package models.actors.dataimport
import javax.inject.Inject
import akka.actor.{Actor, ActorRef}
import models.db.DataImportRepository
import models.entity.Task
import models.service.ProfileFileParser
import play.api.Logger
object DataImportActor {
trait Factory {
def apply(): Actor
}
case class CheckFileAlreadyImported(ref: ActorRef, task: Task, path: String)
case class SaveNewImport(path: String, userId: Int)
}
class DataImportActor @Inject()(val dataImportRepository: DataImportRepository,
val profileFileParser: ProfileFileParser) extends Actor {
import DataImportActor._
import play.api.libs.concurrent.Execution.Implicits._
def receive: Receive = {
case CheckFileAlreadyImported(ref, task, path) => {
val dataImport = profileFileParser.parseFileData(path, task.userId)
dataImportRepository.getByYearMonth(dataImport) map {
case Some(dataImport) => {
Logger.debug("Found: "+dataImport)
ref ! ManagerActor.FileAlreadyImported(task, dataImport)
}
case None => {
Logger.debug("File not imported yet: "+path)
ref ! ManagerActor.FileNotImported(task, path)
}
}
context.stop(self)
}
case SaveNewImport(path, userId) => {
val dataImport = profileFileParser.parseFileData(path, userId)
dataImportRepository.insert(dataImport)
context.stop(self)
}
}
}
| LeonardoZ/SAEB | app/models/actors/dataimport/DataImportActor.scala | Scala | mit | 1,499 |
/*
rule = Http4sFs2Linters
*/
package fix
import cats.effect._
import fs2._
object Fs2SyncCompilerTest {
def usesSyncInnocently[F[_]](implicit F: Sync[F]) = F.delay(println("hi"))
def usesSyncCompiler[F[_]](implicit F: Sync[F]) = Stream(1, 2, 3).covary[F].compile.drain // assert: Http4sFs2Linters.noFs2SyncCompiler
def usesConcurrentCompiler[F[_]](implicit F: Concurrent[F]) = Stream(1, 2, 3).covary[F].compile.drain
}
| rossabaker/http4s | scalafix-internal/input/src/main/scala/fix/Fs2SyncCompilerTest.scala | Scala | apache-2.0 | 429 |
package lila.user
import org.joda.time.DateTime
import lila.db.BSON.BSONJodaDateTimeHandler
import reactivemongo.bson._
final class TrophyApi(coll: lila.db.Types.Coll) {
private implicit val trophyKindBSONHandler = new BSONHandler[BSONString, Trophy.Kind] {
def read(bsonString: BSONString): Trophy.Kind =
Trophy.Kind byKey bsonString.value err s"No such trophy kind: ${bsonString.value}"
def write(x: Trophy.Kind) = BSONString(x.key)
}
private implicit val trophyBSONHandler = Macros.handler[Trophy]
def award(userId: String, kind: Trophy.Kind): Funit =
coll insert Trophy.make(userId, kind) void
def award(userId: String, kind: Trophy.Kind.type => Trophy.Kind): Funit =
award(userId, kind(Trophy.Kind))
def awardMarathonWinner(userId: String): Funit = award(userId, Trophy.Kind.MarathonWinner)
def findByUser(user: User, max: Int = 12): Fu[List[Trophy]] =
coll.find(BSONDocument("user" -> user.id)).cursor[Trophy].collect[List](max)
}
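// Example calls (illustrative only; MarathonWinner is the Kind referenced by awardMarathonWinner above):
//   trophyApi.award("someUserId", _.MarathonWinner)
//   trophyApi.findByUser(someUser, max = 3)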
| Happy0/lila | modules/user/src/main/TrophyApi.scala | Scala | mit | 985 |
package fi.pyppe.ircbot.slave
import org.joda.time.{Duration, DateTime}
import fi.pyppe.ircbot.event.Message
trait TimedChannelMaybeSayer extends MaybeSayer {
protected def onReact(m: Message): Option[String]
protected def silentPeriod: Duration
private final var lastReactions = Map[String, DateTime]()
final override def react(m: Message): Option[String] =
if (lastReaction(m).plus(silentPeriod).isBeforeNow)
onReact(m) match {
case response @ Some(_) =>
lastReactions += m.channel -> DateTime.now
response
case None =>
None
}
else
None
private def lastReaction(m: Message): DateTime =
lastReactions.getOrElse(m.channel, DateTime.now.minusMonths(1))
}
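// Hypothetical implementation for illustration (not part of the original bot): replies with a
// fixed greeting, but at most once per channel every 10 minutes.
object GreetingSayer extends TimedChannelMaybeSayer {
  protected def silentPeriod: Duration = Duration.standardMinutes(10)
  protected def onReact(m: Message): Option[String] = Some("o/")
}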
| Pyppe/akka-ircbot | slave/src/main/scala/fi/pyppe/ircbot/slave/TimedChannelMaybeSayer.scala | Scala | mit | 750 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.scheduler
import java.io.NotSerializableException
import java.util.Properties
import java.util.concurrent.{LinkedBlockingQueue, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Map}
import spark._
import spark.executor.TaskMetrics
import spark.partial.{ApproximateActionListener, ApproximateEvaluator, PartialResult}
import spark.scheduler.cluster.TaskInfo
import spark.storage.{BlockManager, BlockManagerMaster}
import spark.util.{MetadataCleaner, TimeStampedHashMap}
/**
* A Scheduler subclass that implements stage-oriented scheduling. It computes a DAG of stages for
* each job, keeps track of which RDDs and stage outputs are materialized, and computes a minimal
* schedule to run the job. Subclasses only need to implement the code to send a task to the cluster
* and to report fetch failures (the submitTasks method, and code to add CompletionEvents).
*/
private[spark]
class DAGScheduler(
taskSched: TaskScheduler,
mapOutputTracker: MapOutputTracker,
blockManagerMaster: BlockManagerMaster,
env: SparkEnv)
extends TaskSchedulerListener with Logging {
def this(taskSched: TaskScheduler) {
this(taskSched, SparkEnv.get.mapOutputTracker, SparkEnv.get.blockManager.master, SparkEnv.get)
}
taskSched.setListener(this)
// Called by TaskScheduler to report task's starting.
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {
eventQueue.put(BeginEvent(task, taskInfo))
}
// Called by TaskScheduler to report task completions or failures.
override def taskEnded(
task: Task[_],
reason: TaskEndReason,
result: Any,
accumUpdates: Map[Long, Any],
taskInfo: TaskInfo,
taskMetrics: TaskMetrics) {
eventQueue.put(CompletionEvent(task, reason, result, accumUpdates, taskInfo, taskMetrics))
}
// Called by TaskScheduler when an executor fails.
override def executorLost(execId: String) {
eventQueue.put(ExecutorLost(execId))
}
// Called by TaskScheduler when a host is added
override def executorGained(execId: String, hostPort: String) {
eventQueue.put(ExecutorGained(execId, hostPort))
}
// Called by TaskScheduler to cancel an entire TaskSet due to repeated failures.
override def taskSetFailed(taskSet: TaskSet, reason: String) {
eventQueue.put(TaskSetFailed(taskSet, reason))
}
// The time, in millis, to wait for fetch failure events to stop coming in after one is detected;
// this is a simplistic way to avoid resubmitting tasks in the non-fetchable map stage one by one
// as more failure events come in
val RESUBMIT_TIMEOUT = 50L
// The time, in millis, to wake up between polls of the completion queue in order to potentially
// resubmit failed stages
val POLL_TIMEOUT = 10L
private val eventQueue = new LinkedBlockingQueue[DAGSchedulerEvent]
val nextRunId = new AtomicInteger(0)
val nextStageId = new AtomicInteger(0)
val idToStage = new TimeStampedHashMap[Int, Stage]
val shuffleToMapStage = new TimeStampedHashMap[Int, Stage]
private[spark] val stageToInfos = new TimeStampedHashMap[Stage, StageInfo]
private[spark] val sparkListeners = ArrayBuffer[SparkListener]()
var cacheLocs = new HashMap[Int, Array[List[String]]]
// For tracking failed nodes, we use the MapOutputTracker's generation number, which is
// sent with every task. When we detect a node failing, we note the current generation number
// and failed executor, increment it for new tasks, and use this to ignore stray ShuffleMapTask
// results.
// TODO: Garbage collect information about failure generations when we know there are no more
// stray messages to detect.
val failedGeneration = new HashMap[String, Long]
val idToActiveJob = new HashMap[Int, ActiveJob]
val waiting = new HashSet[Stage] // Stages we need to run whose parents aren't done
val running = new HashSet[Stage] // Stages we are running right now
val failed = new HashSet[Stage] // Stages that must be resubmitted due to fetch failures
val pendingTasks = new TimeStampedHashMap[Stage, HashSet[Task[_]]] // Missing tasks from each stage
var lastFetchFailureTime: Long = 0 // Used to wait a bit to avoid repeated resubmits
val activeJobs = new HashSet[ActiveJob]
val resultStageToJob = new HashMap[Stage, ActiveJob]
val metadataCleaner = new MetadataCleaner("DAGScheduler", this.cleanup)
// Start a thread to run the DAGScheduler event loop
def start() {
new Thread("DAGScheduler") {
setDaemon(true)
override def run() {
DAGScheduler.this.run()
}
}.start()
}
private def getCacheLocs(rdd: RDD[_]): Array[List[String]] = {
if (!cacheLocs.contains(rdd.id)) {
val blockIds = rdd.partitions.indices.map(index => "rdd_%d_%d".format(rdd.id, index)).toArray
val locs = BlockManager.blockIdsToExecutorLocations(blockIds, env, blockManagerMaster)
cacheLocs(rdd.id) = blockIds.map(locs.getOrElse(_, Nil))
}
cacheLocs(rdd.id)
}
private def clearCacheLocs() {
cacheLocs.clear()
}
/**
* Get or create a shuffle map stage for the given shuffle dependency's map side.
* The priority value passed in will be used if the stage doesn't already exist with
* a lower priority (we assume that priorities always increase across jobs for now).
*/
private def getShuffleMapStage(shuffleDep: ShuffleDependency[_,_], priority: Int): Stage = {
shuffleToMapStage.get(shuffleDep.shuffleId) match {
case Some(stage) => stage
case None =>
val stage = newStage(shuffleDep.rdd, Some(shuffleDep), priority)
shuffleToMapStage(shuffleDep.shuffleId) = stage
stage
}
}
/**
* Create a Stage for the given RDD, either as a shuffle map stage (for a ShuffleDependency) or
* as a result stage for the final RDD used directly in an action. The stage will also be given
* the provided priority.
*/
private def newStage(
rdd: RDD[_],
shuffleDep: Option[ShuffleDependency[_,_]],
priority: Int,
callSite: Option[String] = None)
: Stage =
{
if (shuffleDep != None) {
// Kind of ugly: need to register RDDs with the cache and map output tracker here
// since we can't do it in the RDD constructor because # of partitions is unknown
logInfo("Registering RDD " + rdd.id + " (" + rdd.origin + ")")
mapOutputTracker.registerShuffle(shuffleDep.get.shuffleId, rdd.partitions.size)
}
val id = nextStageId.getAndIncrement()
val stage = new Stage(id, rdd, shuffleDep, getParentStages(rdd, priority), priority, callSite)
idToStage(id) = stage
stageToInfos(stage) = StageInfo(stage)
stage
}
/**
* Get or create the list of parent stages for a given RDD. The stages will be assigned the
* provided priority if they haven't already been created with a lower priority.
*/
private def getParentStages(rdd: RDD[_], priority: Int): List[Stage] = {
val parents = new HashSet[Stage]
val visited = new HashSet[RDD[_]]
def visit(r: RDD[_]) {
if (!visited(r)) {
visited += r
// Kind of ugly: need to register RDDs with the cache here since
// we can't do it in its constructor because # of partitions is unknown
for (dep <- r.dependencies) {
dep match {
case shufDep: ShuffleDependency[_,_] =>
parents += getShuffleMapStage(shufDep, priority)
case _ =>
visit(dep.rdd)
}
}
}
}
visit(rdd)
parents.toList
}
private def getMissingParentStages(stage: Stage): List[Stage] = {
val missing = new HashSet[Stage]
val visited = new HashSet[RDD[_]]
def visit(rdd: RDD[_]) {
if (!visited(rdd)) {
visited += rdd
if (getCacheLocs(rdd).contains(Nil)) {
for (dep <- rdd.dependencies) {
dep match {
case shufDep: ShuffleDependency[_,_] =>
val mapStage = getShuffleMapStage(shufDep, stage.priority)
if (!mapStage.isAvailable) {
missing += mapStage
}
case narrowDep: NarrowDependency[_] =>
visit(narrowDep.rdd)
}
}
}
}
}
visit(stage.rdd)
missing.toList
}
/**
* Returns (and does not submit) a JobSubmitted event suitable to run a given job, and a
* JobWaiter whose getResult() method will return the result of the job when it is complete.
*
* The job is assumed to have at least one partition; zero partition jobs should be handled
* without a JobSubmitted event.
*/
private[scheduler] def prepareJob[T, U: ClassManifest](
finalRdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int],
callSite: String,
allowLocal: Boolean,
resultHandler: (Int, U) => Unit,
properties: Properties = null)
: (JobSubmitted, JobWaiter[U]) =
{
assert(partitions.size > 0)
val waiter = new JobWaiter(partitions.size, resultHandler)
val func2 = func.asInstanceOf[(TaskContext, Iterator[_]) => _]
val toSubmit = JobSubmitted(finalRdd, func2, partitions.toArray, allowLocal, callSite, waiter,
properties)
return (toSubmit, waiter)
}
def runJob[T, U: ClassManifest](
finalRdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int],
callSite: String,
allowLocal: Boolean,
resultHandler: (Int, U) => Unit,
properties: Properties = null)
{
if (partitions.size == 0) {
return
}
// Check to make sure we are not launching a task on a partition that does not exist.
val maxPartitions = finalRdd.partitions.length
partitions.find(p => p >= maxPartitions).foreach { p =>
throw new IllegalArgumentException(
"Attempting to access a non-existent partition: " + p + ". " +
"Total number of partitions: " + maxPartitions)
}
val (toSubmit: JobSubmitted, waiter: JobWaiter[_]) = prepareJob(
finalRdd, func, partitions, callSite, allowLocal, resultHandler, properties)
eventQueue.put(toSubmit)
waiter.awaitResult() match {
case JobSucceeded => {}
case JobFailed(exception: Exception, _) =>
logInfo("Failed to run " + callSite)
throw exception
}
}
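  // Illustrative driver-side call (sketch only; the exact SparkContext wiring is an assumption,
  // and `results` is a hypothetical pre-allocated array of partition results):
  //   dagScheduler.runJob(rdd, func, 0 until rdd.partitions.size, callSite = "collect",
  //     allowLocal = false, resultHandler = (index, res) => results(index) = res)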
def runApproximateJob[T, U, R](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
evaluator: ApproximateEvaluator[U, R],
callSite: String,
timeout: Long,
properties: Properties = null)
: PartialResult[R] =
{
val listener = new ApproximateActionListener(rdd, func, evaluator, timeout)
val func2 = func.asInstanceOf[(TaskContext, Iterator[_]) => _]
val partitions = (0 until rdd.partitions.size).toArray
eventQueue.put(JobSubmitted(rdd, func2, partitions, false, callSite, listener, properties))
return listener.awaitResult() // Will throw an exception if the job fails
}
/**
* Process one event retrieved from the event queue.
* Returns true if we should stop the event loop.
*/
private[scheduler] def processEvent(event: DAGSchedulerEvent): Boolean = {
event match {
case JobSubmitted(finalRDD, func, partitions, allowLocal, callSite, listener, properties) =>
val runId = nextRunId.getAndIncrement()
val finalStage = newStage(finalRDD, None, runId, Some(callSite))
val job = new ActiveJob(runId, finalStage, func, partitions, callSite, listener, properties)
clearCacheLocs()
logInfo("Got job " + job.runId + " (" + callSite + ") with " + partitions.length +
" output partitions (allowLocal=" + allowLocal + ")")
logInfo("Final stage: " + finalStage + " (" + finalStage.name + ")")
logInfo("Parents of final stage: " + finalStage.parents)
logInfo("Missing parents: " + getMissingParentStages(finalStage))
if (allowLocal && finalStage.parents.size == 0 && partitions.length == 1) {
// Compute very short actions like first() or take() with no parent stages locally.
runLocally(job)
} else {
sparkListeners.foreach(_.onJobStart(SparkListenerJobStart(job, properties)))
idToActiveJob(runId) = job
activeJobs += job
resultStageToJob(finalStage) = job
submitStage(finalStage)
}
case ExecutorGained(execId, hostPort) =>
handleExecutorGained(execId, hostPort)
case ExecutorLost(execId) =>
handleExecutorLost(execId)
case begin: BeginEvent =>
sparkListeners.foreach(_.onTaskStart(SparkListenerTaskStart(begin.task, begin.taskInfo)))
case completion: CompletionEvent =>
sparkListeners.foreach(_.onTaskEnd(SparkListenerTaskEnd(completion.task,
completion.reason, completion.taskInfo, completion.taskMetrics)))
handleTaskCompletion(completion)
case TaskSetFailed(taskSet, reason) =>
abortStage(idToStage(taskSet.stageId), reason)
case StopDAGScheduler =>
// Cancel any active jobs
for (job <- activeJobs) {
val error = new SparkException("Job cancelled because SparkContext was shut down")
job.listener.jobFailed(error)
sparkListeners.foreach(_.onJobEnd(SparkListenerJobEnd(job, JobFailed(error, None))))
}
return true
}
return false
}
/**
* Resubmit any failed stages. Ordinarily called after a small amount of time has passed since
* the last fetch failure.
*/
private[scheduler] def resubmitFailedStages() {
logInfo("Resubmitting failed stages")
clearCacheLocs()
val failed2 = failed.toArray
failed.clear()
for (stage <- failed2.sortBy(_.priority)) {
submitStage(stage)
}
}
/**
* Check for waiting or failed stages which are now eligible for resubmission.
* Ordinarily run on every iteration of the event loop.
*/
private[scheduler] def submitWaitingStages() {
// TODO: We might want to run this less often, when we are sure that something has become
// runnable that wasn't before.
logTrace("Checking for newly runnable parent stages")
logTrace("running: " + running)
logTrace("waiting: " + waiting)
logTrace("failed: " + failed)
val waiting2 = waiting.toArray
waiting.clear()
for (stage <- waiting2.sortBy(_.priority)) {
submitStage(stage)
}
}
/**
* The main event loop of the DAG scheduler, which waits for new-job / task-finished / failure
* events and responds by launching tasks. This runs in a dedicated thread and receives events
* via the eventQueue.
*/
private def run() {
SparkEnv.set(env)
while (true) {
val event = eventQueue.poll(POLL_TIMEOUT, TimeUnit.MILLISECONDS)
if (event != null) {
logDebug("Got event of type " + event.getClass.getName)
}
if (event != null) {
if (processEvent(event)) {
return
}
}
val time = System.currentTimeMillis() // TODO: use a pluggable clock for testability
// Periodically resubmit failed stages if some map output fetches have failed and we have
// waited at least RESUBMIT_TIMEOUT. We wait for this short time because when a node fails,
// tasks on many other nodes are bound to get a fetch failure, and they won't all get it at
// the same time, so we want to make sure we've identified all the reduce tasks that depend
// on the failed node.
if (failed.size > 0 && time > lastFetchFailureTime + RESUBMIT_TIMEOUT) {
resubmitFailedStages()
} else {
submitWaitingStages()
}
}
}
/**
* Run a job on an RDD locally, assuming it has only a single partition and no dependencies.
* We run the operation in a separate thread just in case it takes a bunch of time, so that we
* don't block the DAGScheduler event loop or other concurrent jobs.
*/
protected def runLocally(job: ActiveJob) {
logInfo("Computing the requested partition locally")
new Thread("Local computation of job " + job.runId) {
override def run() {
runLocallyWithinThread(job)
}
}.start()
}
// Broken out for easier testing in DAGSchedulerSuite.
protected def runLocallyWithinThread(job: ActiveJob) {
try {
SparkEnv.set(env)
val rdd = job.finalStage.rdd
val split = rdd.partitions(job.partitions(0))
val taskContext = new TaskContext(job.finalStage.id, job.partitions(0), 0)
try {
val result = job.func(taskContext, rdd.iterator(split, taskContext))
job.listener.taskSucceeded(0, result)
} finally {
taskContext.executeOnCompleteCallbacks()
}
} catch {
case e: Exception =>
job.listener.jobFailed(e)
}
}
/** Submits stage, but first recursively submits any missing parents. */
private def submitStage(stage: Stage) {
logDebug("submitStage(" + stage + ")")
if (!waiting(stage) && !running(stage) && !failed(stage)) {
val missing = getMissingParentStages(stage).sortBy(_.id)
logDebug("missing: " + missing)
if (missing == Nil) {
logInfo("Submitting " + stage + " (" + stage.rdd + "), which has no missing parents")
submitMissingTasks(stage)
running += stage
} else {
for (parent <- missing) {
submitStage(parent)
}
waiting += stage
}
}
}
/** Called when stage's parents are available and we can now do its task. */
private def submitMissingTasks(stage: Stage) {
logDebug("submitMissingTasks(" + stage + ")")
// Get our pending tasks and remember them in our pendingTasks entry
val myPending = pendingTasks.getOrElseUpdate(stage, new HashSet)
myPending.clear()
var tasks = ArrayBuffer[Task[_]]()
if (stage.isShuffleMap) {
for (p <- 0 until stage.numPartitions if stage.outputLocs(p) == Nil) {
val locs = getPreferredLocs(stage.rdd, p)
tasks += new ShuffleMapTask(stage.id, stage.rdd, stage.shuffleDep.get, p, locs)
}
} else {
// This is a final stage; figure out its job's missing partitions
val job = resultStageToJob(stage)
for (id <- 0 until job.numPartitions if (!job.finished(id))) {
val partition = job.partitions(id)
val locs = getPreferredLocs(stage.rdd, partition)
tasks += new ResultTask(stage.id, stage.rdd, job.func, partition, locs, id)
}
}
// The listeners must be notified before a possible NotSerializableException below:
// "StageSubmitted" should be posted first, and then "JobEnded".
val properties = idToActiveJob(stage.priority).properties
sparkListeners.foreach(_.onStageSubmitted(
SparkListenerStageSubmitted(stage, tasks.size, properties)))
if (tasks.size > 0) {
// Preemptively serialize a task to make sure it can be serialized. We are catching this
// exception here because it would be fairly hard to catch the non-serializable exception
// down the road, where we have several different implementations for local scheduler and
// cluster schedulers.
try {
SparkEnv.get.closureSerializer.newInstance().serialize(tasks.head)
} catch {
case e: NotSerializableException =>
abortStage(stage, e.toString)
running -= stage
return
}
logInfo("Submitting " + tasks.size + " missing tasks from " + stage + " (" + stage.rdd + ")")
myPending ++= tasks
logDebug("New pending tasks: " + myPending)
taskSched.submitTasks(
new TaskSet(tasks.toArray, stage.id, stage.newAttemptId(), stage.priority, properties))
if (!stage.submissionTime.isDefined) {
stage.submissionTime = Some(System.currentTimeMillis())
}
} else {
logDebug("Stage " + stage + " is actually done; %b %d %d".format(
stage.isAvailable, stage.numAvailableOutputs, stage.numPartitions))
running -= stage
}
}
/**
* Responds to a task finishing. This is called inside the event loop so it assumes that it can
* modify the scheduler's internal state. Use taskEnded() to post a task end event from outside.
*/
private def handleTaskCompletion(event: CompletionEvent) {
val task = event.task
val stage = idToStage(task.stageId)
def markStageAsFinished(stage: Stage) = {
val serviceTime = stage.submissionTime match {
case Some(t) => "%.03f".format((System.currentTimeMillis() - t) / 1000.0)
case _ => "Unkown"
}
logInfo("%s (%s) finished in %s s".format(stage, stage.name, serviceTime))
stage.completionTime = Some(System.currentTimeMillis)
val stageComp = StageCompleted(stageToInfos(stage))
sparkListeners.foreach{_.onStageCompleted(stageComp)}
running -= stage
}
event.reason match {
case Success =>
logInfo("Completed " + task)
if (event.accumUpdates != null) {
Accumulators.add(event.accumUpdates) // TODO: do this only if task wasn't resubmitted
}
pendingTasks(stage) -= task
stageToInfos(stage).taskInfos += event.taskInfo -> event.taskMetrics
task match {
case rt: ResultTask[_, _] =>
resultStageToJob.get(stage) match {
case Some(job) =>
if (!job.finished(rt.outputId)) {
job.finished(rt.outputId) = true
job.numFinished += 1
// If the whole job has finished, remove it
if (job.numFinished == job.numPartitions) {
idToActiveJob -= stage.priority
activeJobs -= job
resultStageToJob -= stage
markStageAsFinished(stage)
sparkListeners.foreach(_.onJobEnd(SparkListenerJobEnd(job, JobSucceeded)))
}
job.listener.taskSucceeded(rt.outputId, event.result)
}
case None =>
logInfo("Ignoring result from " + rt + " because its job has finished")
}
case smt: ShuffleMapTask =>
val status = event.result.asInstanceOf[MapStatus]
val execId = status.location.executorId
logDebug("ShuffleMapTask finished on " + execId)
if (failedGeneration.contains(execId) && smt.generation <= failedGeneration(execId)) {
logInfo("Ignoring possibly bogus ShuffleMapTask completion from " + execId)
} else {
stage.addOutputLoc(smt.partition, status)
}
if (running.contains(stage) && pendingTasks(stage).isEmpty) {
markStageAsFinished(stage)
logInfo("looking for newly runnable stages")
logInfo("running: " + running)
logInfo("waiting: " + waiting)
logInfo("failed: " + failed)
if (stage.shuffleDep != None) {
// We supply true to increment the generation number here in case this is a
// recomputation of the map outputs. In that case, some nodes may have cached
// locations with holes (from when we detected the error) and will need the
// generation incremented to refetch them.
// TODO: Only increment the generation number if this is not the first time
// we registered these map outputs.
mapOutputTracker.registerMapOutputs(
stage.shuffleDep.get.shuffleId,
stage.outputLocs.map(list => if (list.isEmpty) null else list.head).toArray,
true)
}
clearCacheLocs()
if (stage.outputLocs.count(_ == Nil) != 0) {
// Some tasks had failed; let's resubmit this stage
// TODO: Lower-level scheduler should also deal with this
logInfo("Resubmitting " + stage + " (" + stage.name +
") because some of its tasks had failed: " +
stage.outputLocs.zipWithIndex.filter(_._1 == Nil).map(_._2).mkString(", "))
submitStage(stage)
} else {
val newlyRunnable = new ArrayBuffer[Stage]
for (stage <- waiting) {
logInfo("Missing parents for " + stage + ": " + getMissingParentStages(stage))
}
for (stage <- waiting if getMissingParentStages(stage) == Nil) {
newlyRunnable += stage
}
waiting --= newlyRunnable
running ++= newlyRunnable
for (stage <- newlyRunnable.sortBy(_.id)) {
logInfo("Submitting " + stage + " (" + stage.rdd + "), which is now runnable")
submitMissingTasks(stage)
}
}
}
}
case Resubmitted =>
logInfo("Resubmitted " + task + ", so marking it as still running")
pendingTasks(stage) += task
case FetchFailed(bmAddress, shuffleId, mapId, reduceId) =>
// Mark the stage that the reducer was in as unrunnable
val failedStage = idToStage(task.stageId)
running -= failedStage
failed += failedStage
// TODO: Cancel running tasks in the stage
logInfo("Marking " + failedStage + " (" + failedStage.name +
") for resubmision due to a fetch failure")
// Mark the map whose fetch failed as broken in the map stage
val mapStage = shuffleToMapStage(shuffleId)
if (mapId != -1) {
mapStage.removeOutputLoc(mapId, bmAddress)
mapOutputTracker.unregisterMapOutput(shuffleId, mapId, bmAddress)
}
logInfo("The failed fetch was from " + mapStage + " (" + mapStage.name +
"); marking it for resubmission")
failed += mapStage
// Remember that a fetch failed now; this is used to resubmit the broken
// stages later, after a small wait (to give other tasks the chance to fail)
lastFetchFailureTime = System.currentTimeMillis() // TODO: Use pluggable clock
// TODO: mark the executor as failed only if there were lots of fetch failures on it
if (bmAddress != null) {
handleExecutorLost(bmAddress.executorId, Some(task.generation))
}
case ExceptionFailure(className, description, stackTrace, metrics) =>
// Do nothing here, left up to the TaskScheduler to decide how to handle user failures
case other =>
// Unrecognized failure - abort all jobs depending on this stage
abortStage(idToStage(task.stageId), task + " failed: " + other)
}
}
/**
* Responds to an executor being lost. This is called inside the event loop, so it assumes it can
* modify the scheduler's internal state. Use executorLost() to post a loss event from outside.
*
* Optionally the generation during which the failure was caught can be passed to avoid allowing
* stray fetch failures from possibly retriggering the detection of a node as lost.
*/
private def handleExecutorLost(execId: String, maybeGeneration: Option[Long] = None) {
val currentGeneration = maybeGeneration.getOrElse(mapOutputTracker.getGeneration)
if (!failedGeneration.contains(execId) || failedGeneration(execId) < currentGeneration) {
failedGeneration(execId) = currentGeneration
logInfo("Executor lost: %s (generation %d)".format(execId, currentGeneration))
blockManagerMaster.removeExecutor(execId)
// TODO: This will be really slow if we keep accumulating shuffle map stages
for ((shuffleId, stage) <- shuffleToMapStage) {
stage.removeOutputsOnExecutor(execId)
val locs = stage.outputLocs.map(list => if (list.isEmpty) null else list.head).toArray
mapOutputTracker.registerMapOutputs(shuffleId, locs, true)
}
if (shuffleToMapStage.isEmpty) {
mapOutputTracker.incrementGeneration()
}
clearCacheLocs()
} else {
logDebug("Additional executor lost message for " + execId +
"(generation " + currentGeneration + ")")
}
}
private def handleExecutorGained(execId: String, hostPort: String) {
// remove from failedGeneration(execId) ?
if (failedGeneration.contains(execId)) {
logInfo("Host gained which was in lost list earlier: " + hostPort)
failedGeneration -= execId
}
}
/**
* Aborts all jobs depending on a particular Stage. This is called in response to a task set
* being cancelled by the TaskScheduler. Use taskSetFailed() to inject this event from outside.
*/
private def abortStage(failedStage: Stage, reason: String) {
val dependentStages = resultStageToJob.keys.filter(x => stageDependsOn(x, failedStage)).toSeq
failedStage.completionTime = Some(System.currentTimeMillis())
for (resultStage <- dependentStages) {
val job = resultStageToJob(resultStage)
val error = new SparkException("Job failed: " + reason)
job.listener.jobFailed(error)
sparkListeners.foreach(_.onJobEnd(SparkListenerJobEnd(job, JobFailed(error, Some(failedStage)))))
idToActiveJob -= resultStage.priority
activeJobs -= job
resultStageToJob -= resultStage
}
if (dependentStages.isEmpty) {
logInfo("Ignoring failure of " + failedStage + " because all jobs depending on it are done")
}
}
/**
* Return true if one of stage's ancestors is target.
*/
private def stageDependsOn(stage: Stage, target: Stage): Boolean = {
if (stage == target) {
return true
}
val visitedRdds = new HashSet[RDD[_]]
val visitedStages = new HashSet[Stage]
def visit(rdd: RDD[_]) {
if (!visitedRdds(rdd)) {
visitedRdds += rdd
for (dep <- rdd.dependencies) {
dep match {
case shufDep: ShuffleDependency[_,_] =>
val mapStage = getShuffleMapStage(shufDep, stage.priority)
if (!mapStage.isAvailable) {
visitedStages += mapStage
visit(mapStage.rdd)
} // Otherwise there's no need to follow the dependency back
case narrowDep: NarrowDependency[_] =>
visit(narrowDep.rdd)
}
}
}
}
visit(stage.rdd)
visitedRdds.contains(target.rdd)
}
private def getPreferredLocs(rdd: RDD[_], partition: Int): List[String] = {
// If the partition is cached, return the cache locations
val cached = getCacheLocs(rdd)(partition)
if (cached != Nil) {
return cached
}
// If the RDD has some placement preferences (as is the case for input RDDs), get those
val rddPrefs = rdd.preferredLocations(rdd.partitions(partition)).toList
if (rddPrefs != Nil) {
return rddPrefs
}
// If the RDD has narrow dependencies, pick the first partition of the first narrow dep
// that has any placement preferences. Ideally we would choose based on transfer sizes,
// but this will do for now.
rdd.dependencies.foreach(_ match {
case n: NarrowDependency[_] =>
for (inPart <- n.getParents(partition)) {
val locs = getPreferredLocs(n.rdd, inPart)
if (locs != Nil)
return locs
}
case _ =>
})
return Nil
}
private def cleanup(cleanupTime: Long) {
var sizeBefore = idToStage.size
idToStage.clearOldValues(cleanupTime)
logInfo("idToStage " + sizeBefore + " --> " + idToStage.size)
sizeBefore = shuffleToMapStage.size
shuffleToMapStage.clearOldValues(cleanupTime)
logInfo("shuffleToMapStage " + sizeBefore + " --> " + shuffleToMapStage.size)
sizeBefore = pendingTasks.size
pendingTasks.clearOldValues(cleanupTime)
logInfo("pendingTasks " + sizeBefore + " --> " + pendingTasks.size)
sizeBefore = stageToInfos.size
stageToInfos.clearOldValues(cleanupTime)
logInfo("stageToInfos " + sizeBefore + " --> " + stageToInfos.size)
}
def stop() {
eventQueue.put(StopDAGScheduler)
metadataCleaner.cancel()
taskSched.stop()
}
}
| bavardage/spark | core/src/main/scala/spark/scheduler/DAGScheduler.scala | Scala | apache-2.0 | 32,950 |
/**
* Copyright 2017 RiskSense, Inc.
* This file is part of ipaddr library.
*
* Ipaddr is free software licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may obtain a copy of the
* License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.risksense.ipaddr
import scala.math.Ordering.Implicits._ // scalastyle:ignore underscore.import
/** An IPv4 network or subnet. A combination of an IP address and a network mask.
* Accepts CIDR and several related variants :
*
* (a) Standard CIDR
* <ul><li>x.x.x.x/y -> 192.0.2.0/24 </li></ul>
*
* b) Hybrid CIDR format (netmask address instead of prefix), where 'y' address represent a
* valid netmask
* <ul><li>x.x.x.x/y.y.y.y -> 192.0.2.0/255.255.255.0 </li></ul>
*
* c) ACL hybrid CIDR format (hostmask address instead of prefix like Cisco's ACL bitmasks),
* where 'y' address represent a valid netmask
* <ul><li>x.x.x.x/y.y.y.y -> 192.0.2.0/0.0.0.255 </li></ul>
*
* d) Abbreviated CIDR format
* <ul><li>x -> 192 </li>
* <li>x/y -> 10/8 </li>
* <li>x.x/y -> 192.168/16 </li>
* <li>x.x.x/y -> 192.168.0/24 </li></ul>
* which are equivalent to:
* <ul><li>x.0.0.0/y -> 192.0.0.0/24 </li>
* <li>x.0.0.0/y -> 10.0.0.0/8 </li>
* <li>x.x.0.0/y -> 192.168.0.0/16 </li>
* <li>x.x.x.0/y -> 192.168.0.0/24 </li></ul>
*
* @constructor creates an IpNetwork object
* @param address String representation of a network address
* @param mask Number of network bits
* @param version IP version of this network
*/
class IpNetwork private[ipaddr](
address: String,
val mask: Int,
val version: Int)
extends Ordered[IpNetwork] {
/** The IP address of this IpNetwork object.
*
* This may or may not be the same as the network IP address which varies according to the
* value of the CIDR subnet prefix.
*/
val ip: IpAddress = IpAddress(address)
// scalastyle:ignore equals.hash.code
private val maskedAddr = this.ip.applyMask(this.mask)
/** Number of IP addresses in this Network. */
lazy val size: Long = last - first + 1
/** The network address of this IpNetwork as an IpAddress object. */
lazy val network: IpAddress = ipAddr
/** HashCode of this IpNetwork is calculated over all octets.
*
* Please refer to hashCode in [[IpAddress]] for details.
*/
override def hashCode: Int = ipAddr.hashCode
/** True address for this IpNetwork object which omits any host bits to the right of the CIDR
* subnet prefix.
*/
val ipAddr: IpAddress = this.maskedAddr
/** Numerical value of the host mask of this network. */
val hostmaskNum: Long = (1L << (this.ipAddr.width - this.mask)) - 1
/** The host mask of this IpNetwork object */
val hostmask: IpAddress = IpAddress(this.hostmaskNum)
/** Numerical value of the subnet mask of this network. */
val netmaskNum: Long = this.ip.maxNumerical ^ this.hostmaskNum
/** The subnet mask of this IpNetwork as an IpAddress object.
* @example If netmask is 24, then IpAddress(255.255.255.0)
*/
val netmask: IpAddress = IpAddress(this.netmaskNum)
/** The broadcast address of this IpNetwork object. */
val broadcast: IpAddress = IpAddress(this.ip.numerical | this.hostmaskNum)
/** The numerical value of first IP address found within this IpNetwork object. */
val first: Long = this.ip.numerical & (this.ip.maxNumerical ^ this.hostmaskNum)
/** The numerical value of last IP address found within this IpNetwork object. */
val last: Long = this.ip.numerical | this.hostmaskNum
/** A key tuple used to uniquely identify this Network.
*
* (IP version, numerical first host, numerical last host)
*/
val key: (Int, Long, Long) = (this.version, this.first, this.last)
/** A tuple used to sort networks.
*
* A tuple (IP version, numerical first host, net bits, host bits) that is used to perform
* sorting.
*
* @return A key tuple used to compare and sort this network.
*/
lazy val sortKey: (Int, Long, Int, Long) = {
val netSizeBits = this.mask - 1
val hostBits = this.ip.numerical - this.first
(this.version, this.first, netSizeBits, hostBits)
}
/** All hosts in this Network. Includes network address and broadcast address as well. */
lazy val allHosts: Stream[IpAddress] = {
if (this.version == 4) {
BaseIp.addressStream(IpAddress(this.first), IpAddress(this.last))
} else {
Stream()
}
}
/** The true CIDR address for this IpNetwork object which omits any host bits to the right of the
* CIDR subnet prefix.
*/
def cidr: IpNetwork = IpNetwork(this.ipAddr, this.mask)
/** Succeeding Network.
*
* @param step The number of IP subnets between this IpNetwork object and the expected subnet.
* Defaults to 1.
* @return The adjacent subnet succeeding this IpNetwork object.
* @throws IpaddrException if the address cannot be calculated.
*/
def next(step: Int = 1): IpNetwork = {
val newValue = this.ipAddr.numerical + (this.size * step)
if (newValue + (this.size - 1) > this.ipAddr.maxNumerical || (newValue < 0)) {
throw new IpaddrException(IpaddrException.invalidAddress(newValue.toString))
} else {
IpNetwork(newValue, this.mask)
}
}
/** Preceding network
*
* @param step The number of IP subnets between this IpNetwork object and the expected subnet.
* Defaults to 1.
* @return The adjacent subnet preceding this IpNetwork object.
* @throws IpaddrException if the address cannot be calculated.
*/
def previous(step: Int = 1): IpNetwork = {
val newValue = this.ipAddr.numerical - (this.size * step)
if ((newValue + (this.size - 1)) > this.ipAddr.maxNumerical || (newValue < 0)) {
throw new IpaddrException(IpaddrException.invalidAddress(newValue.toString))
} else {
IpNetwork(newValue, this.mask)
}
}
/** Find subnet.
*
* Divide this Network's subnet into smaller subnets based on a specified CIDR prefix.
*
* @param prefix A prefix value indicating size of subnets to be returned.
* @param count Number of consecutive networks to be returned. (Optional)
* @return a sequence of IpNetwork objects
*/
def subnet(prefix: Int, count: Int = 0): Seq[IpNetwork] = {
if (prefix < 0 || prefix > this.ipAddr.width || prefix < this.mask) {
Nil
} else {
val maxSubnets = scala.math.pow(2, this.ipAddr.width - this.mask).toInt /
scala.math.pow(2, this.ipAddr.width - prefix).toInt
val countNew = count match {
case 0 => maxSubnets
case _ => count
}
if (countNew < 0 || countNew > maxSubnets) {
Nil
} else {
val temp = IpNetwork(this.first, prefix)
for { i <- 0 until countNew } yield {
val addressNum = temp.ip.numerical + (temp.size * i)
IpNetwork(addressNum, prefix)
}
}
}
}
/** Find supernet.
*
* Returns a sequence of supernets for this [[IpNetwork]] object between the size of the current
* prefix and (if specified) an endpoint prefix.
*
* @param prefix A prefix value for the maximum supernet (optional)
* Default: 0 - returns all possible supernets.
* @return a sequence of IpNetwork objects.
*/
def supernet(prefix: Int = 0): Seq[IpNetwork] = {
if (prefix < 0 || prefix > this.ipAddr.width) {
Nil
} else {
for { p <- prefix until this.mask } yield IpNetwork(this.ipAddr, p)
}
}
/** String representation of this network object.
*
* @return a CIDR format string with address and mask.
*/
override def toString: String = ip.toString + IpNetwork.fwdSlashStr + mask
def compare(that: IpNetwork): Int = {
// Compares the key objects from two IpAddress objects.
if (this.equals(that)) {
0
} else if (this.sortKey < that.sortKey) {
-1
} else {
1
}
}
// $COVERAGE-OFF$
/** Override `equals`.
*
* Checks if two IpNetwork objects are equal. Networks are equal if they have same network
* address and same mask.
*
* @param other IpNetwork object to compare with
* @return True if IpNetwork objects are equal, False otherwise
*/
override def equals(other: Any): Boolean = other match {
case that: IpNetwork => that.canEquals(this) && (this.key == that.key)
case _ => false
}
// $COVERAGE-ON$
/** Checks if that is same instance of this IpNetwork object.
*
* @param other an Object
* @return True if both objects are instances of IpNetwork class, False otherwise.
*/
def canEquals(other: Any): Boolean = other.isInstanceOf[IpNetwork]
/** Checks if the given [[IpRange]] belongs to this [[IpNetwork]] object.
*
* @param range an IpRange object
* @return True if input IpRange belongs to this Network, False otherwise.
*/
def contains(range: IpRange): Boolean = {
if (this.version != range.version) {
false
} else {
val shiftWidth = this.ipAddr.width - this.mask
val thisNet = this.ip.numerical >> shiftWidth // flush the host bits
(this.ipAddr.numerical <= range.start.numerical) &&
(((thisNet + 1) << shiftWidth) > range.end.numerical)
}
}
/** Checks if the given IpNetwork belongs to this [[IpNetwork]] object.
*
* @param net a IpNetwork object
* @return True if input IpNetwork belongs to this Network, False otherwise.
*/
def contains(net: IpNetwork): Boolean = {
if (this.version != net.version) {
false
} else {
val shiftWidth = this.ipAddr.width - this.mask
val thisNet = this.ip.numerical >> shiftWidth // flush the host bits
val thatNet = net.ip.numerical >> shiftWidth
(thatNet == thisNet) && (net.mask >= this.mask)
}
}
/** Checks if the given IP address belongs to this [[IpNetwork]] object.
*
* @param addr an IP address specified as dot-delimited string
* @return True if input address belongs to this Network, False otherwise.
*/
def contains(addr: String): Boolean = contains(IpAddress(addr))
/** Checks if the given [[IpAddress]] belongs to this [[IpNetwork]] object.
*
* @param ipAddress an IpAddress object
* @return True if input IpAddress belongs to this Network, False otherwise.
*/
def contains(ipAddress: IpAddress): Boolean = {
if (this.version != ipAddress.version) {
false
} else {
val shiftWidth = this.ipAddr.width - this.mask
val thisNet = this.ip.numerical >> shiftWidth // flush the host bits
val thatNet = ipAddress.numerical >> shiftWidth
thatNet == thisNet // compare network bits only
}
}
}
/** Implements an IP Network.
*
* For handling single IP addresses, see Ipv4 class.
*/
object IpNetwork {
private val fwdSlashStr = "/"
/** Creates an [[IpNetwork]] from an IpAddress and prefix.
*
* @param address An IpAddress object
* @param mask Subnet prefix number
* @return IpNetwork if the input is valid.
* @throws IpaddrException if the input address is invalid
*/
def apply(address: IpAddress, mask: Int): IpNetwork = apply(address.toString, mask)
/** Creates an [[IpNetwork]] object from a numeric network address and prefix.
*
* @param address Long equivalent of a network address
* @param mask Subnet prefix number
* @return IpNetwork if the input is valid.
* @throws IpaddrException if the input address is invalid
*/
def apply(address: Long, mask: Int): IpNetwork = apply(IpAddress(address), mask)
/** Creates an IpNetwork object from a network address and a network prefix
*
* @param address Dot-delimited string containing network address
* @param mask Subnet prefix number
* @return IpNetwork if input is valid, otherwise an IpaddrException.
* @throws IpaddrException if the input address is invalid
*/
def apply(address: String, mask: Int): IpNetwork = new IpNetwork(address, mask, Ipv4.version)
/** Creates an [[IpNetwork]] object from CIDR address like `10.2.1.0/24`
*
* @param address Dot-delimited CIDR string representation of a network address
* @return IpNetwork if input is valid
* @throws IpaddrException if the input address is invalid
*/
@throws(classOf[IpaddrException])
def apply(address: String): IpNetwork = {
val res = parseIpNetwork(address)
if (res.isEmpty) {
throw new IpaddrException(IpaddrException.invalidAddress(address))
}
new IpNetwork(res.get._1, res.get._2, Ipv4.version)
}
/** Converts a dot-delimited string to meaningful network address.
*
* @param address Dot-delimited string representation of a network address
* @return An Option containing 2-Tuple(Network address CIDR string, Integer Mask).
* None is returned if the input cannot be properly parsed.
*/
private def parseIpNetwork(address: String): Option[(String, Int)] = {
Ipv4.expandPartialAddress(address.split('/')(0)) match {
case Some(addrCidr) =>
val maskArray = address.split('/')
val mask = if (maskArray.length == 2) {
Ipv4.isValidMask(maskArray(1))
} else {
Some(Ipv4.width) // Assume default mask with all network bits set
}
mask match {
case Some(m) => Some((addrCidr, m))
case _ => None
}
case _ => None
}
}
}
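// Illustrative usage (added as a sketch; the expected values follow from the class
// definition above, not from the project's own test suite):
private[ipaddr] object IpNetworkUsageExample {
  val net: IpNetwork = IpNetwork("192.168.0.0/24")
  val mask: IpAddress = net.netmask                 // 255.255.255.0
  val bcast: IpAddress = net.broadcast              // 192.168.0.255
  val hit: Boolean = net.contains("192.168.0.42")   // true
  val quarters: Seq[IpNetwork] = net.subnet(26)     // four /26 subnets of 64 addresses each
}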
| risksense/ipaddr | src/main/scala/com/risksense/ipaddr/IpNetwork.scala | Scala | apache-2.0 | 13,920 |
package illustrations
import scala.scalajs.js.annotation.JSExportTopLevel
import scala.scalajs.js.annotation.JSExport
import scala.concurrent._
@JSExportTopLevel("Primes")
object Primes{
def recSieve(remaining: List[Int], accum: Vector[Int]) : Vector[Int] =
remaining match {
case Nil => accum
case head :: tl => recSieve(tl.filterNot(_ % head == 0), accum :+ head)
}
@JSExport
def sieve(n: Int): Vector[Int] = recSieve((2 to n).toList, Vector())
def ap(first: Int, second: Int): Stream[Int] = Stream.from(0).map{j => first + (j * (second - first))}
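  // e.g. sieve(10) == Vector(2, 3, 5, 7); ap(3, 7) yields the stream 3, 7, 11, 15, ...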
val pc = new Primes(100000)
lazy val progressions = pc.primeArithmeticProgressions.take(1000000).groupBy(_.size)
@JSExport
lazy val progressionSizes = progressions.mapValues(_.size).toArray.sortBy(_._1).map(_._2).mkString(", ")
def primeLI(n: Int) =
if (pc.primeSet.contains(n)) s"""<li class="prime">$n</li>""" else s"<li>$n</li>"
def primesBetween(n: Int, m: Int): String = (n to m).toVector.map(primeLI(_)).mkString(" ")
def twinPrime(n: Int) = pc.primeSet.contains(n) && (pc.primeSet.intersect(Set(n -2, n + 2)).nonEmpty)
def twinPrimeLI(n: Int) =
if (twinPrime(n)) s"""<li class="twin-prime">$n</li>""" else s"<li>$n</li>"
def twinPrimesBetween(n: Int, m: Int): String = (n to m).toVector.map(twinPrimeLI(_)).mkString(" ")
def apSpan(ap: Vector[Int]) = {
val list = ap.map{n=> s"""<li class="prime-term">$n</li>"""}.mkString(" ")
s"""<span class="group">$list</span>"""
}
def someAPs(s: Stream[Vector[Int]], n: Int) = s.take(n).toVector.sortBy(_.head).map(apSpan(_)).mkString(" ")
}
class Primes(val max: Int){
lazy val primeVec : Vector[Int] = Primes.sieve(max)
lazy val primeSet : Set[Int ]= primeVec.toSet
lazy val primes : Stream[Int] = primeVec.toStream
lazy val twinPrimes : Stream[(Int, Int)] =
primes.filter(p => primeSet.contains(p + 2)).map(p => (p, p + 2))
def primeAp(first: Int, second: Int): Vector[Int] = Primes.ap(first, second).takeWhile(primeSet.contains(_)).toVector
lazy val primeArithmeticProgressions : Stream[Vector[Int]] =
primes.flatMap(y =>
primes.takeWhile(_ < y).map(x => primeAp(x, y))
)
def primeArithOf(length: Int, limit : Int = 1000000) = primeArithmeticProgressions.take(limit).filter(_.size == length)
}
| siddhartha-gadgil/presentations | illustrations/src/Primes.scala | Scala | mit | 2,433 |
package eu.timepit.refined.macros
import scala.reflect.macros.blackbox
trait LiteralMatchers {
val c: blackbox.Context
import c.universe._
private[macros] object BigIntMatcher {
def unapply(expr: c.Tree): Option[BigInt] =
expr match {
case q"scala.`package`.BigInt.apply(${lit: Literal})" =>
lit.value.value match {
case i: Int => Some(BigInt(i))
case l: Long => Some(BigInt(l))
case s: String => scala.util.Try(BigInt(s)).toOption
case _ => None
}
case _ => None
}
}
private[macros] object BigDecimalMatcher {
def unapply(expr: c.Tree): Option[BigDecimal] = {
val constant = expr match {
case q"scala.`package`.BigDecimal.apply(${lit: Literal})" => Some(lit.value.value)
case q"scala.`package`.BigDecimal.exact(${lit: Literal})" => Some(lit.value.value)
case q"scala.`package`.BigDecimal.valueOf(${lit: Literal})" => Some(lit.value.value)
case _ => None
}
constant.flatMap {
case i: Int => Some(BigDecimal(i))
case l: Long => Some(BigDecimal(l))
case d: Double => Some(BigDecimal(d))
case s: String => scala.util.Try(BigDecimal(s)).toOption
case _ => None
}
}
}
}
| fthomas/refined | modules/core/shared/src/main/scala-3.0-/eu/timepit/refined/macros/LiteralMatchers.scala | Scala | mit | 1,370 |
package ui.program
import org.scalajs.dom
import org.scalajs.dom.raw.{WebGLProgram, WebGLShader, WebGLUniformLocation}
import ui.scene.SceneItem
import ui.shader.Shader
import ui.GLContext
import ui.math.{Vec2, Vec3}
import scala.collection.mutable
import scala.scalajs.js
class Program(val gLContext: GLContext,
val vertShader: Shader,
val fragShader: Shader,
val attributes: Seq[Attribute] = Seq[Attribute](),
val uniforms: Seq[Uniform] = Seq[Uniform]()) {
var program: Option[WebGLProgram] = None
val attributePositions: mutable.Map[Attribute,Int] = mutable.Map[Attribute,Int]()
val uniformPositions: mutable.Map[Uniform,WebGLUniformLocation] = mutable.Map[Uniform,WebGLUniformLocation]()
val uniformValuesF: mutable.Map[Uniform,Double] = mutable.Map[Uniform,Double]()
val uniformValuesI: mutable.Map[Uniform,Int] = mutable.Map[Uniform,Int]()
val uniformValuesV3: mutable.Map[Uniform,Vec3[Double]] = mutable.Map[Uniform,Vec3[Double]]()
val uniformValuesV2: mutable.Map[Uniform,Vec2] = mutable.Map[Uniform,Vec2]()
var startTime: Double = 0d
def init(): Unit = {
import dom.raw.WebGLRenderingContext._
val gl = gLContext.gl
val vShader = compileShader(vertShader, VERTEX_SHADER)
val fShader = compileShader(fragShader, FRAGMENT_SHADER)
val programRef = gl.createProgram()
program = Some(programRef)
gl.attachShader(programRef, vShader)
gl.attachShader(programRef, fShader)
gl.linkProgram(programRef)
gl.useProgram(programRef)
// Get attribute positions and enable
attributes.foreach((attribute: Attribute) => {
val attributePos = gl.getAttribLocation(programRef, attribute.name)
attributePositions += attribute -> attributePos
gl.enableVertexAttribArray(attributePos)
})
uniforms.foreach((uniform: Uniform) => {
val uniformPos = gl.getUniformLocation(programRef, uniform.name)
uniformPositions += uniform -> uniformPos
uniform.dataType match {
case DataType.GlFloat =>
uniformValuesF(uniform) = 0.0f
case DataType.GlVec3 =>
uniformValuesV3(uniform) = Vec3.zeros
case DataType.GlVec2 =>
uniformValuesV2(uniform) = Vec2(0.0f, 0.0f)
case DataType.GlInt =>
uniformValuesI(uniform) = 0
}
})
}
def ref: WebGLProgram = program.getOrElse(throw new Exception("No webgl reference"))
def draw(sceneItem: SceneItem, numItems: Int): Unit = {
uniformPositions.get(Uniform("iGlobalTime", DataType.GlFloat)).foreach(_ => {
if (startTime == 0) {
startTime = js.Date.now()
}
val elapsed = (js.Date.now() - startTime) / 1000d
uniformValuesF(Uniform("iGlobalTime", DataType.GlFloat)) = elapsed
})
import dom.raw.WebGLRenderingContext._
val gl = gLContext.gl
// Activate shader attributes
var attribPosition = 0
attributes.foreach((attribute: Attribute) => {
// Using attribute position to find buffer
sceneItem.buffers(attribPosition).bind(gl)
gl.vertexAttribPointer(
attributePositions(attribute),
attribute.size,
attribute.dataType match {
case DataType.GlFloat => FLOAT
case DataType.GlInt => INT
case _ => FLOAT
},
normalized = false,
0,
0
)
attribPosition += 1
})
var totalSize = 0
attributes.foreach((attribute) => totalSize += attribute.size)
//val totalSize: Int = attributes.fold(0)((size, attribute) => size + attribute.size)
uniforms.foreach((uniform: Uniform) => {
uniform.dataType match {
case DataType.GlFloat =>
gl.uniform1f(uniformPositions(uniform), uniformValuesF(uniform))
case DataType.GlInt =>
gl.uniform1i(uniformPositions(uniform), uniformValuesI(uniform))
case DataType.GlVec3 =>
val vec3Values = uniformValuesV3(uniform)
gl.uniform3f(uniformPositions(uniform), vec3Values.x, vec3Values.y, vec3Values.z)
case DataType.GlVec2 =>
val vec2Values = uniformValuesV2(uniform)
gl.uniform2f(uniformPositions(uniform), vec2Values.x, vec2Values.y)
}
})
    // Draw with the index buffer when present, otherwise draw plain arrays
if (sceneItem.indexBuffer != null) {
val numEls = sceneItem.indexBuffer.data.length
gl.drawElements(TRIANGLES, numEls, UNSIGNED_SHORT, 0)
} else {
gl.drawArrays(TRIANGLES, 0, numItems)
}
}
def compileShader(shader: Shader, shaderType: Int): WebGLShader = {
import dom.raw.WebGLRenderingContext._
val gl = gLContext.gl
val vShader = gl.createShader(shaderType)
gl.shaderSource(vShader, shader.source)
gl.compileShader(vShader)
    val compileStatus = gl.getShaderParameter(vShader, COMPILE_STATUS).asInstanceOf[Boolean]
    if (!compileStatus) {
      val compilationLog = gl.getShaderInfoLog(vShader)
dom.console.error("Shader compilation failed")
dom.console.warn(compilationLog)
}
vShader
}
}
| gvatn/play-scalajs-webgl-spark | client/src/main/scala/ui/program/Program.scala | Scala | mit | 5,153 |
package io.reactors
package protocol
import io.reactors.common.SetSeq
import org.scalatest._
import org.scalatest.concurrent.AsyncTimeLimitedTests
import scala.collection._
import scala.concurrent.ExecutionContext
import scala.concurrent.Promise
import scala.concurrent.duration._
class RouterProtocolsSpec
extends AsyncFunSuite with AsyncTimeLimitedTests {
val system = ReactorSystem.default("router-protocols")
def timeLimit = 10.seconds
implicit override def executionContext = ExecutionContext.Implicits.global
test("default router with single channel") {
val done = Promise[Boolean]()
system.spawn(Reactor[Int] { self =>
val rc = system.channels.daemon.router[Int]
.route(Router.roundRobin(Seq(self.main.channel)))
rc.channel ! 17
self.main.events onEvent { x =>
if (x == 17) {
done.success(true)
self.main.seal()
} else {
done.success(false)
}
}
})
done.future.map(t => assert(t))
}
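  // Sketch of round-robin delivery (channel names are placeholders; this is an
  // illustration, not an assertion made by this suite):
  //
  //   val rc = system.channels.daemon.router[Int]
  //     .route(Router.roundRobin(Seq(chanA, chanB)))
  //   rc.channel ! 1  // delivered to chanA
  //   rc.channel ! 2  // delivered to chanB
  //   rc.channel ! 3  // back to chanA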
test("round robin should work correctly when targets change") {
val done = Promise[Seq[Int]]()
system.spawn(Reactor[Int] { self =>
val seen = mutable.Buffer[Int]()
val c1 = system.channels.open[Int]
val c2 = system.channels.open[Int]
val routees = new SetSeq[Channel[Int]]
routees += self.main.channel
val rc = system.channels.daemon.router[Int].route(Router.roundRobin(routees))
rc.channel ! 17
self.main.events onEvent { x =>
seen += x
routees -= self.main.channel += c1.channel += c2.channel
rc.channel ! 18
}
c1.events onEvent { x =>
seen += x
c1.seal()
done.success(seen)
}
c2.events onEvent { x =>
seen += x
c2.seal()
rc.channel ! 19
}
})
done.future.map(xs => assert(xs == Seq(17, 18, 19)))
}
}
| storm-enroute/reactors | reactors-protocol/shared/src/test/scala/io/reactors/protocol/router-protocols-tests.scala | Scala | bsd-3-clause | 1,883 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.{ObjectInputStream, ObjectOutputStream, IOException}
import java.util.Random
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable
import org.scalatest.FunSuite
import org.apache.spark._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.util.ManualClock
class FakeDAGScheduler(sc: SparkContext, taskScheduler: FakeTaskScheduler)
extends DAGScheduler(sc) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {
taskScheduler.startedTasks += taskInfo.index
}
override def taskEnded(
task: Task[_],
reason: TaskEndReason,
result: Any,
accumUpdates: mutable.Map[Long, Any],
taskInfo: TaskInfo,
taskMetrics: TaskMetrics) {
taskScheduler.endedTasks(taskInfo.index) = reason
}
override def executorAdded(execId: String, host: String) {}
override def executorLost(execId: String) {}
override def taskSetFailed(taskSet: TaskSet, reason: String) {
taskScheduler.taskSetsFailed += taskSet.id
}
}
// Get the rack for a given host
object FakeRackUtil {
private val hostToRack = new mutable.HashMap[String, String]()
def cleanUp() {
hostToRack.clear()
}
def assignHostToRack(host: String, rack: String) {
hostToRack(host) = rack
}
def getRackForHost(host: String) = {
hostToRack.get(host)
}
}
/**
* A mock TaskSchedulerImpl implementation that just remembers information about tasks started and
* feedback received from the TaskSetManagers. Note that it's important to initialize this with
* a list of "live" executors and their hostnames for isExecutorAlive and hasExecutorsAliveOnHost
* to work, and these are required for locality in TaskSetManager.
*/
class FakeTaskScheduler(sc: SparkContext, liveExecutors: (String, String)* /* execId, host */)
extends TaskSchedulerImpl(sc)
{
val startedTasks = new ArrayBuffer[Long]
val endedTasks = new mutable.HashMap[Long, TaskEndReason]
val finishedManagers = new ArrayBuffer[TaskSetManager]
val taskSetsFailed = new ArrayBuffer[String]
val executors = new mutable.HashMap[String, String]
for ((execId, host) <- liveExecutors) {
addExecutor(execId, host)
}
for ((execId, host) <- liveExecutors; rack <- getRackForHost(host)) {
hostsByRack.getOrElseUpdate(rack, new mutable.HashSet[String]()) += host
}
dagScheduler = new FakeDAGScheduler(sc, this)
def removeExecutor(execId: String) {
executors -= execId
val host = executorIdToHost.get(execId)
assert(host != None)
val hostId = host.get
val executorsOnHost = executorsByHost(hostId)
executorsOnHost -= execId
for (rack <- getRackForHost(hostId); hosts <- hostsByRack.get(rack)) {
hosts -= hostId
if (hosts.isEmpty) {
hostsByRack -= rack
}
}
}
override def taskSetFinished(manager: TaskSetManager): Unit = finishedManagers += manager
override def isExecutorAlive(execId: String): Boolean = executors.contains(execId)
override def hasExecutorsAliveOnHost(host: String): Boolean = executors.values.exists(_ == host)
override def hasHostAliveOnRack(rack: String): Boolean = {
hostsByRack.get(rack) != None
}
def addExecutor(execId: String, host: String) {
executors.put(execId, host)
val executorsOnHost = executorsByHost.getOrElseUpdate(host, new mutable.HashSet[String])
executorsOnHost += execId
executorIdToHost += execId -> host
for (rack <- getRackForHost(host)) {
hostsByRack.getOrElseUpdate(rack, new mutable.HashSet[String]()) += host
}
}
override def getRackForHost(value: String): Option[String] = FakeRackUtil.getRackForHost(value)
}
/**
* A Task implementation that results in a large serialized task.
*/
class LargeTask(stageId: Int) extends Task[Array[Byte]](stageId, 0) {
val randomBuffer = new Array[Byte](TaskSetManager.TASK_SIZE_TO_WARN_KB * 1024)
val random = new Random(0)
random.nextBytes(randomBuffer)
override def runTask(context: TaskContext): (Long, Array[Byte]) = (randomBuffer.length, randomBuffer)
override def preferredLocations: Seq[TaskLocation] = Seq[TaskLocation]()
}
class TaskSetManagerSuite extends FunSuite with LocalSparkContext with Logging {
import TaskLocality.{ANY, PROCESS_LOCAL, NO_PREF, NODE_LOCAL, RACK_LOCAL}
private val conf = new SparkConf
val LOCALITY_WAIT = conf.getLong("spark.locality.wait", 3000)
val MAX_TASK_FAILURES = 4
override def beforeEach() {
super.beforeEach()
FakeRackUtil.cleanUp()
}
test("TaskSet with no preferences") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = FakeTask.createTaskSet(1)
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
    // Offer a host with NO_PREF as the constraint;
    // we should get a noPref task immediately since that's all we have
var taskOption = manager.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption.isDefined)
// Tell it the task has finished
manager.handleSuccessfulTask(0, createTaskResult(0))
assert(sched.endedTasks(0) === Success)
assert(sched.finishedManagers.contains(manager))
}
test("multiple offers with no preferences") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = FakeTask.createTaskSet(3)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES)
// First three offers should all find tasks
for (i <- 0 until 3) {
var taskOption = manager.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption.isDefined)
val task = taskOption.get
assert(task.executorId === "exec1")
}
assert(sched.startedTasks.toSet === Set(0, 1, 2))
// Re-offer the host -- now we should get no more tasks
assert(manager.resourceOffer("exec1", "host1", NO_PREF) === None)
// Finish the first two tasks
manager.handleSuccessfulTask(0, createTaskResult(0))
manager.handleSuccessfulTask(1, createTaskResult(1))
assert(sched.endedTasks(0) === Success)
assert(sched.endedTasks(1) === Success)
assert(!sched.finishedManagers.contains(manager))
// Finish the last task
manager.handleSuccessfulTask(2, createTaskResult(2))
assert(sched.endedTasks(2) === Success)
assert(sched.finishedManagers.contains(manager))
}
test("skip unsatisfiable locality levels") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execC", "host2"))
val taskSet = FakeTask.createTaskSet(1, Seq(TaskLocation("host1", "execB")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
// An executor that is not NODE_LOCAL should be rejected.
assert(manager.resourceOffer("execC", "host2", ANY) === None)
// Because there are no alive PROCESS_LOCAL executors, the base locality level should be
// NODE_LOCAL. So, we should schedule the task on this offered NODE_LOCAL executor before
// any of the locality wait timers expire.
assert(manager.resourceOffer("execA", "host1", ANY).get.index === 0)
}
test("basic delay scheduling") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1", "exec1")),
Seq(TaskLocation("host2", "exec2")),
Seq(TaskLocation("host1"), TaskLocation("host2", "exec2")),
Seq() // Last task has no locality prefs
)
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
// First offer host1, exec1: first task should be chosen
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
assert(manager.resourceOffer("exec1", "host1", PROCESS_LOCAL) == None)
clock.advance(LOCALITY_WAIT)
// Offer host1, exec1 again, at NODE_LOCAL level: the node local (task 2) should
// get chosen before the noPref task
assert(manager.resourceOffer("exec1", "host1", NODE_LOCAL).get.index == 2)
    // Offer host2, exec2, at NODE_LOCAL level: we should choose task 1 (node-local on host2)
assert(manager.resourceOffer("exec2", "host2", NODE_LOCAL).get.index == 1)
    // Offer host2, exec2 again, at NODE_LOCAL level: no node-local task is left,
    // so nothing is scheduled; the noPref task is only offered at the NO_PREF level below
assert(manager.resourceOffer("exec2", "host2", NODE_LOCAL) == None)
clock.advance(LOCALITY_WAIT)
assert(manager.resourceOffer("exec2", "host2", NO_PREF).get.index == 3)
}
test("we do not need to delay scheduling when we only have noPref tasks in the queue") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec3", "host2"))
val taskSet = FakeTask.createTaskSet(3,
Seq(TaskLocation("host1", "exec1")),
Seq(TaskLocation("host2", "exec3")),
Seq() // Last task has no locality prefs
)
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
// First offer host1, exec1: first task should be chosen
assert(manager.resourceOffer("exec1", "host1", PROCESS_LOCAL).get.index === 0)
assert(manager.resourceOffer("exec3", "host2", PROCESS_LOCAL).get.index === 1)
assert(manager.resourceOffer("exec3", "host2", NODE_LOCAL) == None)
assert(manager.resourceOffer("exec3", "host2", NO_PREF).get.index === 2)
}
test("delay scheduling with fallback") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc,
("exec1", "host1"), ("exec2", "host2"), ("exec3", "host3"))
val taskSet = FakeTask.createTaskSet(5,
Seq(TaskLocation("host1")),
Seq(TaskLocation("host2")),
Seq(TaskLocation("host2")),
Seq(TaskLocation("host3")),
Seq(TaskLocation("host2"))
)
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
// First offer host1: first task should be chosen
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
// Offer host1 again: nothing should get chosen
assert(manager.resourceOffer("exec1", "host1", ANY) === None)
clock.advance(LOCALITY_WAIT)
// Offer host1 again: second task (on host2) should get chosen
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 1)
// Offer host1 again: third task (on host2) should get chosen
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 2)
// Offer host2: fifth task (also on host2) should get chosen
assert(manager.resourceOffer("exec2", "host2", ANY).get.index === 4)
// Now that we've launched a local task, we should no longer launch the task for host3
assert(manager.resourceOffer("exec2", "host2", ANY) === None)
clock.advance(LOCALITY_WAIT)
// After another delay, we can go ahead and launch that task non-locally
assert(manager.resourceOffer("exec2", "host2", ANY).get.index === 3)
}
test("delay scheduling with failed hosts") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"),
("exec3", "host3"))
val taskSet = FakeTask.createTaskSet(3,
Seq(TaskLocation("host1")),
Seq(TaskLocation("host2")),
Seq(TaskLocation("host3"))
)
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
// First offer host1: first task should be chosen
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
// After this, nothing should get chosen, because we have separated tasks with unavailable preference
// from the noPrefPendingTasks
assert(manager.resourceOffer("exec1", "host1", ANY) === None)
// Now mark host2 as dead
sched.removeExecutor("exec2")
manager.executorLost("exec2", "host2")
// nothing should be chosen
assert(manager.resourceOffer("exec1", "host1", ANY) === None)
clock.advance(LOCALITY_WAIT * 2)
// task 1 and 2 would be scheduled as nonLocal task
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 1)
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 2)
// all finished
assert(manager.resourceOffer("exec1", "host1", ANY) === None)
assert(manager.resourceOffer("exec2", "host2", ANY) === None)
}
test("task result lost") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = FakeTask.createTaskSet(1)
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
// Tell it the task has finished but the result was lost.
manager.handleFailedTask(0, TaskState.FINISHED, TaskResultLost)
assert(sched.endedTasks(0) === TaskResultLost)
// Re-offer the host -- now we should get task 0 again.
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
}
test("repeated failures lead to task set abortion") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = FakeTask.createTaskSet(1)
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
// Fail the task MAX_TASK_FAILURES times, and check that the task set is aborted
// after the last failure.
(1 to manager.maxTaskFailures).foreach { index =>
val offerResult = manager.resourceOffer("exec1", "host1", ANY)
assert(offerResult.isDefined,
"Expect resource offer on iteration %s to return a task".format(index))
assert(offerResult.get.index === 0)
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
if (index < MAX_TASK_FAILURES) {
assert(!sched.taskSetsFailed.contains(taskSet.id))
} else {
assert(sched.taskSetsFailed.contains(taskSet.id))
}
}
}
test("executors should be blacklisted after task failure, in spite of locality preferences") {
val rescheduleDelay = 300L
val conf = new SparkConf().
set("spark.scheduler.executorTaskBlacklistTime", rescheduleDelay.toString).
      // don't wait to jump locality levels in this test
set("spark.locality.wait", "0")
sc = new SparkContext("local", "test", conf)
// two executors on same host, one on different.
val sched = new FakeTaskScheduler(sc, ("exec1", "host1"),
("exec1.1", "host1"), ("exec2", "host2"))
// affinity to exec1 on host1 - which we will fail.
val taskSet = FakeTask.createTaskSet(1, Seq(TaskLocation("host1", "exec1")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, 4, clock)
{
val offerResult = manager.resourceOffer("exec1", "host1", PROCESS_LOCAL)
assert(offerResult.isDefined, "Expect resource offer to return a task")
assert(offerResult.get.index === 0)
assert(offerResult.get.executorId === "exec1")
// Cause exec1 to fail : failure 1
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
assert(!sched.taskSetsFailed.contains(taskSet.id))
// Ensure scheduling on exec1 fails after failure 1 due to blacklist
assert(manager.resourceOffer("exec1", "host1", PROCESS_LOCAL).isEmpty)
assert(manager.resourceOffer("exec1", "host1", NODE_LOCAL).isEmpty)
assert(manager.resourceOffer("exec1", "host1", RACK_LOCAL).isEmpty)
assert(manager.resourceOffer("exec1", "host1", ANY).isEmpty)
}
// Run the task on exec1.1 - should work, and then fail it on exec1.1
{
val offerResult = manager.resourceOffer("exec1.1", "host1", NODE_LOCAL)
assert(offerResult.isDefined,
"Expect resource offer to return a task for exec1.1, offerResult = " + offerResult)
assert(offerResult.get.index === 0)
assert(offerResult.get.executorId === "exec1.1")
// Cause exec1.1 to fail : failure 2
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
assert(!sched.taskSetsFailed.contains(taskSet.id))
// Ensure scheduling on exec1.1 fails after failure 2 due to blacklist
assert(manager.resourceOffer("exec1.1", "host1", NODE_LOCAL).isEmpty)
}
// Run the task on exec2 - should work, and then fail it on exec2
{
val offerResult = manager.resourceOffer("exec2", "host2", ANY)
assert(offerResult.isDefined, "Expect resource offer to return a task")
assert(offerResult.get.index === 0)
assert(offerResult.get.executorId === "exec2")
// Cause exec2 to fail : failure 3
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
assert(!sched.taskSetsFailed.contains(taskSet.id))
// Ensure scheduling on exec2 fails after failure 3 due to blacklist
assert(manager.resourceOffer("exec2", "host2", ANY).isEmpty)
}
// After reschedule delay, scheduling on exec1 should be possible.
clock.advance(rescheduleDelay)
{
val offerResult = manager.resourceOffer("exec1", "host1", PROCESS_LOCAL)
assert(offerResult.isDefined, "Expect resource offer to return a task")
assert(offerResult.get.index === 0)
assert(offerResult.get.executorId === "exec1")
assert(manager.resourceOffer("exec1", "host1", PROCESS_LOCAL).isEmpty)
// Cause exec1 to fail : failure 4
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
}
// we have failed the same task 4 times now : task id should now be in taskSetsFailed
assert(sched.taskSetsFailed.contains(taskSet.id))
}
test("new executors get added and lost") {
// Assign host2 to rack2
FakeRackUtil.assignHostToRack("host2", "rack2")
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc)
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host1", "execB")),
Seq(TaskLocation("host2", "execC")),
Seq())
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
// Only ANY is valid
assert(manager.myLocalityLevels.sameElements(Array(NO_PREF, ANY)))
// Add a new executor
sched.addExecutor("execD", "host1")
manager.executorAdded()
// Valid locality should contain NODE_LOCAL and ANY
assert(manager.myLocalityLevels.sameElements(Array(NODE_LOCAL, NO_PREF, ANY)))
// Add another executor
sched.addExecutor("execC", "host2")
manager.executorAdded()
// Valid locality should contain PROCESS_LOCAL, NODE_LOCAL, RACK_LOCAL and ANY
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL, ANY)))
// test if the valid locality is recomputed when the executor is lost
sched.removeExecutor("execC")
manager.executorLost("execC", "host2")
assert(manager.myLocalityLevels.sameElements(Array(NODE_LOCAL, NO_PREF, ANY)))
sched.removeExecutor("execD")
manager.executorLost("execD", "host1")
assert(manager.myLocalityLevels.sameElements(Array(NO_PREF, ANY)))
}
test("test RACK_LOCAL tasks") {
// Assign host1 to rack1
FakeRackUtil.assignHostToRack("host1", "rack1")
// Assign host2 to rack1
FakeRackUtil.assignHostToRack("host2", "rack1")
// Assign host3 to rack2
FakeRackUtil.assignHostToRack("host3", "rack2")
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc,
("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
val taskSet = FakeTask.createTaskSet(2,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host1", "execA")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, RACK_LOCAL, ANY)))
// Set allowed locality to ANY
clock.advance(LOCALITY_WAIT * 3)
// Offer host3
// No task is scheduled if we restrict locality to RACK_LOCAL
assert(manager.resourceOffer("execC", "host3", RACK_LOCAL) === None)
// Task 0 can be scheduled with ANY
assert(manager.resourceOffer("execC", "host3", ANY).get.index === 0)
// Offer host2
// Task 1 can be scheduled with RACK_LOCAL
assert(manager.resourceOffer("execB", "host2", RACK_LOCAL).get.index === 1)
}
test("do not emit warning when serialized task is small") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = FakeTask.createTaskSet(1)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES)
assert(!manager.emittedTaskSizeWarning)
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
assert(!manager.emittedTaskSizeWarning)
}
test("emit warning when serialized task is large") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = new TaskSet(Array(new LargeTask(0)), 0, 0, 0, null)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES)
assert(!manager.emittedTaskSizeWarning)
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
assert(manager.emittedTaskSizeWarning)
}
test("Not serializable exception thrown if the task cannot be serialized") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = new TaskSet(Array(new NotSerializableFakeTask(1, 0), new NotSerializableFakeTask(0, 1)), 0, 0, 0, null)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES)
intercept[TaskNotSerializableException] {
manager.resourceOffer("exec1", "host1", ANY)
}
assert(manager.isZombie)
}
test("abort the job if total size of results is too large") {
val conf = new SparkConf().set("spark.driver.maxResultSize", "2m")
sc = new SparkContext("local", "test", conf)
def genBytes(size: Int) = { (x: Int) =>
val bytes = Array.ofDim[Byte](size)
scala.util.Random.nextBytes(bytes)
bytes
}
    // multiple 1k results
    val r = sc.makeRDD(0 until 10, 10).map(genBytes(1024)).collect()
    assert(10 === r.size)
// single 10M result
val thrown = intercept[SparkException] {sc.makeRDD(genBytes(10 << 20)(0), 1).collect()}
assert(thrown.getMessage().contains("bigger than spark.driver.maxResultSize"))
// multiple 1M results
val thrown2 = intercept[SparkException] {
sc.makeRDD(0 until 10, 10).map(genBytes(1 << 20)).collect()
}
assert(thrown2.getMessage().contains("bigger than spark.driver.maxResultSize"))
}
test("speculative and noPref task should be scheduled after node-local") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host2"), TaskLocation("host1")),
Seq(),
Seq(TaskLocation("host3", "execC")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
assert(manager.resourceOffer("execA", "host1", PROCESS_LOCAL).get.index === 0)
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL) == None)
assert(manager.resourceOffer("execA", "host1", NO_PREF).get.index == 1)
manager.speculatableTasks += 1
clock.advance(LOCALITY_WAIT)
// schedule the nonPref task
assert(manager.resourceOffer("execA", "host1", NO_PREF).get.index === 2)
// schedule the speculative task
assert(manager.resourceOffer("execB", "host2", NO_PREF).get.index === 1)
clock.advance(LOCALITY_WAIT * 3)
// schedule non-local tasks
assert(manager.resourceOffer("execB", "host2", ANY).get.index === 3)
}
test("node-local tasks should be scheduled right away when there are only node-local and no-preference tasks") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1")),
Seq(TaskLocation("host2")),
Seq(),
Seq(TaskLocation("host3")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
// node-local tasks are scheduled without delay
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL).get.index === 0)
assert(manager.resourceOffer("execA", "host2", NODE_LOCAL).get.index === 1)
assert(manager.resourceOffer("execA", "host3", NODE_LOCAL).get.index === 3)
assert(manager.resourceOffer("execA", "host3", NODE_LOCAL) === None)
// schedule no-preference after node local ones
assert(manager.resourceOffer("execA", "host3", NO_PREF).get.index === 2)
}
test("SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execB", "host2"))
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1")),
Seq(TaskLocation("host2")),
Seq(ExecutorCacheTaskLocation("host1", "execA")),
Seq(ExecutorCacheTaskLocation("host2", "execB")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
// process-local tasks are scheduled first
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL).get.index === 2)
assert(manager.resourceOffer("execB", "host2", NODE_LOCAL).get.index === 3)
// node-local tasks are scheduled without delay
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL).get.index === 0)
assert(manager.resourceOffer("execB", "host2", NODE_LOCAL).get.index === 1)
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL) == None)
assert(manager.resourceOffer("execB", "host2", NODE_LOCAL) == None)
}
test("SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished") {
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execB", "host2"))
val taskSet = FakeTask.createTaskSet(3,
Seq(),
Seq(ExecutorCacheTaskLocation("host1", "execA")),
Seq(ExecutorCacheTaskLocation("host2", "execB")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
// process-local tasks are scheduled first
assert(manager.resourceOffer("execA", "host1", PROCESS_LOCAL).get.index === 1)
assert(manager.resourceOffer("execB", "host2", PROCESS_LOCAL).get.index === 2)
// no-pref tasks are scheduled without delay
assert(manager.resourceOffer("execA", "host1", PROCESS_LOCAL) == None)
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL) == None)
assert(manager.resourceOffer("execA", "host1", NO_PREF).get.index === 0)
assert(manager.resourceOffer("execA", "host1", ANY) == None)
}
test("Ensure TaskSetManager is usable after addition of levels") {
// Regression test for SPARK-2931
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc)
val taskSet = FakeTask.createTaskSet(2,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host2", "execB.1")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
// Only ANY is valid
assert(manager.myLocalityLevels.sameElements(Array(ANY)))
// Add a new executor
sched.addExecutor("execA", "host1")
sched.addExecutor("execB.2", "host2")
manager.executorAdded()
assert(manager.pendingTasksWithNoPrefs.size === 0)
// Valid locality should contain PROCESS_LOCAL, NODE_LOCAL and ANY
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, ANY)))
assert(manager.resourceOffer("execA", "host1", ANY) !== None)
clock.advance(LOCALITY_WAIT * 4)
assert(manager.resourceOffer("execB.2", "host2", ANY) !== None)
sched.removeExecutor("execA")
sched.removeExecutor("execB.2")
manager.executorLost("execA", "host1")
manager.executorLost("execB.2", "host2")
clock.advance(LOCALITY_WAIT * 4)
sched.addExecutor("execC", "host3")
manager.executorAdded()
// Prior to the fix, this line resulted in an ArrayIndexOutOfBoundsException:
assert(manager.resourceOffer("execC", "host3", ANY) !== None)
}
test("Test that locations with HDFSCacheTaskLocation are treated as PROCESS_LOCAL.") {
// Regression test for SPARK-2931
sc = new SparkContext("local", "test")
val sched = new FakeTaskScheduler(sc,
("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
val taskSet = FakeTask.createTaskSet(3,
Seq(HostTaskLocation("host1")),
Seq(HostTaskLocation("host2")),
Seq(HDFSCacheTaskLocation("host3")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, ANY)))
sched.removeExecutor("execA")
manager.executorAdded()
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, ANY)))
sched.removeExecutor("execB")
manager.executorAdded()
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, ANY)))
sched.removeExecutor("execC")
manager.executorAdded()
assert(manager.myLocalityLevels.sameElements(Array(ANY)))
}
def createTaskResult(id: Int): DirectTaskResult[Int] = {
val valueSer = SparkEnv.get.serializer.newInstance()
new DirectTaskResult[Int](valueSer.serialize(id), mutable.Map.empty, new TaskMetrics)
}
}
| trueyao/spark-lever | core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala | Scala | apache-2.0 | 31,064 |
package hercules.protocols
import hercules.entities.ProcessingUnit
import hercules.entities.notification.NotificationUnit
import spray.json._
/**
* Import this object to gain access to the messaging protocol of
* Hercules.
*
* All messages which are to be sent globally need to be defined in here!
*/
object HerculesMainProtocol {
//--------------------------------------------------------------
// GENERAL MESSAGES
//--------------------------------------------------------------
/**
* This trait is the base for all messages in the Hercules application
* All messages to be parsed need to extend this!
*/
sealed trait HerculesMessage {
protected val name: String = getClass.getSimpleName
/**
* The values of the object to be mapped to json
* Override to add more information about an object
* The minimum information added is the type of the message.
* @return
*/
protected def mappedValues: Map[String, JsValue] = Map("type" -> (JsString(name)))
/**
* Provides the json representation of a message.
* Will as a minimum contain the type of the message.
* @return
*/
def toJson: JsValue = JsObject(mappedValues)
}
/**
* Use this to acknowledge e.g. that some work was accepted
*/
case class Acknowledge() extends HerculesMessage {}
/**
* Use this to Reject a something, like a work load. Optionally include a
* reason.
* @param reason what something was rejected.
*/
case class Reject(reason: Option[String] = None) extends HerculesMessage {
override def mappedValues =
super.mappedValues.updated(
"reason", JsString(reason.getOrElse("Unknown reason")))
}
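  // Illustrative JSON shapes produced by `toJson` (hand-written, not captured from
  // a running system; field order may differ):
  //
  //   Acknowledge().toJson          // => {"type":"Acknowledge"}
  //   Reject(Some("unit is busy")).toJson
  //                                 // => {"type":"Reject","reason":"unit is busy"}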
/**
   * TODO: This should probably really be moved out into a MasterProtocol,
   * since it's not a message which all actors are expected to be able to
   * handle. /JD 20141113
   * Request the state from the master. If no unit has been specified,
   * the full state should be returned. This can be used, for example, to look up
   * the details of a single processing unit.
* @param unitName the name of the processing unit to look for
*/
case class RequestMasterState(unitName: Option[String] = None) extends HerculesMessage {
override def mappedValues =
super.mappedValues.updated(
"unitName", JsString(unitName.getOrElse("Unknown unit")))
}
//--------------------------------------------------------------
// MESSAGES ABOUT FINDING AND FORGETTING PROCESSING UNITS
//--------------------------------------------------------------
/**
* Base trait for processing unit messages. Any message type which relates
* to a processing unit should extend this.
*/
sealed trait ProcessingMessage extends HerculesMessage
/**
* The base trait for the messages encapsulating the state of the
* ProcessingUnit, which in turn defines what is to be done with it.
*/
trait ProcessingUnitMessage extends ProcessingMessage {
val unit: ProcessingUnit
override def mappedValues =
super.mappedValues.updated(
"unit", unit.toJson)
}
/**
   * Used when you don't have a ProcessingUnit, but just a name,
* e.g. when looking up a processing unit to see which state it's in.
*/
trait ProcessingUnitNameMessage extends ProcessingMessage {
val unitName: String
override def mappedValues =
super.mappedValues.updated(
"unitname", JsString(unitName))
}
/**
* Used to request a check if there are any processing units which should
* be forgotten by the actor. E.g. the ProcessingUnitWatcherActor might
* want to perform some changes on the file system (or in a database) and
* then reintroduce it into the general flow.
*/
case object RequestProcessingUnitMessageToForget extends ProcessingMessage
/**
* Used to indicate the a processing unit has been found.
* @param unit which was found.
*/
case class FoundProcessingUnitMessage(unit: ProcessingUnit) extends ProcessingUnitMessage
/**
* Used to indicate that a processing unit should be forgotten (the exact
   * meaning of that is determined by the context).
* @param unitName of processing unit to forget.
*/
case class ForgetProcessingUnitMessage(unitName: String) extends ProcessingUnitNameMessage
//--------------------------------------------------------------
// MESSAGES ABOUT DEMULTIPLEXING
//--------------------------------------------------------------
/**
* Base trait for all demultiplexing messages
*/
sealed trait DemultiplexingMessage extends ProcessingMessage
/**
   * Sent to indicate a request for a demultiplexing message, e.g. if one wants
* a new processing unit to demultiplex.
*/
case object RequestDemultiplexingProcessingUnitMessage extends DemultiplexingMessage
/**
* Send to start demultiplexing a processing unit.
* @param unit processing unit
*/
case class StartDemultiplexingProcessingUnitMessage(unit: ProcessingUnit) extends DemultiplexingMessage with ProcessingUnitMessage
/**
* Send to stop demultiplexing of a processing unit.
   * @param unitName name of the processing unit
*/
case class StopDemultiplexingProcessingUnitMessage(unitName: String) extends DemultiplexingMessage with ProcessingUnitNameMessage
/**
* Send to indicate the demultiplexing of a processing unit has finished.
* @param unit processing unit
*/
case class FinishedDemultiplexingProcessingUnitMessage(unit: ProcessingUnit) extends DemultiplexingMessage with ProcessingUnitMessage
/**
* Send to indicate the demultiplexing of a processing unit has failed.
* @param unit processing unit
* @param reason why it failed.
*/
case class FailedDemultiplexingProcessingUnitMessage(unit: ProcessingUnit, reason: String) extends DemultiplexingMessage with ProcessingUnitMessage {
override def mappedValues =
super.mappedValues.updated(
"reason", JsString(reason))
}
/**
* Send to indicate the demultiplexing of a processing unit should be restarted.
* @param unitName of unit which should be restarted
*/
case class RestartDemultiplexingProcessingUnitMessage(unitName: String) extends DemultiplexingMessage with ProcessingUnitNameMessage
/**
* Send to indicate the demultiplexing of a processing unit should be forgotten,
* this would typically indicate that demultiplexing should be reinitiated.
* @param unitName of unit which should be forgotten.
*/
case class ForgetDemultiplexingProcessingUnitMessage(unitName: String) extends DemultiplexingMessage with ProcessingUnitNameMessage
//--------------------------------------------------------------
// MESSAGES ABOUT QUALITY CONTROL
// TODO None of these messages are used yet. /JD 20141114
//--------------------------------------------------------------
/**
* Start the QC checking process for a demultiplexing unit
* @param unit processing unit
*/
case class StartQCProcessingUnitMessage(unit: ProcessingUnit) extends ProcessingUnitMessage
/**
* Finished the QC checking process for a demultiplexing unit
* @param unit processing unit
*/
case class FinishedQCProcessingUnitMessage(unit: ProcessingUnit) extends ProcessingUnitMessage
/**
* Failed the QC checking process for a demultiplexing unit
* @param unit processing unit
*/
case class FailedQCProcessingUnitMessage(unit: ProcessingUnit) extends ProcessingUnitMessage
//--------------------------------------------------------------
// MESSAGES NOTIFICATIONS
//--------------------------------------------------------------
/**
* The base trait for the messages encapsulating the notifications to be
* sent out. Contains the NotificationUnit.
*/
sealed trait NotificationUnitMessage extends HerculesMessage {
val unit: NotificationUnit
}
/**
* Send this notification unit
   * @param unit the notification unit to send
*/
case class SendNotificationUnitMessage(unit: NotificationUnit) extends NotificationUnitMessage
/**
* This notification unit has been sent
   * @param unit the notification unit that was sent
*/
case class SentNotificationUnitMessage(unit: NotificationUnit) extends NotificationUnitMessage
/**
   * Failed to send this notification unit
   * @param unit the notification unit that could not be sent
   * @param reason why sending failed
*/
case class FailedNotificationUnitMessage(unit: NotificationUnit, reason: String) extends NotificationUnitMessage
  // @TODO Extend this with all messages that we should be able to send!
} | johandahlberg/hercules | src/main/scala/hercules/protocols/HerculesMainProtocol.scala | Scala | mit | 8,460 |
package amora.backend.services
import java.io.File
import akka.actor.ActorSystem
import amora.backend.Logger
import amora.backend.indexer.ArtifactFetcher
import amora.converter.protocol.Artifact
import amora.converter.protocol.Project
class IndexArtifacts(val system: ActorSystem, override val logger: Logger) extends ScalaService with ArtifactFetcher {
import amora.api._
override def cacheLocation = new File(system.settings.config.getString("app.storage.artifact-repo"))
def run(turtleReq: String): String = {
val artifacts = sparqlQuery"""
prefix p:<http://amora.center/kb/amora/Schema/Project/>
prefix a:<http://amora.center/kb/amora/Schema/Artifact/>
select * where {
[a a:] a:owner [p:name ?pname] ; a:organization ?org ; a:name ?name ; a:version ?version .
}
""".runOnModel(turtleModel(turtleReq)).map { row ⇒
Artifact(Project(row.string("pname")), row.string("org"), row.string("name"), row.string("version"))
}
downloadAndIndexArtifacts(artifacts)
response(s"""
@prefix service:<http://amora.center/kb/Schema/Service/> .
@prefix response:<http://amora.center/kb/ServiceResponse/> .
<#this>
a response: ;
service:requestId <$requestId> ;
.
""")
}
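  // Minimal sketch of a Turtle request `run` can consume; prefixes and predicate
  // names are taken from the SPARQL query above, the literal values are made up:
  //
  //   @prefix p:<http://amora.center/kb/amora/Schema/Project/> .
  //   @prefix a:<http://amora.center/kb/amora/Schema/Artifact/> .
  //   [ a a: ;
  //     a:owner [ p:name "someProject" ] ;
  //     a:organization "org.example" ;
  //     a:name "example-lib" ;
  //     a:version "1.0.0" ] .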
}
| sschaef/tooling-research | backend/src/main/scala/amora/backend/services/IndexArtifacts.scala | Scala | mit | 1,277 |
package bgstats.model
import monix.reactive.Observable
trait ApplicationStore[M] extends AllCommands {
val state$: Observable[ApplicationState[M]]
}
| BardurArantsson/bg-stats | src/main/scala/bgstats/model/ApplicationStore.scala | Scala | apache-2.0 | 155 |
package com.karasiq.shadowcloud.metadata.imageio
import com.typesafe.config.Config
import com.karasiq.shadowcloud.metadata.{MetadataParser, MetadataProvider, MimeDetector}
class ImageIOMetadataProvider(rootConfig: Config) extends MetadataProvider {
protected object imageioConfig {
val config = rootConfig.getConfig("metadata.imageio")
val thumbnailsConfig = config.getConfig("thumbnails")
}
val detectors: Seq[MimeDetector] = Vector.empty
val parsers: Seq[MetadataParser] = Vector(
ImageIOThumbnailCreator(imageioConfig.thumbnailsConfig)
)
}
| Karasiq/shadowcloud | metadata/imageio/src/main/scala/com/karasiq/shadowcloud/metadata/imageio/ImageIOMetadataProvider.scala | Scala | apache-2.0 | 570 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.kafka
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import com.typesafe.scalalogging.LazyLogging
import org.joda.time.Instant
import org.locationtech.geomesa.features.SerializationOption.SerializationOptions
import org.locationtech.geomesa.features.kryo.KryoFeatureSerializer
import org.opengis.feature.simple.{SimpleFeatureType, SimpleFeature}
sealed trait GeoMessage {
def timestamp: Instant
}
object GeoMessage {
/** @return a new [[Clear]] message with the current time */
def clear(): Clear = Clear(Instant.now)
/** @return a new [[Clear]] message with the given feature ``id`` and current time */
def delete(id: String): Delete = Delete(Instant.now, id)
/** @return a new [[CreateOrUpdate]] message with the given ``sf`` and current time */
def createOrUpdate(sf: SimpleFeature): CreateOrUpdate = CreateOrUpdate(Instant.now, sf)
}
/** Create a new simple feature or update an existing [[SimpleFeature]]
*
* @param feature the [[SimpleFeature]]
*/
case class CreateOrUpdate(override val timestamp: Instant, feature: SimpleFeature) extends GeoMessage
/** Delete an existing [[SimpleFeature]]
*
* @param id the id of the simple feature
*/
case class Delete(override val timestamp: Instant, id: String) extends GeoMessage
/** Delete all [[org.opengis.feature.simple.SimpleFeature]]s
*/
case class Clear(override val timestamp: Instant) extends GeoMessage
/** Encodes [[GeoMessage]]s. [[Clear]] and [[Delete]] messages are handled directly. See class
* [[GeoMessageEncoder]] for encoding [[CreateOrUpdate]] messages.
*
* The following encoding is used:
*
* Key: version (1 byte) type (1 byte) timestamp (8 bytes)
*
* The current version is 1
* The type is 'C' for create or update, 'D' for delete and 'X' for clear
*
* Value
* CreateOrUpdate: serialized simple feature
* Delete: simple feature ID as UTF-8 bytes
* Clear: empty
*
*/
object GeoMessageEncoder {
val version: Byte = 1
val createOrUpdateType: Byte = 'C'
val deleteType: Byte = 'D'
val clearType: Byte = 'X'
private val EMPTY = Array.empty[Byte]
}
/** Encodes [[GeoMessage]]s.
*
* @param schema the [[SimpleFeatureType]]; required to serialize [[CreateOrUpdate]] messages
*/
class GeoMessageEncoder(schema: SimpleFeatureType) {
import GeoMessageEncoder._
private val serializer = new KryoFeatureSerializer(schema, SerializationOptions.withUserData)
def encodeKey(msg: GeoMessage): Array[Byte] = {
val msgType: Byte = msg match {
case c: CreateOrUpdate => createOrUpdateType
case d: Delete => deleteType
case x: Clear => clearType
case u => throw new IllegalArgumentException(s"Invalid message: '$u'")
}
val bb = ByteBuffer.allocate(10)
bb.put(version)
bb.put(msgType)
bb.putLong(msg.timestamp.getMillis)
bb.array()
}
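  // Worked example of the 10-byte key layout (values chosen for illustration):
  //
  //   encodeKey(Delete(new Instant(1000L), "fid-1"))
  //   // => [ 0x01 | 'D' | 0x00 0x00 0x00 0x00 0x00 0x00 0x03 0xE8 ]
  //   //      version  type  timestamp millis as a big-endian long (1000 = 0x3E8)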
def encodeMessage(msg: GeoMessage): Array[Byte] = msg match {
case c: CreateOrUpdate =>
encodeCreateOrUpdateMessage(c)
case d: Delete =>
encodeDeleteMessage(d)
case x: Clear =>
encodeClearMessage(x)
case u =>
throw new IllegalArgumentException(s"Invalid message: '$u'")
}
def encodeCreateOrUpdateMessage(msg: CreateOrUpdate): Array[Byte] = serializer.serialize(msg.feature)
def encodeDeleteMessage(msg: Delete): Array[Byte] = msg.id.getBytes(StandardCharsets.UTF_8)
def encodeClearMessage(msg: Clear): Array[Byte] = EMPTY
}
/** Decodes an encoded [[GeoMessage]].
*
* @param schema the [[SimpleFeatureType]]; required to deserialize [[CreateOrUpdate]] messages
*/
class GeoMessageDecoder(schema: SimpleFeatureType) extends LazyLogging {
case class MsgKey(version: Byte, msgType: Byte, ts: Instant)
private val serializer = new KryoFeatureSerializer(schema, SerializationOptions.withUserData)
/** Decodes an encoded [[GeoMessage]] represented by the given ``key`` and ``msg``.
*
* @param key the encoded message key
* @param msg the encoded message body
* @return the decoded [[GeoMessage]]
*/
def decode(key: Array[Byte], msg: Array[Byte]): GeoMessage = {
val MsgKey(version, msgType, ts) = decodeKey(key)
if (version == 1) {
decodeVersion1(msgType, ts, msg)
} else {
throw new IllegalArgumentException(s"Unknown serialization version: $version")
}
}
protected def decodeKey(key: Array[Byte]): MsgKey = {
def keyToString = if (key == null) "null" else s"'${new String(key, StandardCharsets.UTF_8)}'"
if (key == null || key.length == 0) {
throw new IllegalArgumentException(
s"Invalid message key: $keyToString. Cannot determine serialization version.")
}
val buffer = ByteBuffer.wrap(key)
val version = buffer.get()
if (version == 1) {
if (key.length != 10) {
throw new IllegalArgumentException(
s"Invalid version 1 message key: $keyToString. Expecting 10 bytes but found ${key.length}.")
}
val msgType = buffer.get()
val ts = new Instant(buffer.getLong)
MsgKey(version, msgType, ts)
} else {
throw new IllegalArgumentException(
s"Invalid message key: $keyToString. Unknown serialization version: $version")
}
}
private def decodeVersion1(msgType: Byte, ts: Instant, msg: Array[Byte]): GeoMessage = msgType match {
case GeoMessageEncoder.createOrUpdateType =>
val sf = serializer.deserialize(msg)
CreateOrUpdate(ts, sf)
case GeoMessageEncoder.deleteType =>
val id = new String(msg, StandardCharsets.UTF_8)
Delete(ts, id)
case GeoMessageEncoder.clearType =>
Clear(ts)
case _ =>
throw new IllegalArgumentException("Unknown message type: " + msgType.toChar)
}
}
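// Round-trip sketch (assumes a SimpleFeatureType `sft` and a SimpleFeature `sf`
// are in scope; these names are not defined in this file):
//
//   val encoder = new GeoMessageEncoder(sft)
//   val decoder = new GeoMessageDecoder(sft)
//   val msg     = GeoMessage.createOrUpdate(sf)
//   val decoded = decoder.decode(encoder.encodeKey(msg), encoder.encodeMessage(msg))
//   // decoded is a CreateOrUpdate carrying an equivalent SimpleFeature and timestamp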
| MutahirKazmi/geomesa | geomesa-kafka/geomesa-kafka-datastore/geomesa-kafka-datastore-common/src/main/scala/org/locationtech/geomesa/kafka/GeoMessage.scala | Scala | apache-2.0 | 6,216 |
package org.zalando.jsonapi.json.circe
import io.circe._
import io.circe.generic.auto._
import io.circe.syntax._
import org.zalando.jsonapi.json.FieldNames
import org.zalando.jsonapi.model.JsonApiObject._
import org.zalando.jsonapi.model.Links.Link
import org.zalando.jsonapi.model.RootObject.{Data, ResourceObject, ResourceObjects}
import org.zalando.jsonapi.model.{Error, _}
// scalastyle:off public.methods.have.type
trait CirceJsonapiEncoders {
def valueToJson(generalValue: Value): Json = generalValue match {
case NullValue =>
Json.Null
case StringValue(value) =>
Json.fromString(value)
case BooleanValue(value) =>
Json.fromBoolean(value)
case NumberValue(value) =>
Json.fromBigDecimal(value)
case JsArrayValue(values) =>
Json.fromValues(values.map(valueToJson))
case JsObjectValue(values) =>
Json.fromFields(values.map {
case Attribute(name, value) =>
name -> valueToJson(value)
})
}
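  // Illustrative mapping (hand-written expectations, not captured output):
  //
  //   valueToJson(JsObjectValue(List(Attribute("count", NumberValue(1)))))
  //   // => { "count" : 1 }
  //   valueToJson(JsArrayValue(List(StringValue("a"), NullValue)))
  //   // => [ "a", null ]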
def jsonFromOptionalFields(entries: (String, Option[Json])*): Json = {
Json.fromFields(entries.flatMap {
case (name, valueOption) => valueOption.map(name -> _)
})
}
implicit def valueEncoder[V <: Value] = Encoder.instance[V](valueToJson)
implicit val attributeEncoder = Encoder.instance[Attribute] {
case Attribute(name, value) =>
Json.fromFields(Seq(name -> value.asJson))
}
implicit val attributesEncoder = Encoder.instance[Attributes] {
case Seq(attributes @ _ *) =>
attributes.map(_.asJson).reduce(_.deepMerge(_))
}
implicit val linkEncoder = Encoder.instance[Link] { link =>
val (name: String, href: String, metaOpt: Option[Meta]) = link match {
case Links.Self(url, None) => (FieldNames.`self`, url, None)
case Links.Self(url, Some(meta)) => (FieldNames.`self`, url, Some(meta))
case Links.About(url, None) => (FieldNames.`about`, url, None)
case Links.About(url, Some(meta)) => (FieldNames.`about`, url, Some(meta))
case Links.First(url, None) => (FieldNames.`first`, url, None)
case Links.First(url, Some(meta)) => (FieldNames.`first`, url, Some(meta))
case Links.Last(url, None) => (FieldNames.`last`, url, None)
case Links.Last(url, Some(meta)) => (FieldNames.`last`, url, Some(meta))
case Links.Next(url, None) => (FieldNames.`next`, url, None)
case Links.Next(url, Some(meta)) => (FieldNames.`next`, url, Some(meta))
case Links.Prev(url, None) => (FieldNames.`prev`, url, None)
case Links.Prev(url, Some(meta)) => (FieldNames.`prev`, url, Some(meta))
case Links.Related(url, None) => (FieldNames.`related`, url, None)
case Links.Related(url, Some(meta)) => (FieldNames.`related`, url, Some(meta))
}
metaOpt match {
case None => Json.fromFields(Seq(name -> Json.fromString(href)))
case Some(meta) =>
val linkObjectJson = Json.fromFields(Seq("href" -> Json.fromString(href), "meta" -> meta.asJson))
Json.fromFields(Seq(name -> linkObjectJson))
}
}
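  // Illustrative output shapes for the link encoder above (hand-written, not
  // captured from a test run):
  //
  //   Links.Self("/articles/1", None).asJson
  //   // => { "self" : "/articles/1" }
  //   Links.Self("/articles/1", Some(meta)).asJson
  //   // => { "self" : { "href" : "/articles/1", "meta" : { ... } } }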
implicit val linksEncoder = Encoder.instance[Links](_.map(_.asJson).reduce(_.deepMerge(_)))
def dataToJson(data: Data): Json = {
data match {
case ro: ResourceObject =>
ro.asJson
case ros: ResourceObjects =>
ros.asJson
}
}
lazy implicit val relationshipEncoder = Encoder.instance[Relationship](relationship => {
jsonFromOptionalFields(
FieldNames.`links` -> relationship.links.map(_.asJson),
      // TODO: there's probably a cleaner way here. There's a circular dependency Data -> ResourceObject(s) -> Relationship(s) -> Data that's giving circe problems
FieldNames.`data` -> relationship.data.map(dataToJson)
)
})
implicit val relationshipsEncoder = Encoder.instance[Relationships](relationships =>
Json.fromFields(relationships.map {
case (name, value) => name -> value.asJson
}))
implicit val jsonApiEncoder = Encoder.instance[JsonApi] {
case Seq(jsonApiPropertys @ _ *) =>
Json.fromFields(jsonApiPropertys.map {
case JsonApiProperty(name, value) =>
name -> value.asJson
})
}
implicit val metaEncoder = Encoder.instance[Meta](meta => {
Json.fromFields(meta.toSeq.map {
case (name, value) => name -> value.asJson
})
})
implicit val errorSourceEncoder = Encoder.instance[ErrorSource](errorSource => {
jsonFromOptionalFields(
FieldNames.`pointer` -> errorSource.pointer.map(Json.fromString),
FieldNames.`parameter` -> errorSource.parameter.map(Json.fromString)
)
})
implicit val errorEncoder = Encoder.instance[Error](error => {
jsonFromOptionalFields(
FieldNames.`id` -> error.id.map(Json.fromString),
FieldNames.`status` -> error.status.map(Json.fromString),
FieldNames.`code` -> error.code.map(Json.fromString),
FieldNames.`title` -> error.title.map(Json.fromString),
FieldNames.`detail` -> error.detail.map(Json.fromString),
FieldNames.`links` -> error.links.map(_.asJson),
FieldNames.`meta` -> error.meta.map(_.asJson),
FieldNames.`source` -> error.source.map(_.asJson)
)
})
implicit val resourceObjectEncoder = Encoder.instance[ResourceObject](resourceObject => {
jsonFromOptionalFields(
FieldNames.`type` -> Option(Json.fromString(resourceObject.`type`)),
FieldNames.`id` -> resourceObject.id.map(Json.fromString),
FieldNames.`attributes` -> resourceObject.attributes.map(_.asJson),
FieldNames.`relationships` -> resourceObject.relationships.map(_.asJson),
FieldNames.`links` -> resourceObject.links.map(_.asJson),
FieldNames.`meta` -> resourceObject.meta.map(_.asJson)
)
})
implicit val resourceObjectsEncoder = Encoder.instance[ResourceObjects] {
case ResourceObjects(resourceObjects) =>
Json.fromValues(resourceObjects.map(_.asJson))
}
lazy implicit val dataEncoder = Encoder.instance[Data](dataToJson)
implicit val includedEncoder = Encoder.instance[Included](_.resourceObjects.asJson)
implicit val rootObjectEncoder = Encoder.instance[RootObject](rootObject => {
jsonFromOptionalFields(
FieldNames.`data` -> rootObject.data.map(_.asJson),
FieldNames.`links` -> rootObject.links.map(_.asJson),
FieldNames.`errors` -> rootObject.errors.map(_.asJson),
FieldNames.`meta` -> rootObject.meta.map(_.asJson),
FieldNames.`included` -> rootObject.included.map(_.asJson),
FieldNames.`jsonapi` -> rootObject.jsonApi.map(_.asJson)
)
})
}
object CirceJsonapiEncoders extends CirceJsonapiEncoders
| texvex/scala-jsonapi | src/main/scala/org/zalando/jsonapi/json/circe/CirceJsonapiEncoders.scala | Scala | mit | 6,626 |
package com.yetu.oauth2provider.oauth2.services
import com.yetu.oauth2provider.controllers.authentication.providers.EmailPasswordProvider
import com.yetu.oauth2provider.oauth2.models.{ ImplicitFlowException, ImplicitFlowSyntaxException, YetuUser }
import com.yetu.oauth2provider.services.data.interface.IPersonService
import play.api.Logger
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scalaoauth2.provider._
class ImplicitGrantFlowService[U](personService: IPersonService) {
lazy val logger = Logger("com.yetu.oauth2provider.oauth2.services.ImplicitGrantFlowService")
def validateRequest(implicit request: AuthorizationRequest): Future[YetuUser] = {
parseHeaders { email =>
personService.findByEmailAndProvider(email, EmailPasswordProvider.EmailPassword).map {
case Some(user) => user.asInstanceOf[YetuUser]
case _ => throw new ImplicitFlowException("user not found")
}
}
}
//TODO change exception class SignatureSyntaxException to something else
def parseHeaders(callback: (String) => Future[YetuUser])(implicit request: AuthorizationRequest): Future[YetuUser] = {
request.headers.get("email").map(_.head) match {
case Some(e) => callback(e)
case _ => Future.failed(ImplicitFlowSyntaxException("Missing parameter [ email ]"))
}
}
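  // Usage sketch (the request construction and header value are assumptions; only
  // the required "email" header name comes from `parseHeaders` above):
  //
  //   val request = new AuthorizationRequest(
  //     headers = Map("email" -> Seq("[email protected]")), params = Map.empty)
  //   service.validateRequest(request) // Future[YetuUser]; fails with
  //                                    // ImplicitFlowException if no user matches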
}
| yetu/oauth2-provider | app/com/yetu/oauth2provider/oauth2/services/ImplicitGrantFlowService.scala | Scala | mit | 1,377 |
/*
* TreeTable.scala
* (TreeTable)
*
* Copyright (c) 2013-2020 Hanns Holger Rutz. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.treetable
import de.sciss.treetable.j.event.TreeColumnModelListener
import java.awt
import javax.swing.tree.TreePath
import javax.swing.{DropMode, Icon, event => jse, table => jtab, tree => jtree}
import scala.collection.immutable.{IndexedSeq => Vec}
import scala.language.implicitConversions
import scala.swing.{Color, Component, Dimension, Publisher, Reactions, SetWrapper}
object TreeTable {
private trait JTreeTableMixin { def tableWrapper: TreeTable[_, _] }
val Path: Vec.type = Vec
type Path[+A] = Vec[A]
implicit private[treetable] def pathToTreePath(p: Path[Any]): jtree.TreePath = {
    // TreePath must be non-null and non-empty
// if (p.isEmpty) null else {
val array: Array[AnyRef] = p.iterator.map(_.asInstanceOf[AnyRef]).toArray
new jtree.TreePath(array)
// }
}
implicit private[treetable] def treePathToPath[A](tp: jtree.TreePath): Path[A] = {
if (tp == null) Path.empty
else tp.getPath.iterator.map(_.asInstanceOf[A]).toIndexedSeq
}
case class DropLocation[+A](private val peer: j.TreeTable.DropLocation) {
def path: Path[A] = peer.getPath
/** Returns the child index within the last branch of the path.
*
* @return the index at which a drop occurs within the children of
* the branch denoted by `getPath`. For example, `0` means
* the drop happens before the first child, `1` means it
* happens after the first child. For `ON` drop mode, `-1`
* indicates that the drop occurs above the parent node.
*/
def index : Int = peer.getIndex
def row : Int = peer.getRow
def column: Int = peer.getColumn
def isInsertRow : Boolean = peer.isInsertRow
def isInsertColumn: Boolean = peer.isInsertColumn
}
}
class TreeTable[A, Col <: TreeColumnModel[A]](treeModel0: TreeModel[A], treeColumnModel0: Col,
tableColumnModel0: jtab.TableColumnModel)
extends Component /* with Scrollable.Wrapper */ {
me =>
import TreeTable.{Path, pathToTreePath, treePathToPath}
def this(treeModel0: TreeModel[A], treeColumnModel0: Col) =
this(treeModel0, treeColumnModel0, null)
private val /* var */ _treeModel = treeModel0
private val /* var */ _treeColumnModel = treeColumnModel0
// private var _tableColumnModel = tableColumnModel0
private var _renderer: TreeTableCellRenderer = _
def treeModel: TreeModel[A] = _treeModel
def treeColumnModel: Col = _treeColumnModel
// def tableColumnModel: jtab.TableColumnModel = _tableColumnModel
// def tableModel: jtab.TableModel
def renderer: TreeTableCellRenderer = _renderer
def renderer_=(r: TreeTableCellRenderer): Unit = {
val rp = r match {
case w: TreeTableCellRenderer.Wrapped => w.peer
case _ => new j.TreeTableCellRenderer {
def getTreeTableCellRendererComponent(treeTable: j.TreeTable, value: Any, selected: Boolean, hasFocus: Boolean,
row: Int, column: Int): awt.Component = {
val state = TreeTableCellRenderer.State(selected = selected, focused = hasFocus, tree = None)
r.getRendererComponent(me, value, row = row, column = column, state).peer
}
def getTreeTableCellRendererComponent(treeTable: j.TreeTable, value: Any, selected: Boolean, hasFocus: Boolean,
row: Int, column: Int, expanded: Boolean, leaf: Boolean): awt.Component = {
val state = TreeTableCellRenderer.State(selected = selected, focused = hasFocus,
tree = Some(TreeTableCellRenderer.TreeState(expanded = expanded, leaf = leaf)))
r.getRendererComponent(me, value, row = row, column = column, state).peer
}
}
}
_renderer = r
peer.setDefaultRenderer(classOf[AnyRef], rp)
}
// def editable: Boolean = ...
// def cellValues: Iterator[A] = ...
private def wrapTreeModel(_peer: TreeModel[A]): jtree.TreeModel = new jtree.TreeModel {
jModel =>
private val peer = _peer
def getRoot: AnyRef = peer.root.asInstanceOf[AnyRef]
def getChild(parent: Any, index: Int): AnyRef = peer.getChild(parent.asInstanceOf[A], index).asInstanceOf[AnyRef]
def getChildCount(parent: Any): Int = peer.getChildCount(parent.asInstanceOf[A])
def isLeaf(node: Any): Boolean = peer.isLeaf(node.asInstanceOf[A])
def valueForPathChanged(path: TreePath, newValue: Any): Unit =
peer.valueForPathChanged(path, newValue.asInstanceOf[A]) // XXX TODO: is newValue really an `A`?
def getIndexOfChild(parent: Any, child: Any): Int =
peer.getIndexOfChild(parent.asInstanceOf[A], child.asInstanceOf[A])
private val sync = new AnyRef
private var listeners = Vector.empty[jse.TreeModelListener]
private val reaction: Reactions.Reaction = {
case te: TreeNodesChanged[_] =>
val evt = te.toJava(jModel)
listeners.foreach { l => l.treeNodesChanged(evt) }
case te: TreeNodesInserted[_] =>
val evt = te.toJava(jModel)
listeners.foreach { l => l.treeNodesInserted(evt) }
case te: TreeNodesRemoved[_] =>
val evt = te.toJava(jModel)
listeners.foreach { l => l.treeNodesRemoved(evt) }
case te: TreeStructureChanged[_] =>
val evt = te.toJava(jModel)
listeners.foreach { l => l.treeStructureChanged(evt) }
}
def addTreeModelListener(l: jse.TreeModelListener): Unit =
sync.synchronized {
val start = listeners.isEmpty
listeners :+= l
if (start) peer.reactions += reaction
}
def removeTreeModelListener(l: jse.TreeModelListener): Unit =
sync.synchronized {
val idx = listeners.indexOf(l)
if (idx >= 0) {
listeners = listeners.patch(idx, Vector.empty, 1)
if (listeners.isEmpty) peer.reactions -= reaction
}
}
}
private def wrapTreeColumnModel(_peer: Col): j.TreeColumnModel = new j.TreeColumnModel {
private val peer = _peer
// val peer = _treeColumnModel
def getHierarchicalColumn: Int = peer.hierarchicalColumn
def getColumnClass(column: Int): Class[_] = peer.getColumnClass(column)
def isCellEditable(node: Any, column: Int): Boolean = peer.isCellEditable(node.asInstanceOf[A], column)
def getColumnCount: Int = peer.columnCount
def getColumnName(column: Int): String = peer.getColumnName(column)
def getValueAt(node: Any, column: Int): AnyRef = peer.getValueAt(node.asInstanceOf[A], column).asInstanceOf[AnyRef]
def setValueAt(value: Any, node: Any, column: Int): Unit = peer.setValueAt(value, node.asInstanceOf[A], column)
private val sync = new AnyRef
private var listeners = Vector.empty[TreeColumnModelListener]
private val reaction: Reactions.Reaction = {
case TreeColumnChanged(_, path, column) =>
val evt = new j.event.TreeColumnModelEvent(this, path, column)
listeners.foreach { l =>
l.treeColumnChanged(evt)
}
}
def addTreeColumnModelListener(l: TreeColumnModelListener): Unit =
sync.synchronized {
val start = listeners.isEmpty
listeners :+= l
if (start) peer.reactions += reaction
}
def removeTreeColumnModelListener(l: TreeColumnModelListener): Unit =
sync.synchronized {
val idx = listeners.indexOf(l)
if (idx >= 0) {
listeners = listeners.patch(idx, Vector.empty, 1)
if (listeners.isEmpty) peer.reactions -= reaction
}
}
}
override lazy val peer: j.TreeTable =
new j.TreeTable(wrapTreeModel(treeModel0), wrapTreeColumnModel(treeColumnModel0), tableColumnModel0)
with TreeTable.JTreeTableMixin with SuperMixin {
def tableWrapper: TreeTable[A, Col] = TreeTable.this
// override def getCellRenderer(r: Int, c: Int) = new TableCellRenderer {
// def getTableCellRendererComponent(table: JTable, value: AnyRef, isSelected: Boolean, hasFocus: Boolean, row: Int, column: Int) =
// Table.this.rendererComponent(isSelected, hasFocus, row, column).peer
// }
// override def getCellEditor(r: Int, c: Int) = editor(r, c)
// override def getValueAt(r: Int, c: Int) = Table.this.apply(r,c).asInstanceOf[AnyRef]
}
def autoCreateColumnHeader : Boolean = peer.getAutoCreateColumnHeader
def autoCreateColumnHeader_= (value: Boolean ): Unit = peer.setAutoCreateColumnHeader(value)
def autoCreateColumnsFromModel : Boolean = peer.getAutoCreateColumnsFromModel
  def autoCreateColumnsFromModel_= (value: Boolean ): Unit = peer.setAutoCreateColumnsFromModel(value)
def autoCreateRowHeader : Boolean = peer.getAutoCreateRowHeader
def autoCreateRowHeader_= (value: Boolean ): Unit = peer.setAutoCreateRowHeader(value)
def autoCreateRowSorter : Boolean = peer.getAutoCreateRowSorter
def autoCreateRowSorter_= (value: Boolean ): Unit = peer.setAutoCreateRowSorter(value)
def cellSelectionEnabled : Boolean = peer.getCellSelectionEnabled
def cellSelectionEnabled_= (value: Boolean ): Unit = peer.setCellSelectionEnabled(value)
def columnFocusEnabled : Boolean = peer.isColumnFocusEnabled
def columnFocusEnabled_= (value: Boolean ): Unit = peer.setColumnFocusEnabled(value)
def columnSelectionAllowed : Boolean = peer.getColumnSelectionAllowed
def columnSelectionAllowed_= (value: Boolean ): Unit = peer.setColumnSelectionAllowed(value)
def rowSelectionAllowed : Boolean = peer.getRowSelectionAllowed
def rowSelectionAllowed_= (value: Boolean ): Unit = peer.setRowSelectionAllowed(value)
def expandsSortedNodes : Boolean = peer.getExpandsSortedNodes
def expandsSortedNodes_= (value: Boolean ): Unit = peer.setExpandesSortedNodes(value)
def expandsSelectedPaths : Boolean = peer.getExpandsSelectedPaths
def expandsSelectedPaths_= (value: Boolean ): Unit = peer.setExpandsSelectedPaths(value)
def largeModel : Boolean = peer.isLargeModel
def largeModel_= (value: Boolean ): Unit = peer.setLargeModel(value)
def nodeSortingEnabled : Boolean = peer.isNodeSortingEnabled
def nodeSortingEnabled_= (value: Boolean ): Unit = peer.setNodeSortingEnabled(value)
def rootVisible : Boolean = peer.isRootVisible
def rootVisible_= (value: Boolean ): Unit = peer.setRootVisible(value)
def scrollsOnExpand : Boolean = peer.getScrollsOnExpand
def scrollsOnExpand_= (value: Boolean ): Unit = peer.setScrollsOnExpand(value)
// isShowGrid
def showHorizontalLines : Boolean = peer.getShowHorizontalLines
def showHorizontalLines_= (value: Boolean ): Unit = peer.setShowHorizontalLines(value)
def showVerticalLines : Boolean = peer.getShowVerticalLines
def showVerticalLines_= (value: Boolean ): Unit = peer.setShowVerticalLines(value)
def showsRootHandles : Boolean = peer.getShowsRootHandles
def showsRootHandles_= (value: Boolean ): Unit = peer.setShowsRootHandles(value)
def rowHeight : Int = peer.getRowHeight
def rowHeight_= (value: Int ): Unit = peer.setRowHeight(value)
def rowMargin : Int = peer.getRowMargin
def rowMargin_= (value: Int ): Unit = peer.setRowMargin(value)
def toggleClickCount : Int = peer.getToggleClickCount
def toggleClickCount_= (value: Int ): Unit = peer.setToggleClickCount(value)
def visibleRowCount : Int = peer.getVisibleRowCount
def visibleRowCount_= (value: Int ): Unit = peer.setVisibleRowCount(value)
def intercellSpacing : Dimension = peer.getIntercellSpacing
def intercellSpacing_= (value: Dimension): Unit = peer.setIntercellSpacing(value)
def alternativeRowColor : Color = peer.getAlternateRowColor
def alternativeRowColor_= (value: Color ): Unit = peer.setAlternateRowColor(value)
def gridColor : Color = peer.getGridColor
def gridColor_= (value: Color ): Unit = peer.setGridColor(value)
def selectionBackground : Color = peer.getSelectionBackground
def selectionBackground_= (value: Color ): Unit = peer.setSelectionBackground(value)
def selectionForeground : Color = peer.getSelectionForeground
def selectionForeground_= (value: Color ): Unit = peer.setSelectionForeground(value)
def openIcon : Icon = peer.getOpenIcon
def openIcon_= (value: Icon ): Unit = peer.setOpenIcon(value)
def leafIcon : Icon = peer.getLeafIcon
def leafIcon_= (value: Icon ): Unit = peer.setLeafIcon(value)
def closedIcon : Icon = peer.getClosedIcon
def closedIcon_= (value: Icon ): Unit = peer.setClosedIcon(value)
def ascendingSortIcon : Icon = peer.getAscendingSortIcon
def ascendingSortIcon_= (value: Icon ): Unit = peer.setAscendingSortIcon(value)
def descendingSortIcon : Icon = peer.getDescendingSortIcon
def descendingSortIcon_= (value: Icon ): Unit = peer.setDescendingSortIcon(value)
// def autoscrolls : Boolean = peer.getAutoscrolls
// def autoscrolls_=(value : Boolean) { peer.getAutoscrolls }
def dragEnabled : Boolean = peer.getDragEnabled
def dragEnabled_= (value: Boolean ): Unit = peer.setDragEnabled(value)
def dropMode : DropMode = peer.getDropMode
def dropMode_= (value: DropMode ): Unit = peer.setDropMode(value)
def expandPath(path: Path[A]): Unit = peer.expandPath(path)
def hierarchicalColumn: Int = peer.getHierarchicalColumn
def isCellEditable(row: Int, column: Int): Boolean = peer.isCellEditable(row, column)
def isCellSelected(row: Int, column: Int): Boolean = peer.isCellSelected(row, column)
def isCollapsed(path: Path[A]): Boolean = peer.isCollapsed(path)
def isCollapsed(row : Int ): Boolean = peer.isCollapsed(row )
def isExpanded (path: Path[A]): Boolean = peer.isExpanded (path)
def isExpanded (row : Int ): Boolean = peer.isExpanded (row )
def isLeaf (path: Path[A]): Boolean = peer.isLeaf (path)
// = selection.path.contains
// def isPathSelected (path : Path[A]): Boolean = peer.isPathSelected (path )
def isColumnSelected(column: Int ): Boolean = peer.isColumnSelected(column)
def isRowSelected (row : Int ): Boolean = peer.isRowSelected (row )
def isEditing : Boolean = peer.isEditing
def isFixedRowHeight: Boolean = peer.isFixedRowHeight
// def apply(row: Int, column: Int): Any = peer.getValueAt(row, column)
def getNode(row: Int): A = peer.getNode(row).asInstanceOf[A]
def getRowForPath(path: Path[A]): Int = peer.getRowForPath(path)
def editCellAt(row: Int, column: Int): Boolean = peer.editCellAt(row, column)
def startEditingAtPath(path: Path[A]): Boolean = peer.startEditingAtPath(path)
def startEditing(row: Int) : Boolean = peer.startEditingAtRow(row)
def dropLocation: Option[TreeTable.DropLocation[A]] = Option(peer.getDropLocation).map(TreeTable.DropLocation[A])
def changeSelection(row: Int, column: Int, toggle: Boolean = false, extend: Boolean = false): Unit =
peer.changeSelection(row, column, toggle, extend)
object selection extends Publisher {
protected abstract class SelectionSet[B](a: => scala.collection.Seq[B]) extends SetWrapper[B] {
// def -=(n: B): this.type
// def +=(n: B): this.type
def contains(n: B): Boolean = a.contains(n)
override def size: Int = a.length
def iterator: Iterator[B] = a.iterator
}
object paths extends SelectionSet[Path[A]]({
val p = peer.getSelectionPaths
if (p == null) Seq.empty else p.iterator.map(treePathToPath).toSeq
}) {
def subtractOne(p : Path[A] ): this.type = { peer.removeSelectionPath (p) ; this }
def addOne (p : Path[A] ): this.type = { peer.addSelectionPath (p) ; this }
// override def --=(ps: swing.Seq[Path[A]]): this.type = { peer.removeSelectionPaths(ps.iterator.map(pathToTreePath).toArray); this }
// override def ++=(ps: swing.Seq[Path[A]]): this.type = { peer.addSelectionPaths (ps.iterator.map(pathToTreePath).toArray); this }
override def addAll(ps: MoreElem[Path[A]]): this.type = {
peer.addSelectionPaths(mkIterator(ps).map(pathToTreePath).toArray)
this
}
override def subtractAll(ps: MoreElem[Path[A]]): this.type = {
peer.removeSelectionPaths(mkIterator(ps).map(pathToTreePath).toArray)
this
}
def leadSelection: Option[Path[A]] = Option(peer.getLeadSelectionPath: Path[A])
// override def clear(): Unit = peer.clearSelection()
override def size: Int = peer.getSelectionCount
}
object rows extends SelectionSet(peer.getSelectedRows) {
def subtractOne (n: Int): this.type = { peer.removeSelectionRow(n); this }
def addOne (n: Int): this.type = { peer.addSelectionRow (n); this }
def leadIndex : Int = peer.getSelectionModel.getLeadSelectionRow
// def anchorIndex: Int = peer.getSelectionModel.getAnchorSelectionRow
override def size: Int = peer.getSelectionCount
}
// cells is a PITA peer-wise
// object cells extends SelectionSet[(Int, Int)]({
// // (for(r <- selection.rows; c <- selection.columns) yield (r,c)).toSeq
// ...
//
// }) {
// def -=(n: (Int, Int)) = {
// // peer.removeRowSelectionInterval(n._1,n._1)
// // peer.removeColumnSelectionInterval(n._2,n._2)
// peer.removeSelectionRow(n._1)
// peer.removeColumn
// this
// }
// def +=(n: (Int, Int)) = {
// // peer.addRowSelectionInterval(n._1,n._1)
// // peer.addColumnSelectionInterval(n._2,n._2)
// this
// }
// override def size = peer.getSelectedRowCount * peer.getSelectedColumnCount
// }
peer.getSelectionModel.addTreeSelectionListener(new jse.TreeSelectionListener {
def valueChanged(e: jse.TreeSelectionEvent): Unit = {
val (pathsAdded, pathsRemoved) = e.getPaths.toVector.partition(e.isAddedPath)
publish(TreeTableSelectionChanged(me,
pathsAdded map treePathToPath,
pathsRemoved map treePathToPath,
Option(e.getNewLeadSelectionPath: Path[A]),
Option(e.getOldLeadSelectionPath: Path[A])))
}
})
// TODO: rows, cells, ...
// def cellValues: Iterator[A] = ...
// def isEmpty = size == 0
  }
}
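// Hypothetical usage sketch (not part of the original sources): inspecting the current
// drag-and-drop location of an already constructed `TreeTable` instance. `describeDrop`
// only uses the `dropLocation`, `path` and `index` members defined above.
object TreeTableDropLocationExample {
  def describeDrop[A, Col <: TreeColumnModel[A]](tt: TreeTable[A, Col]): String =
    tt.dropLocation match {
      case Some(loc) => s"drop at path ${loc.path} before child index ${loc.index}"
      case None      => "no drag-and-drop in progress"
    }
}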
} | Sciss/TreeTable | scala/src/main/scala/de/sciss/treetable/TreeTable.scala | Scala | lgpl-3.0 | 20,488 |
/*
* Copyright 2019 ABSA Group Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package za.co.absa.spline.common.webmvc.jackson
import com.fasterxml.jackson.databind.ObjectMapper
import org.springframework.beans.factory.config.BeanPostProcessor
import org.springframework.http.converter.json.MappingJackson2HttpMessageConverter
import org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter
import scala.collection.JavaConverters._
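/**
 * A Spring [[BeanPostProcessor]] that applies `postProcess` to the Jackson `ObjectMapper` of every
 * `MappingJackson2HttpMessageConverter` registered on a `RequestMappingHandlerAdapter`, before the
 * adapter is initialized.
 */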
class ObjectMapperBeanPostProcessor(postProcess: ObjectMapper => Unit) extends BeanPostProcessor {
override def postProcessBeforeInitialization(bean: AnyRef, beanName: String): AnyRef = {
bean match {
case adapter: RequestMappingHandlerAdapter =>
adapter
.getMessageConverters
.asScala
.collect({ case hmc: MappingJackson2HttpMessageConverter => hmc.getObjectMapper })
.foreach(postProcess(_))
case _ =>
}
bean
}
}
| AbsaOSS/spline | commons/src/main/scala/za/co/absa/spline/common/webmvc/jackson/ObjectMapperBeanPostProcessor.scala | Scala | apache-2.0 | 1,478 |
package io.udash
package rest.openapi
import java.util.UUID
import com.avsystem.commons._
import com.avsystem.commons.misc.{ImplicitNotFound, NamedEnum, NamedEnumCompanion, Timestamp}
import io.udash.rest.raw._
import monix.eval.TaskLike
import scala.annotation.implicitNotFound
@implicitNotFound("RestSchema for ${T} not found")
trait RestSchema[T] { self =>
/**
* Creates a [[Schema]] object or external schema reference.
* May use [[SchemaResolver]] to resolve any dependent `RestSchema` instances.
* This method should be called directly only by [[SchemaResolver]].
*/
def createSchema(resolver: SchemaResolver): RefOr[Schema]
/**
* Optional name of the schema. When `RestSchema` is named, schema created by [[createSchema]] will be registered
* under that name in [[SchemaRegistry]] and ultimately included into [[Components]] of the [[OpenApi]] document.
* All direct usages of the schema in OpenAPI document will be replaced by a reference to
* the registered schema, i.e. `{"$$ref": "#/components/schemas/<schema-name>"}`.
*
* If schema is unnamed, it will be always inlined instead of being replaced by a reference.
*/
def name: Opt[String]
def map[S](fun: RefOr[Schema] => Schema, newName: OptArg[String] = OptArg.Empty): RestSchema[S] =
RestSchema.create(resolver => RefOr(fun(resolver.resolve(self))), newName)
def named(name: String): RestSchema[T] =
RestSchema.create(createSchema, name)
def unnamed: RestSchema[T] =
RestSchema.create(createSchema)
}
object RestSchema {
def apply[T](implicit rt: RestSchema[T]): RestSchema[T] = rt
def create[T](creator: SchemaResolver => RefOr[Schema], schemaName: OptArg[String] = OptArg.Empty): RestSchema[T] =
new RestSchema[T] {
def createSchema(resolver: SchemaResolver): RefOr[Schema] = creator(resolver)
def name: Opt[String] = schemaName.toOpt
}
def named[T](name: String)(creator: SchemaResolver => RefOr[Schema]): RestSchema[T] =
create(creator, name)
def plain[T](schema: Schema): RestSchema[T] =
RestSchema.create(_ => RefOr(schema))
def ref[T](refstr: String): RestSchema[T] =
RestSchema.create(_ => RefOr.ref(refstr))
def lazySchema[T](actual: => RestSchema[T]): RestSchema[T] =
new RestSchema[T] {
private lazy val actualSchema = actual
def createSchema(resolver: SchemaResolver): RefOr[Schema] = actualSchema.createSchema(resolver)
def name: Opt[String] = actualSchema.name
}
implicit lazy val NothingSchema: RestSchema[Nothing] =
RestSchema.create(_ => throw new NotImplementedError("RestSchema[Nothing]"))
implicit lazy val UnitSchema: RestSchema[Unit] = plain(Schema(nullable = true))
implicit lazy val NullSchema: RestSchema[Null] = plain(Schema(nullable = true))
implicit lazy val VoidSchema: RestSchema[Void] = plain(Schema(nullable = true))
implicit lazy val BooleanSchema: RestSchema[Boolean] = plain(Schema.Boolean)
implicit lazy val CharSchema: RestSchema[Char] = plain(Schema.Char)
implicit lazy val ByteSchema: RestSchema[Byte] = plain(Schema.Byte)
implicit lazy val ShortSchema: RestSchema[Short] = plain(Schema.Short)
implicit lazy val IntSchema: RestSchema[Int] = plain(Schema.Int)
implicit lazy val LongSchema: RestSchema[Long] = plain(Schema.Long)
implicit lazy val FloatSchema: RestSchema[Float] = plain(Schema.Float)
implicit lazy val DoubleSchema: RestSchema[Double] = plain(Schema.Double)
implicit lazy val BigIntSchema: RestSchema[BigInt] = plain(Schema.Integer)
implicit lazy val BigDecimalSchema: RestSchema[BigDecimal] = plain(Schema.Number)
implicit lazy val JBooleanSchema: RestSchema[JBoolean] = plain(Schema.Boolean.copy(nullable = true))
implicit lazy val JCharacterSchema: RestSchema[JCharacter] = plain(Schema.Char.copy(nullable = true))
implicit lazy val JByteSchema: RestSchema[JByte] = plain(Schema.Byte.copy(nullable = true))
implicit lazy val JShortSchema: RestSchema[JShort] = plain(Schema.Short.copy(nullable = true))
implicit lazy val JIntegerSchema: RestSchema[JInteger] = plain(Schema.Int.copy(nullable = true))
implicit lazy val JLongSchema: RestSchema[JLong] = plain(Schema.Long.copy(nullable = true))
implicit lazy val JFloatSchema: RestSchema[JFloat] = plain(Schema.Float.copy(nullable = true))
implicit lazy val JDoubleSchema: RestSchema[JDouble] = plain(Schema.Double.copy(nullable = true))
implicit lazy val JBigIntegerSchema: RestSchema[JBigInteger] = plain(Schema.Integer)
implicit lazy val JBigDecimalSchema: RestSchema[JBigDecimal] = plain(Schema.Number)
implicit lazy val TimestampSchema: RestSchema[Timestamp] = plain(Schema.DateTime)
implicit lazy val JDateSchema: RestSchema[JDate] = plain(Schema.DateTime)
implicit lazy val StringSchema: RestSchema[String] = plain(Schema.String)
implicit lazy val SymbolSchema: RestSchema[Symbol] = plain(Schema.String)
implicit lazy val UuidSchema: RestSchema[UUID] = plain(Schema.Uuid)
implicit def arraySchema[T: RestSchema]: RestSchema[Array[T]] =
RestSchema[T].map(Schema.arrayOf(_))
implicit def seqSchema[C[X] <: BSeq[X], T: RestSchema]: RestSchema[C[T]] =
RestSchema[T].map(Schema.arrayOf(_))
implicit def setSchema[C[X] <: BSet[X], T: RestSchema]: RestSchema[C[T]] =
RestSchema[T].map(Schema.arrayOf(_, uniqueItems = true))
implicit def jCollectionSchema[C[X] <: JCollection[X], T: RestSchema]: RestSchema[C[T]] =
RestSchema[T].map(Schema.arrayOf(_))
implicit def jSetSchema[C[X] <: JSet[X], T: RestSchema]: RestSchema[C[T]] =
RestSchema[T].map(Schema.arrayOf(_, uniqueItems = true))
implicit def mapSchema[M[X, Y] <: BMap[X, Y], K, V: RestSchema]: RestSchema[M[K, V]] =
RestSchema[V].map(Schema.mapOf)
implicit def jMapSchema[M[X, Y] <: JMap[X, Y], K, V: RestSchema]: RestSchema[M[K, V]] =
RestSchema[V].map(Schema.mapOf)
implicit def optionSchema[T: RestSchema]: RestSchema[Option[T]] =
RestSchema[T].map(Schema.nullable)
implicit def optSchema[T: RestSchema]: RestSchema[Opt[T]] =
RestSchema[T].map(Schema.nullable)
implicit def optArgSchema[T: RestSchema]: RestSchema[OptArg[T]] =
RestSchema[T].map(Schema.nullable)
implicit def optRefSchema[T >: Null : RestSchema]: RestSchema[OptRef[T]] =
RestSchema[T].map(Schema.nullable)
implicit def nOptSchema[T: RestSchema]: RestSchema[NOpt[T]] =
RestSchema[T].map(Schema.nullable)
implicit def eitherSchema[A: RestSchema, B: RestSchema]: RestSchema[Either[A, B]] =
RestSchema.create { resolver =>
RefOr(Schema(oneOf = List(
RefOr(Schema(`type` = DataType.Object, properties =
Map("Left" -> resolver.resolve(RestSchema[A])), required = List("Left"))),
RefOr(Schema(`type` = DataType.Object, properties =
Map("Right" -> resolver.resolve(RestSchema[B])), required = List("Right")))
)))
}
private def enumValues[E <: NamedEnum](implicit comp: NamedEnumCompanion[E]): List[String] =
comp.values.iterator.map(_.name).toList
private def jEnumValues[E <: Enum[E] : ClassTag]: List[String] =
classTag[E].runtimeClass.getEnumConstants.iterator.map(_.asInstanceOf[E].name).toList
implicit def namedEnumSchema[E <: NamedEnum : NamedEnumCompanion]: RestSchema[E] =
RestSchema.plain(Schema.enumOf(enumValues[E]))
implicit def jEnumSchema[E <: Enum[E] : ClassTag]: RestSchema[E] =
RestSchema.plain(Schema.enumOf(jEnumValues[E]))
implicit def namedEnumMapSchema[M[X, Y] <: BMap[X, Y], K <: NamedEnum : NamedEnumCompanion, V: RestSchema]: RestSchema[M[K, V]] =
RestSchema[V].map(Schema.enumMapOf(enumValues[K], _))
implicit def jEnumMapSchema[M[X, Y] <: BMap[X, Y], K <: Enum[K] : ClassTag, V: RestSchema]: RestSchema[M[K, V]] =
RestSchema[V].map(Schema.enumMapOf(jEnumValues[K], _))
implicit def namedEnumJMapSchema[M[X, Y] <: JMap[X, Y], K <: NamedEnum : NamedEnumCompanion, V: RestSchema]: RestSchema[M[K, V]] =
RestSchema[V].map(Schema.enumMapOf(enumValues[K], _))
implicit def jEnumJMapSchema[M[X, Y] <: JMap[X, Y], K <: Enum[K] : ClassTag, V: RestSchema]: RestSchema[M[K, V]] =
RestSchema[V].map(Schema.enumMapOf(jEnumValues[K], _))
}
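// Hypothetical sketch (not part of the original sources): a hand-written, named RestSchema for an
// illustrative case class. Because the schema is named, a registering resolver (see SchemaRegistry
// below) will store it under "Point" and replace usages with a `$ref`; calling `.unnamed` on it
// would cause the schema to be inlined instead.
final case class Point(x: Int, y: Int)
object Point {
  implicit val pointSchema: RestSchema[Point] = RestSchema.named("Point") { resolver =>
    RefOr(Schema(
      `type` = DataType.Object,
      properties = Map(
        "x" -> resolver.resolve(RestSchema[Int]),
        "y" -> resolver.resolve(RestSchema[Int])),
      required = List("x", "y")))
  }
}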
/**
* Intermediate typeclass which serves as basis for [[RestResponses]] and [[RestRequestBody]].
* [[RestMediaTypes]] is derived by default from [[RestSchema]].
* It should be defined manually for every type which has custom serialization to
* [[io.udash.rest.raw.HttpBody HttpBody]] defined so that generated OpenAPI properly reflects that custom
* serialization format.
*/
@implicitNotFound("RestMediaTypes instance for ${T} not found")
trait RestMediaTypes[T] {
/**
* @param schemaTransform Should be used if [[RestMediaTypes]] is being built based on [[RestSchema]] for
* the same type. The transformation may adjust the schema and give it a different name.
* This transformation is usually used when there's a type that wraps another type and wants
* to reuse [[RestMediaTypes]] of the wrapped type but also introduces some schema
* modifications. See [[io.udash.rest.RestDataWrapperCompanion]].
*/
def mediaTypes(resolver: SchemaResolver, schemaTransform: RestSchema[_] => RestSchema[_]): Map[String, MediaType]
}
object RestMediaTypes {
def apply[T](implicit r: RestMediaTypes[T]): RestMediaTypes[T] = r
implicit val ByteArrayMediaTypes: RestMediaTypes[Array[Byte]] =
(resolver: SchemaResolver, schemaTransform: RestSchema[_] => RestSchema[_]) => {
val schema = resolver.resolve(schemaTransform(RestSchema.plain(Schema.Binary)))
Map(HttpBody.OctetStreamType -> MediaType(schema = schema))
}
implicit def fromSchema[T: RestSchema]: RestMediaTypes[T] =
(resolver: SchemaResolver, schemaTransform: RestSchema[_] => RestSchema[_]) =>
Map(HttpBody.JsonType -> MediaType(schema = resolver.resolve(schemaTransform(RestSchema[T]))))
@implicitNotFound("RestMediaTypes instance for ${T} not found, because:\\n#{forSchema}")
implicit def notFound[T](implicit forSchema: ImplicitNotFound[RestSchema[T]]): ImplicitNotFound[RestMediaTypes[T]] =
ImplicitNotFound()
}
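// Hypothetical sketch (not part of the original sources): a RestMediaTypes instance for a type that
// is assumed to be serialized as plain text rather than JSON, so that generated OpenAPI documents
// advertise a "text/plain" media type. `PlainNote` and the media type string are illustrative.
final case class PlainNote(text: String)
object PlainNote {
  implicit val plainNoteMediaTypes: RestMediaTypes[PlainNote] =
    (resolver: SchemaResolver, schemaTransform: RestSchema[_] => RestSchema[_]) => {
      val schema = resolver.resolve(schemaTransform(RestSchema.plain(Schema.String)))
      Map("text/plain" -> MediaType(schema = schema))
    }
}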
/**
* Typeclass which defines how an OpenAPI [[Responses]] Object will look like for a given type.
* By default, [[RestResponses]] is derived based on [[RestMediaTypes]] for that type which is itself derived by
* default from [[RestSchema]] for that type. It should be defined manually for every type which has custom
* serialization to [[io.udash.rest.raw.RestResponse RestResponse]] defined so that generated OpenAPI properly
* reflects that custom serialization format.
*/
@implicitNotFound("RestResponses instance for ${T} not found")
trait RestResponses[T] {
/**
* @param schemaTransform Should be used if [[RestResponses]] is being built based on [[RestSchema]] for
* the same type. The transformation may adjust the schema and give it a different name.
* This transformation is usually used when there's a type that wraps another type and wants
* to reuse [[RestResponses]] of the wrapped type but also introduces some schema
* modifications. See [[io.udash.rest.RestDataWrapperCompanion]].
*/
def responses(resolver: SchemaResolver, schemaTransform: RestSchema[_] => RestSchema[_]): Responses
}
object RestResponses {
def apply[T](implicit r: RestResponses[T]): RestResponses[T] = r
final val SuccessDescription = "Success"
implicit val UnitResponses: RestResponses[Unit] =
(_: SchemaResolver, _: RestSchema[_] => RestSchema[_]) => Responses(byStatusCode = Map(
204 -> RefOr(Response(description = SuccessDescription))
))
implicit def fromMediaTypes[T: RestMediaTypes]: RestResponses[T] =
(resolver: SchemaResolver, schemaTransform: RestSchema[_] => RestSchema[_]) => Responses(byStatusCode = Map(
200 -> RefOr(Response(
description = SuccessDescription,
content = RestMediaTypes[T].mediaTypes(resolver, schemaTransform)
))
))
@implicitNotFound("RestResponses instance for ${T} not found, because:\\n#{forMediaTypes}")
implicit def notFound[T](implicit forMediaTypes: ImplicitNotFound[RestMediaTypes[T]]): ImplicitNotFound[RestResponses[T]] =
ImplicitNotFound()
}
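// Hypothetical sketch (not part of the original sources): a manually defined RestResponses instance
// documenting a 201 status code instead of the default 200, for an illustrative type whose media
// types are reused from its RestMediaTypes instance.
final case class CreatedResource(id: String)
object CreatedResource {
  implicit def createdResponses(implicit mediaTypes: RestMediaTypes[CreatedResource]): RestResponses[CreatedResource] =
    (resolver: SchemaResolver, schemaTransform: RestSchema[_] => RestSchema[_]) => Responses(byStatusCode = Map(
      201 -> RefOr(Response(
        description = "Created",
        content = mediaTypes.mediaTypes(resolver, schemaTransform)
      ))
    ))
}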
/**
* Just like [[io.udash.rest.openapi.RestResponses RestResponses]],
* [[io.udash.rest.openapi.RestResultType RestResultType]] is a typeclass that defines how an OpenAPI
* Responses Object will look like for an HTTP method which returns given type. The difference between
* [[io.udash.rest.openapi.RestResultType RestResultType]] and [[io.udash.rest.openapi.RestResponses RestResponses]]
* is that [[io.udash.rest.openapi.RestResultType RestResultType]] is defined for full result
* type which usually is some kind of asynchronous wrapper over actual result type (e.g. `Future`).
* In such situation, [[io.udash.rest.openapi.RestResponses RestResponses]] must be provided for `T` while
* [[io.udash.rest.openapi.RestResultType RestResultType]] is provided
* for `Future[T]` (or whatever async wrapper is used), based on the [[io.udash.rest.openapi.RestResponses RestResponses]]
* instance of `T`. You can see an example of this in [[io.udash.rest.FutureRestImplicits FutureRestImplicits]].
*
* [[io.udash.rest.openapi.RestResultType RestResultType]] for [[io.udash.rest.openapi.OpenApiMetadata OpenApiMetadata]]
* is analogous to [[io.udash.rest.raw.HttpResponseType HttpResponseType]]
* for [[io.udash.rest.raw.RestMetadata RestMetadata]].
*/
final case class RestResultType[T](responses: SchemaResolver => Responses)
object RestResultType {
implicit def forAsyncEffect[F[_] : TaskLike, T: RestResponses]: RestResultType[F[T]] =
RestResultType(RestResponses[T].responses(_, identity))
@implicitNotFound("#{forResponseType}")
implicit def notFound[T](
implicit forResponseType: ImplicitNotFound[HttpResponseType[T]]
): ImplicitNotFound[RestResultType[T]] = ImplicitNotFound()
@implicitNotFound("#{forRestResponses}")
implicit def notFoundForAsyncEffect[F[_] : TaskLike, T](
implicit forRestResponses: ImplicitNotFound[RestResponses[T]]
): ImplicitNotFound[RestResultType[F[T]]] = ImplicitNotFound()
}
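// Hypothetical sketch (not part of the original sources): providing RestResultType for a custom
// asynchronous wrapper, mirroring what the scaladoc above describes for `Future`. `MyIO` is an
// illustrative wrapper type, not a real effect library.
final case class MyIO[+T](run: () => T)
object MyIO {
  implicit def myIOResultType[T: RestResponses]: RestResultType[MyIO[T]] =
    RestResultType(RestResponses[T].responses(_, identity))
}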
/**
* Typeclass which defines how OpenAPI [[RequestBody]] Object will look like for a Given type when that type is
* used as a type of [[io.udash.rest.Body Body]] parameter of a [[io.udash.rest.CustomBody CustomBody]] method.
* By default, [[RestRequestBody]] is derived from [[RestMediaTypes]] which by itself is derived by default
* from [[RestSchema]].
*/
@implicitNotFound("RestRequestBody instance for ${T} not found")
trait RestRequestBody[T] {
/**
* @param schemaTransform Should be used if [[RestRequestBody]] is being built based on [[RestSchema]] for
* the same type. The transformation may adjust the schema and give it a different name.
*/
def requestBody(resolver: SchemaResolver, schemaTransform: RestSchema[_] => RestSchema[_]): Opt[RefOr[RequestBody]]
}
object RestRequestBody {
def apply[T](implicit r: RestRequestBody[T]): RestRequestBody[T] = r
def simpleRequestBody(mediaType: String, schema: RefOr[Schema], required: Boolean): Opt[RefOr[RequestBody]] =
Opt(RefOr(RequestBody(
content = Map(
mediaType -> MediaType(schema = schema)
),
required = required
)))
implicit val UnitRequestBody: RestRequestBody[Unit] = (_: SchemaResolver, _: RestSchema[_] => RestSchema[_]) => Opt.Empty
implicit def fromMediaTypes[T: RestMediaTypes]: RestRequestBody[T] =
(resolver: SchemaResolver, schemaTransform: RestSchema[_] => RestSchema[_]) => {
val mediaTypes = RestMediaTypes[T].mediaTypes(resolver, schemaTransform)
Opt(RefOr(RequestBody(content = mediaTypes, required = true)))
}
@implicitNotFound("RestRequestBody instance for ${T} not found, because:\\n#{forMediaTypes}")
implicit def notFound[T](implicit forMediaTypes: ImplicitNotFound[RestMediaTypes[T]]): ImplicitNotFound[RestRequestBody[T]] =
ImplicitNotFound()
}
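// Hypothetical sketch (not part of the original sources): a RestRequestBody for a type that is
// assumed to be sent as a raw binary body, reusing the `simpleRequestBody` helper defined above.
final case class RawUpload(bytes: Array[Byte])
object RawUpload {
  implicit val rawUploadRequestBody: RestRequestBody[RawUpload] =
    (resolver: SchemaResolver, schemaTransform: RestSchema[_] => RestSchema[_]) =>
      RestRequestBody.simpleRequestBody(
        HttpBody.OctetStreamType,
        resolver.resolve(schemaTransform(RestSchema.plain(Schema.Binary))),
        required = true)
}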
trait SchemaResolver {
/**
* Resolves a [[RestSchema]] instance into an actual [[Schema]] object or reference.
* If the schema is unnamed then this method will simply return the same value as [[RestSchema.createSchema]].
* If the schema is named, it may be internally registered under its name and a
* [[https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#callbackObject Reference Object]]
* will be returned instead - see [[SchemaRegistry]].
*/
def resolve(schema: RestSchema[_]): RefOr[Schema]
}
final class InliningResolver extends SchemaResolver {
private[this] val resolving = new MHashSet[String]
def resolve(schema: RestSchema[_]): RefOr[Schema] =
try {
schema.name.foreach { n =>
if (!resolving.add(n)) {
throw new IllegalArgumentException(s"Recursive schema reference: $n")
}
}
schema.createSchema(this)
}
finally {
schema.name.foreach(resolving.remove)
}
}
object InliningResolver {
def resolve(schema: RestSchema[_]): RefOr[Schema] =
new InliningResolver().resolve(schema)
}
/**
* An implementation of [[SchemaResolver]] which registers named [[RestSchema]]s and replaces them with a
* [[https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#referenceObject Reference Object]].
* All the registered schemas can then be extracted and listed in the [[Components]] object.
*/
final class SchemaRegistry(
nameToRef: String => String = name => s"#/components/schemas/$name",
initial: Iterable[(String, RefOr[Schema])] = Map.empty
) extends SchemaResolver {
private[this] case class Entry(source: Opt[RestSchema[_]], schema: RefOr[Schema])
private[this] val resolving = new MHashSet[String]
private[this] val registry = new MHashMap[String, MListBuffer[Entry]]
.setup(_ ++= initial.iterator.map { case (n, s) => (n, MListBuffer[Entry](Entry(Opt.Empty, s))) })
def registeredSchemas: Map[String, RefOr[Schema]] =
registry.iterator.map { case (k, entries) =>
entries.result() match {
case Entry(_, schema) :: Nil => (k, schema)
case _ => throw new IllegalArgumentException(
s"Multiple schemas named $k detected - you may want to disambiguate them using @name annotation"
)
}
}.intoMap[ITreeMap]
def resolve(restSchema: RestSchema[_]): RefOr[Schema] = restSchema.name match {
case Opt(name) =>
if (!resolving.contains(name)) { // handling recursive schemas
val entries = registry.getOrElseUpdate(name, new MListBuffer)
if (!entries.exists(_.source.contains(restSchema))) {
resolving += name
val newSchema = try restSchema.createSchema(this) finally {
resolving -= name
}
if (!entries.exists(_.schema == newSchema)) {
entries += Entry(Opt(restSchema), newSchema)
}
}
}
RefOr.ref(nameToRef(name))
case Opt.Empty =>
restSchema.createSchema(this)
}
}
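// Hypothetical usage sketch (not part of the original sources): resolving a schema through a
// SchemaRegistry and collecting the registered component schemas for the OpenAPI document.
object SchemaRegistryUsageExample {
  def resolveWithComponents(schema: RestSchema[_]): (RefOr[Schema], Map[String, RefOr[Schema]]) = {
    val registry = new SchemaRegistry()
    val resolved = registry.resolve(schema) // a `$ref` for named schemas, an inlined schema otherwise
    (resolved, registry.registeredSchemas)  // schemas to be listed under "#/components/schemas"
  }
}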
| UdashFramework/udash-core | rest/src/main/scala/io/udash/rest/openapi/RestSchema.scala | Scala | apache-2.0 | 19,116 |
package org.apache.spark.ml.ann
import breeze.linalg.{*, DenseMatrix => BDM, DenseVector => BDV, Vector => BV, axpy => Baxpy,
sum => Bsum}
import org.apache.spark.mllib.linalg.{Vectors, Vector}
import org.apache.spark.mllib.optimization.Updater
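/**
 * A plain SGD [[Updater]] used for CNN training: the gradient is scaled by `alpha` and applied
 * with the given step size; regularization is not applied, so the returned regularization value
 * is always 0.
 */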
private[ann] class CNNUpdater(alpha: Double) extends Updater {
override def compute(
weightsOld: Vector,
gradient: Vector,
stepSize: Double,
iter: Int,
regParam: Double): (Vector, Double) = {
val thisIterStepSize = stepSize
val brzWeights: BV[Double] = weightsOld.toBreeze.toDenseVector
Baxpy(-thisIterStepSize, gradient.toBreeze * alpha, brzWeights)
(Vectors.fromBreeze(brzWeights), 0)
}
}
| hhbyyh/mCNN | src/communityInterface/CNNUpdater.scala | Scala | apache-2.0 | 689 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.io.File
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.parquet.hadoop.ParquetOutputFormat
import org.apache.spark.{DebugFilesystem, SparkException}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier}
import org.apache.spark.sql.catalyst.expressions.SpecificInternalRow
import org.apache.spark.sql.execution.FileSourceScanExec
import org.apache.spark.sql.execution.datasources.parquet.TestingUDT.{NestedStruct, NestedStructUDT, SingleElement}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* A test suite that tests various Parquet queries.
*/
class ParquetQuerySuite extends QueryTest with ParquetTest with SharedSQLContext {
import testImplicits._
test("simple select queries") {
withParquetTable((0 until 10).map(i => (i, i.toString)), "t") {
checkAnswer(sql("SELECT _1 FROM t where t._1 > 5"), (6 until 10).map(Row.apply(_)))
checkAnswer(sql("SELECT _1 FROM t as tmp where tmp._1 < 5"), (0 until 5).map(Row.apply(_)))
}
}
test("appending") {
val data = (0 until 10).map(i => (i, i.toString))
spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
// Query appends, don't test with both read modes.
withParquetTable(data, "t", false) {
sql("INSERT INTO TABLE t SELECT * FROM tmp")
checkAnswer(spark.table("t"), (data ++ data).map(Row.fromTuple))
}
spark.sessionState.catalog.dropTable(
TableIdentifier("tmp"), ignoreIfNotExists = true, purge = false)
}
test("overwriting") {
val data = (0 until 10).map(i => (i, i.toString))
spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
withParquetTable(data, "t") {
sql("INSERT OVERWRITE TABLE t SELECT * FROM tmp")
checkAnswer(spark.table("t"), data.map(Row.fromTuple))
}
spark.sessionState.catalog.dropTable(
TableIdentifier("tmp"), ignoreIfNotExists = true, purge = false)
}
test("SPARK-15678: not use cache on overwrite") {
withTempDir { dir =>
val path = dir.toString
spark.range(1000).write.mode("overwrite").parquet(path)
val df = spark.read.parquet(path).cache()
assert(df.count() == 1000)
spark.range(10).write.mode("overwrite").parquet(path)
assert(df.count() == 1000)
spark.catalog.refreshByPath(path)
assert(df.count() == 10)
assert(spark.read.parquet(path).count() == 10)
}
}
test("SPARK-15678: not use cache on append") {
withTempDir { dir =>
val path = dir.toString
spark.range(1000).write.mode("append").parquet(path)
val df = spark.read.parquet(path).cache()
assert(df.count() == 1000)
spark.range(10).write.mode("append").parquet(path)
assert(df.count() == 1000)
spark.catalog.refreshByPath(path)
assert(df.count() == 1010)
assert(spark.read.parquet(path).count() == 1010)
}
}
test("self-join") {
// 4 rows, cells of column 1 of row 2 and row 4 are null
val data = (1 to 4).map { i =>
val maybeInt = if (i % 2 == 0) None else Some(i)
(maybeInt, i.toString)
}
// TODO: vectorized doesn't work here because it requires UnsafeRows
withParquetTable(data, "t", false) {
val selfJoin = sql("SELECT * FROM t x JOIN t y WHERE x._1 = y._1")
val queryOutput = selfJoin.queryExecution.analyzed.output
assertResult(4, "Field count mismatches")(queryOutput.size)
assertResult(2, "Duplicated expression ID in query plan:\n $selfJoin") {
queryOutput.filter(_.name == "_1").map(_.exprId).size
}
checkAnswer(selfJoin, List(Row(1, "1", 1, "1"), Row(3, "3", 3, "3")))
}
}
test("nested data - struct with array field") {
    val data = (1 to 10).map(i => Tuple1((i, Seq(s"val_$i"))))
withParquetTable(data, "t") {
checkAnswer(sql("SELECT _1._2[0] FROM t"), data.map {
case Tuple1((_, Seq(string))) => Row(string)
})
}
}
test("nested data - array of struct") {
    val data = (1 to 10).map(i => Tuple1(Seq(i -> s"val_$i")))
withParquetTable(data, "t") {
checkAnswer(sql("SELECT _1[0]._2 FROM t"), data.map {
case Tuple1(Seq((_, string))) => Row(string)
})
}
}
test("SPARK-1913 regression: columns only referenced by pushed down filters should remain") {
withParquetTable((1 to 10).map(Tuple1.apply), "t") {
checkAnswer(sql("SELECT _1 FROM t WHERE _1 < 10"), (1 to 9).map(Row.apply(_)))
}
}
test("SPARK-5309 strings stored using dictionary compression in parquet") {
    withParquetTable((0 until 1000).map(i => ("same", "run_" + i / 100, 1)), "t") {
checkAnswer(sql("SELECT _1, _2, SUM(_3) FROM t GROUP BY _1, _2"),
(0 until 10).map(i => Row("same", "run_" + i, 100)))
checkAnswer(sql("SELECT _1, _2, SUM(_3) FROM t WHERE _2 = 'run_5' GROUP BY _1, _2"),
List(Row("same", "run_5", 100)))
}
}
test("SPARK-6917 DecimalType should work with non-native types") {
val data = (1 to 10).map(i => Row(Decimal(i, 18, 0), new java.sql.Timestamp(i)))
val schema = StructType(List(StructField("d", DecimalType(18, 0), false),
StructField("time", TimestampType, false)).toArray)
withTempPath { file =>
val df = spark.createDataFrame(sparkContext.parallelize(data), schema)
df.write.parquet(file.getCanonicalPath)
val df2 = spark.read.parquet(file.getCanonicalPath)
checkAnswer(df2, df.collect().toSeq)
}
}
test("Enabling/disabling merging partfiles when merging parquet schema") {
def testSchemaMerging(expectedColumnNumber: Int): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(0, 10).toDF("a").write.parquet(new Path(basePath, "foo=1").toString)
spark.range(0, 10).toDF("b").write.parquet(new Path(basePath, "foo=2").toString)
// delete summary files, so if we don't merge part-files, one column will not be included.
Utils.deleteRecursively(new File(basePath + "/foo=1/_metadata"))
Utils.deleteRecursively(new File(basePath + "/foo=1/_common_metadata"))
assert(spark.read.parquet(basePath).columns.length === expectedColumnNumber)
}
}
withSQLConf(
SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "true",
SQLConf.PARQUET_SCHEMA_RESPECT_SUMMARIES.key -> "true",
ParquetOutputFormat.ENABLE_JOB_SUMMARY -> "true"
) {
testSchemaMerging(2)
}
withSQLConf(
SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "true",
SQLConf.PARQUET_SCHEMA_RESPECT_SUMMARIES.key -> "false"
) {
testSchemaMerging(3)
}
}
test("Enabling/disabling schema merging") {
def testSchemaMerging(expectedColumnNumber: Int): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(0, 10).toDF("a").write.parquet(new Path(basePath, "foo=1").toString)
spark.range(0, 10).toDF("b").write.parquet(new Path(basePath, "foo=2").toString)
assert(spark.read.parquet(basePath).columns.length === expectedColumnNumber)
}
}
withSQLConf(SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "true") {
testSchemaMerging(3)
}
withSQLConf(SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "false") {
testSchemaMerging(2)
}
}
test("Enabling/disabling ignoreCorruptFiles") {
def testIgnoreCorruptFiles(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.parquet(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.parquet(new Path(basePath, "second").toString)
spark.range(2, 3).toDF("a").write.json(new Path(basePath, "third").toString)
val df = spark.read.parquet(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString,
new Path(basePath, "third").toString)
checkAnswer(
df,
Seq(Row(0), Row(1)))
}
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
testIgnoreCorruptFiles()
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
val exception = intercept[SparkException] {
testIgnoreCorruptFiles()
}
assert(exception.getMessage().contains("is not a Parquet file"))
}
}
/**
* this is part of test 'Enabling/disabling ignoreCorruptFiles' but run in a loop
* to increase the chance of failure
*/
ignore("SPARK-20407 ParquetQuerySuite 'Enabling/disabling ignoreCorruptFiles' flaky test") {
def testIgnoreCorruptFiles(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.parquet(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.parquet(new Path(basePath, "second").toString)
spark.range(2, 3).toDF("a").write.json(new Path(basePath, "third").toString)
val df = spark.read.parquet(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString,
new Path(basePath, "third").toString)
checkAnswer(
df,
Seq(Row(0), Row(1)))
}
}
for (i <- 1 to 100) {
DebugFilesystem.clearOpenStreams()
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
val exception = intercept[SparkException] {
testIgnoreCorruptFiles()
}
assert(exception.getMessage().contains("is not a Parquet file"))
}
DebugFilesystem.assertNoOpenStreams()
}
}
test("SPARK-8990 DataFrameReader.parquet() should respect user specified options") {
withTempPath { dir =>
val basePath = dir.getCanonicalPath
spark.range(0, 10).toDF("a").write.parquet(new Path(basePath, "foo=1").toString)
spark.range(0, 10).toDF("b").write.parquet(new Path(basePath, "foo=a").toString)
// Disables the global SQL option for schema merging
withSQLConf(SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "false") {
assertResult(2) {
// Disables schema merging via data source option
spark.read.option("mergeSchema", "false").parquet(basePath).columns.length
}
assertResult(3) {
// Enables schema merging via data source option
spark.read.option("mergeSchema", "true").parquet(basePath).columns.length
}
}
}
}
test("SPARK-9119 Decimal should be correctly written into parquet") {
withTempPath { dir =>
val basePath = dir.getCanonicalPath
val schema = StructType(Array(StructField("name", DecimalType(10, 5), false)))
val rowRDD = sparkContext.parallelize(Array(Row(Decimal("67123.45"))))
val df = spark.createDataFrame(rowRDD, schema)
df.write.parquet(basePath)
val decimal = spark.read.parquet(basePath).first().getDecimal(0)
assert(Decimal("67123.45") === Decimal(decimal))
}
}
test("SPARK-10005 Schema merging for nested struct") {
withTempPath { dir =>
val path = dir.getCanonicalPath
def append(df: DataFrame): Unit = {
df.write.mode(SaveMode.Append).parquet(path)
}
// Note that both the following two DataFrames contain a single struct column with multiple
// nested fields.
append((1 to 2).map(i => Tuple1((i, i))).toDF())
append((1 to 2).map(i => Tuple1((i, i, i))).toDF())
withSQLConf(SQLConf.PARQUET_BINARY_AS_STRING.key -> "true") {
checkAnswer(
spark.read.option("mergeSchema", "true").parquet(path),
Seq(
Row(Row(1, 1, null)),
Row(Row(2, 2, null)),
Row(Row(1, 1, 1)),
Row(Row(2, 2, 2))))
}
}
}
test("SPARK-10301 requested schema clipping - same schema") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(1).selectExpr("NAMED_STRUCT('a', id, 'b', id + 1) AS s").coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("a", LongType, nullable = true)
.add("b", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(0L, 1L)))
}
}
test("SPARK-11997 parquet with null partition values") {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(1, 3)
.selectExpr("if(id % 2 = 0, null, id) AS n", "id")
.write.partitionBy("n").parquet(path)
checkAnswer(
spark.read.parquet(path).filter("n is null"),
Row(2, null))
}
}
// This test case is ignored because of parquet-mr bug PARQUET-370
ignore("SPARK-10301 requested schema clipping - schemas with disjoint sets of fields") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(1).selectExpr("NAMED_STRUCT('a', id, 'b', id + 1) AS s").coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("c", LongType, nullable = true)
.add("d", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(null, null)))
}
}
test("SPARK-10301 requested schema clipping - requested schema contains physical schema") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(1).selectExpr("NAMED_STRUCT('a', id, 'b', id + 1) AS s").coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("a", LongType, nullable = true)
.add("b", LongType, nullable = true)
.add("c", LongType, nullable = true)
.add("d", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(0L, 1L, null, null)))
}
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(1).selectExpr("NAMED_STRUCT('a', id, 'd', id + 3) AS s").coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("a", LongType, nullable = true)
.add("b", LongType, nullable = true)
.add("c", LongType, nullable = true)
.add("d", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(0L, null, null, 3L)))
}
}
test("SPARK-10301 requested schema clipping - physical schema contains requested schema") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark
.range(1)
.selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2, 'd', id + 3) AS s")
.coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("a", LongType, nullable = true)
.add("b", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(0L, 1L)))
}
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark
.range(1)
.selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2, 'd', id + 3) AS s")
.coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("a", LongType, nullable = true)
.add("d", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(0L, 3L)))
}
}
test("SPARK-10301 requested schema clipping - schemas overlap but don't contain each other") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark
.range(1)
.selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2) AS s")
.coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("b", LongType, nullable = true)
.add("c", LongType, nullable = true)
.add("d", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(1L, 2L, null)))
}
}
test("SPARK-10301 requested schema clipping - deeply nested struct") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark
.range(1)
.selectExpr("NAMED_STRUCT('a', ARRAY(NAMED_STRUCT('b', id, 'c', id))) AS s")
.coalesce(1)
df.write.parquet(path)
val userDefinedSchema = new StructType()
.add("s",
new StructType()
.add(
"a",
ArrayType(
new StructType()
.add("b", LongType, nullable = true)
.add("d", StringType, nullable = true),
containsNull = true),
nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(Seq(Row(0, null)))))
}
}
test("SPARK-10301 requested schema clipping - out of order") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df1 = spark
.range(1)
.selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2) AS s")
.coalesce(1)
val df2 = spark
.range(1, 2)
.selectExpr("NAMED_STRUCT('c', id + 2, 'b', id + 1, 'd', id + 3) AS s")
.coalesce(1)
df1.write.parquet(path)
df2.write.mode(SaveMode.Append).parquet(path)
val userDefinedSchema = new StructType()
.add("s",
new StructType()
.add("a", LongType, nullable = true)
.add("b", LongType, nullable = true)
.add("d", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Seq(
Row(Row(0, 1, null)),
Row(Row(null, 2, 4))))
}
}
test("SPARK-10301 requested schema clipping - schema merging") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df1 = spark
.range(1)
.selectExpr("NAMED_STRUCT('a', id, 'c', id + 2) AS s")
.coalesce(1)
val df2 = spark
.range(1, 2)
.selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2) AS s")
.coalesce(1)
df1.write.mode(SaveMode.Append).parquet(path)
df2.write.mode(SaveMode.Append).parquet(path)
checkAnswer(
spark
.read
.option("mergeSchema", "true")
.parquet(path)
.selectExpr("s.a", "s.b", "s.c"),
Seq(
Row(0, null, 2),
Row(1, 2, 3)))
}
}
testStandardAndLegacyModes("SPARK-10301 requested schema clipping - UDT") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark
.range(1)
.selectExpr(
"""NAMED_STRUCT(
| 'f0', CAST(id AS STRING),
| 'f1', NAMED_STRUCT(
| 'a', CAST(id + 1 AS INT),
| 'b', CAST(id + 2 AS LONG),
| 'c', CAST(id + 3.5 AS DOUBLE)
| )
|) AS s
""".stripMargin)
.coalesce(1)
df.write.mode(SaveMode.Append).parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("f1", new NestedStructUDT, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(NestedStruct(1, 2L, 3.5D))))
}
}
test("expand UDT in StructType") {
val schema = new StructType().add("n", new NestedStructUDT, nullable = true)
val expected = new StructType().add("n", new NestedStructUDT().sqlType, nullable = true)
assert(ParquetReadSupport.expandUDT(schema) === expected)
}
test("expand UDT in ArrayType") {
val schema = new StructType().add(
"n",
ArrayType(
elementType = new NestedStructUDT,
containsNull = false),
nullable = true)
val expected = new StructType().add(
"n",
ArrayType(
elementType = new NestedStructUDT().sqlType,
containsNull = false),
nullable = true)
assert(ParquetReadSupport.expandUDT(schema) === expected)
}
test("expand UDT in MapType") {
val schema = new StructType().add(
"n",
MapType(
keyType = IntegerType,
valueType = new NestedStructUDT,
valueContainsNull = false),
nullable = true)
val expected = new StructType().add(
"n",
MapType(
keyType = IntegerType,
valueType = new NestedStructUDT().sqlType,
valueContainsNull = false),
nullable = true)
assert(ParquetReadSupport.expandUDT(schema) === expected)
}
test("returning batch for wide table") {
withSQLConf(SQLConf.WHOLESTAGE_MAX_NUM_FIELDS.key -> "10") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(10).select(Seq.tabulate(11) {i => ('id + i).as(s"c$i")} : _*)
df.write.mode(SaveMode.Overwrite).parquet(path)
        // do not return batch, because whole stage codegen is disabled for wide table (>200 columns)
val df2 = spark.read.parquet(path)
val fileScan2 = df2.queryExecution.sparkPlan.find(_.isInstanceOf[FileSourceScanExec]).get
assert(!fileScan2.asInstanceOf[FileSourceScanExec].supportsBatch)
checkAnswer(df2, df)
// return batch
val columns = Seq.tabulate(9) {i => s"c$i"}
val df3 = df2.selectExpr(columns : _*)
val fileScan3 = df3.queryExecution.sparkPlan.find(_.isInstanceOf[FileSourceScanExec]).get
assert(fileScan3.asInstanceOf[FileSourceScanExec].supportsBatch)
checkAnswer(df3, df.selectExpr(columns : _*))
}
}
}
test("SPARK-15719: disable writing summary files by default") {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(3).write.parquet(path)
val fs = FileSystem.get(sparkContext.hadoopConfiguration)
val files = fs.listFiles(new Path(path), true)
while (files.hasNext) {
val file = files.next
assert(!file.getPath.getName.contains("_metadata"))
}
}
}
test("SPARK-15804: write out the metadata to parquet file") {
val df = Seq((1, "abc"), (2, "hello")).toDF("a", "b")
val md = new MetadataBuilder().putString("key", "value").build()
val dfWithmeta = df.select('a, 'b.as("b", md))
withTempPath { dir =>
val path = dir.getCanonicalPath
dfWithmeta.write.parquet(path)
readParquetFile(path) { df =>
assert(df.schema.last.metadata.getString("key") == "value")
}
}
}
test("SPARK-16344: array of struct with a single field named 'element'") {
withTempPath { dir =>
val path = dir.getCanonicalPath
Seq(Tuple1(Array(SingleElement(42)))).toDF("f").write.parquet(path)
checkAnswer(
sqlContext.read.parquet(path),
Row(Array(Row(42)))
)
}
}
test("SPARK-16632: read Parquet int32 as ByteType and ShortType") {
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true") {
withTempPath { dir =>
val path = dir.getCanonicalPath
// When being written to Parquet, `TINYINT` and `SMALLINT` should be converted into
// `int32 (INT_8)` and `int32 (INT_16)` respectively. However, Hive doesn't add the `INT_8`
// and `INT_16` annotation properly (HIVE-14294). Thus, when reading files written by Hive
// using Spark with the vectorized Parquet reader enabled, we may hit error due to type
// mismatch.
//
// Here we are simulating Hive's behavior by writing a single `INT` field and then read it
// back as `TINYINT` and `SMALLINT` in Spark to verify this issue.
Seq(1).toDF("f").write.parquet(path)
val withByteField = new StructType().add("f", ByteType)
checkAnswer(spark.read.schema(withByteField).parquet(path), Row(1: Byte))
val withShortField = new StructType().add("f", ShortType)
checkAnswer(spark.read.schema(withShortField).parquet(path), Row(1: Short))
}
}
}
}
object TestingUDT {
case class SingleElement(element: Long)
@SQLUserDefinedType(udt = classOf[NestedStructUDT])
case class NestedStruct(a: Integer, b: Long, c: Double)
class NestedStructUDT extends UserDefinedType[NestedStruct] {
override def sqlType: DataType =
new StructType()
.add("a", IntegerType, nullable = true)
.add("b", LongType, nullable = false)
.add("c", DoubleType, nullable = false)
override def serialize(n: NestedStruct): Any = {
val row = new SpecificInternalRow(sqlType.asInstanceOf[StructType].map(_.dataType))
row.setInt(0, n.a)
row.setLong(1, n.b)
row.setDouble(2, n.c)
}
override def userClass: Class[NestedStruct] = classOf[NestedStruct]
override def deserialize(datum: Any): NestedStruct = {
datum match {
case row: InternalRow =>
NestedStruct(row.getInt(0), row.getLong(1), row.getDouble(2))
}
}
}
}
| u2009cf/spark-radar | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala | Scala | apache-2.0 | 26,926 |
object Solution {
def hasCommon(s: String, t: String): Boolean = {
(s.toSet & t.toSet).size >= 1
}
def main(args: Array[String]) {
for (_ <- 1 to readInt) {
val s = readLine
val t = readLine
if (hasCommon(s, t)) println("YES") else println("NO")
}
}
}
| advancedxy/hackerrank | algorithms/strings/TwoStrings.scala | Scala | mit | 292 |
package slick.backend
import scala.language.experimental.macros
import java.net.{URL, URI}
import scala.annotation.{StaticAnnotation, Annotation}
import scala.reflect.ClassTag
import scala.reflect.macros.Context
import scala.util.control.NonFatal
import slick.SlickException
import slick.profile.BasicProfile
import com.typesafe.config.{ConfigFactory, Config}
/** A configuration for a Database plus a matching Slick driver. */
trait DatabaseConfig[P <: BasicProfile] {
/** Get the configured Database. It is instantiated lazily when this method is called for the
* first time, and must be closed after use. */
def db: P#Backend#Database
/** The configured driver. */
val driver: P
/** The raw configuration. */
def config: Config
/** The name of the driver class or object (without a trailing "$"). */
def driverName: String
/** Whether the `driverName` represents an object instead of a class. */
def driverIsObject: Boolean
}
object DatabaseConfig {
/** Load a driver and database configuration through
* [[https://github.com/typesafehub/config Typesafe Config]].
*
* The following config parameters are available:
* <ul>
* <li>`driver` (String, required): The fully qualified name of a class or object which
* implements the specified profile. If the name ends with `$` it is assumed to be an object
* name, otherwise a class name.</li>
* <li>`db` (Config, optional): The configuration of a database for the driver's backend.
* For JdbcProfile-based' drivers (and thus JdbcBackend), see
* `JdbcBackend.DatabaseFactory.forConfig` for parameters that should be defined inside of
* `db`.</li>
* </ul>
*
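   * A minimal usage sketch (the `foo.bar` path and the H2 settings below are illustrative
   * assumptions, not taken from this file):
   * {{{
   *   // application.conf:
   *   //   foo.bar.driver = "slick.driver.H2Driver$"
   *   //   foo.bar.db.url = "jdbc:h2:mem:test1"
   *   //   foo.bar.db.driver = "org.h2.Driver"
   *   val dc = DatabaseConfig.forConfig[slick.driver.JdbcProfile]("foo.bar")
   *   val db = dc.db // instantiated lazily; close it after use
   * }}}
   *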
* @param path The path in the configuration file for the database configuration (e.g. `foo.bar`
* would find a driver name at config key `foo.bar.driver`) or an empty string
* for the top level of the `Config` object.
* @param config The `Config` object to read from. This defaults to the global app config
* (e.g. in `application.conf` at the root of the class path) if not specified.
*/
def forConfig[P <: BasicProfile : ClassTag](path: String, config: Config = ConfigFactory.load()): DatabaseConfig[P] = {
val n = config.getString((if(path.isEmpty) "" else path + ".") + "driver")
val untypedP = try {
(if(n.endsWith("$")) Class.forName(n).getField("MODULE$").get(null)
else Class.forName(n).newInstance())
} catch { case NonFatal(ex) =>
throw new SlickException(s"""Error getting instance of Slick driver "$n"""", ex)
}
val pClass = implicitly[ClassTag[P]].runtimeClass
if(!pClass.isInstance(untypedP))
throw new SlickException(s"Configured Slick driver $n is not an instance of requested profile ${pClass.getName}")
val root = config
new DatabaseConfig[P] {
lazy val db: P#Backend#Database =
driver.backend.createDatabase(root, (if(path.isEmpty) "" else path + ".") + "db")
val driver: P = untypedP.asInstanceOf[P]
lazy val config: Config = if(path.isEmpty) root else root.getConfig(path)
def driverName = if(driverIsObject) n.substring(0, n.length-1) else n
def driverIsObject = n.endsWith("$")
}
}
/** Load a driver and database configuration from the specified URI. If only a fragment name
* is given, it is resolved as a path in the global app config (e.g. in `application.conf` at
* the root of the class path), otherwise as a path in the configuration located at the URI
* without the fragment, which must be a valid URL. Without a fragment, the whole config object
* is used. */
def forURI[P <: BasicProfile : ClassTag](uri: URI): DatabaseConfig[P] = {
val (base, path) = {
val f = uri.getRawFragment
val s = uri.toString
if(s.isEmpty) (null, "")
else if(f eq null) (s, "")
else if(s.startsWith("#")) (null, uri.getFragment)
else (s.substring(0, s.length-f.length-1), uri.getFragment)
}
val root =
if(base eq null) ConfigFactory.load()
else ConfigFactory.parseURL(new URL(base)).resolve()
forConfig[P](path, root)
}
/** Load a driver and database configuration from the URI specified in a [[StaticDatabaseConfig]]
* annotation in the static scope of the caller. */
def forAnnotation[P <: BasicProfile](implicit ct: ClassTag[P]): DatabaseConfig[P] =
macro StaticDatabaseConfigMacros.getImpl[P]
}
/** An annotation for injecting a DatabaseConfig at compile time. The URI parameter must be a
* literal String. This annotation is required for providing a statically scoped database
* configuration to the `tsql` interpolator. */
final class StaticDatabaseConfig(val uri: String) extends Annotation with StaticAnnotation
object StaticDatabaseConfigMacros {
private[slick] def getURI(c: Context): String = {
import c.universe._
def findUri(ann: Seq[Tree]): Option[String] =
ann.map(c.typeCheck(_, pt = weakTypeOf[StaticDatabaseConfig], silent = true)).collectFirst {
case Apply(Select(_, _), List(Literal(Constant(uri: String)))) => uri
}
val methConf = Option(c.enclosingMethod).filter(_ != EmptyTree).map(_.asInstanceOf[MemberDef])
.flatMap(md => findUri(md.mods.annotations))
val classConf = findUri(c.enclosingClass.asInstanceOf[MemberDef].mods.annotations)
methConf.orElse(classConf).getOrElse(
c.abort(c.enclosingPosition, "No @StaticDatabaseConfig annotation found in enclosing scope"))
}
def getImpl[P <: BasicProfile : c.WeakTypeTag](c: Context)(ct: c.Expr[ClassTag[P]]): c.Expr[DatabaseConfig[P]] = {
import c.universe._
val uri = c.Expr[String](Literal(Constant(getURI(c))))
reify(DatabaseConfig.forURI[P](new URI(uri.splice))(ct.splice))
}
}
| adamkozuch/slick | slick/src/main/scala/slick/backend/DatabaseConfig.scala | Scala | bsd-2-clause | 5,810 |
package model
/**
* Created by ghseeli on 1/14/17.
*/
sealed trait Status {
def message: String
}
case class SquareAlreadyOccupied[B <: Coordinate](board: Board[B], player: Option[Player], coordinate: B) extends Status {
val message = "Entered square is already occupied!"
}
case class InvalidCoordinate[B <: Coordinate](board: Board[B], coordinate: B) extends Status {
val message = "Entered coordinate is invalid!"
}
case class NothingToUndo[B <: Coordinate](board: Board[B]) extends Status {
val message = "There is nothing to undo!"
} | ghseeli/four-dim-tic-tac-toe | src/main/scala/model/Status.scala | Scala | gpl-3.0 | 552 |
package utils.reflect
import java.lang.reflect.{Field,Method,Type,ParameterizedType,WildcardType,GenericArrayType,TypeVariable}
import scala.collection.mutable.Builder
import Reflect.findClass
trait AutoConvertData extends ConvertData {
def convert:String
}
/** A Binder ties an AccessibleObject (Field, Method with one argument, which is hidden in a DataActor) with a ConversionSolver and
* some conversion data.
* As a result, it can be used to automatically set any value, possibly automatically converted to the appropriate type.
* Binder cannot be created by the user: the Binder factory must be used for that.
* The Binder exists only to store the relevant data pertaining to the underlying operations (conversion, field/method setting etc.)
* It can be kept and reused.
* An Instance must be created in order to use the binder on an actual object.
*
* A Binder is kind of a lazy immutable object: once its internal state is calculated (which happens only once the first
* conversion is requested), it never changes.
* It is important to note that this implies that a Binder can only convert from one type of data.
 * If you have once read a String to set an int field, for example, then you cannot use the same binder to use an int as input.
* Another Binder, using the same DataActor would have to be used for that purpose (the internal conversion is obviously not the same!)
*
* The Binder works on the expected type set in the DataBinder. Such a type (which is the one declared on the field, method...)
* cannot include any Wildcard. Thus, Map[String,Array[Properties]] is acceptable, but not Map[String,Array[_<:Properties]]
*
* There are only few acceptable sequences of calls to use a Binder properly:
*
* val b:Binder[_,_] = Binder(fld,solver,fd,false) //get the binder (not on a collection)
* val x:b.I = b(anObject) //bind it to an object
* x.set(a,e) //set value a to x
*
* val b:Binder[_,_] = Binder(fld,solver,fd,true) //get the binder (on a collection)
* val x:b.I = b(anObject) //bind it to an object
 *    val y:b.I = x.subInstance               //enter the collection
* y.set(a,e0) //set value a to underlying collection in y
* y.set(b,e1) //set value b to underlying collection in y
* y.set(z,en) //set value z to underlying collection in y
* y.close(e) //terminate the collection, which assigns it to fld
*
 * In case the collection is a Map, the set must apply to Assoc objects (i.e. it is then expected that a, b,..., z have the Assoc interface).
* At the end of the sequence, anObject.fld is set to the received value, appropriately converted.
*
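 * For the Map case, a hypothetical sketch (names are illustrative, not from this file):
 *
 *    val y = b(anObject).subInstance          //enter the map
 *    y.set("k1" --> v1)                       //an Assoc value, built with the --> shortcut defined below
 *    y.close()                                //assigns the built map to fld
 *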
 * The top class (which cannot be instantiated because the constructor is private) is used for binding to a DataActor.
* Derived classes (which are also hidden) are used to bind sub-collections.
*/
sealed class Binder private (val what:DataActor,protected[this] val solver:ConversionSolver,protected[this] val fd:AutoConvertData) {
final type I = Analyze#Instance
private[this] var cached:Analyze = null
protected[this] def build(on:AnyRef):I = {
if (cached==null) cached = new Analyze
cached.newInstance(on,null)
}
/** The Analyze class is a container that keeps track of the functions used to perform the binding.
* These classes are all but invisible to the end user.
*/
protected class Analyze private[Binder] { //Binder instance for a pair (object/field or object/method)
def isCol:Boolean = false //indicates whether this is a Collection
def isMap:Boolean = false //indicates whether this is a map
private[this] var eConvert:Any => Any = null
protected[this] def eType:Type = what.expected
val eClass:Class[_] = findClass(eType)
final protected[this] def convert(src:Any):Any = { //builds the actual value for x as expected from the container
//finds the converter for source class src; will only be defined on the first invocation (when a value is actually set)
if (eConvert==null) eConvert=getSolver(src.getClass,eType)
eConvert(src)
}
def subAnalyze():Analyze = throw new IllegalStateException("sub instance are only allowed on collections")
protected[reflect] def newInstance(on:AnyRef,parent:I):Instance = new Instance(on)
/** The instance class actually binds an object with a DataActor.
*/
class Instance protected[Analyze] (val on:AnyRef) {
final def binder = Binder.this
final def read():Any = what.get(on)
def eltClass = Analyze.this.eClass
def endClass = Analyze.this.eType
def set(x:Any):Unit = rcv(convert(x))
def asT:Traversable[Any] = throw new IllegalStateException("cannot cast a field instance as a Traversable")
def close():Unit = throw new IllegalStateException("cannot close a field instance")
def close(key:Any):Unit = throw new IllegalStateException("cannot close a field instance")
def subInstance:I = throw new IllegalStateException("sub instance are only allowed on collections")
//use with care: this is direct access to the underlying collection WITHOUT conversion
def rcv(x:Any):Unit = what.set(on,x)
def rcv(key:Any,x:Any):Unit = throw new IllegalStateException("only a Map instance can receive a (key,value)")
}
}
final def apply(on:AnyRef):I = build(on)
protected[this] final def getSolver(cz:Class[_],t:Type) = solver(cz,findClass(t),fd,fd.convert).fold(s=>throw new IllegalStateException(s), identity)
}
object Binder {
trait Assoc[+K,+T] {
def key:K
def value:T
final override def toString = "Assoc("+key+" -> "+value+")"
}
/** Used to map a pair key/value to store maps.
* Use this as the return type of an end method for items stored as maps.
*/
final case class AssocElt[+K,+T](final val key:K, final val value:T) extends Assoc[K,T] {
final protected def this() = this(null.asInstanceOf[K],null.asInstanceOf[T])
}
/** Use --> instead of -> for building Assoc as a shortcut
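   *  e.g. (a small sketch): {{{ import Binder._; val kv: Assoc[String, Int] = "count" --> 3 }}}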
*/
final implicit class AssocLeft[K](final val key:K) {
@inline def -->[T] (t:T) = new AssocElt(key,t)
}
/** Binder for a collection element. It can not be assigned until all elements have been first collected.
* Furthermore, the conversion process occurs on the elements themselves, not the container.
*/
private class CollectionBinder(what:DataActor,solver:ConversionSolver,fd:AutoConvertData) extends Binder(what,solver,fd) {
private[this] val deepCache = new Array[super.Analyze](6) //Do we expect deep collection of more than this depth ?
class Analyze(val depth:Int,val parent:super.Analyze) extends super.Analyze {
override def subAnalyze:Analyze = solver.collectionSolver(eType) match {
case None => throw new IllegalArgumentException(s"type $eType cannot be identified as a workable collection")
case Some(a) if a.isMap => new Map(a,depth+1,this)
case Some(a) => new Col(a,depth+1,this)
}
override protected[reflect] def newInstance(on:AnyRef,parent:I) = new Instance(on,parent)
class Instance(on:AnyRef,parent:I) extends super.Instance(on) {
override def subInstance:I = {
if (deepCache(depth)==null) deepCache(depth) = subAnalyze
deepCache(depth).newInstance(on,this)
}
}
}
private class Col(adapt:CollectionAdapter.Adapt,depth:Int,parent:Analyze) extends Analyze(depth,parent) {
override final def eType = adapt.czElt
final override def isCol = true
final override def isMap = adapt.isMap
override def newInstance(on:AnyRef,parent:I):Instance = new Instance(on,parent)
class Instance(on:AnyRef,parent:I) extends super.Instance(on,parent) {
final val stack:Builder[Any,Any] = adapt.newBuilder.asInstanceOf[Builder[Any,Any]]
final override def close():Unit = parent.rcv(stack.result)
final override def close(key:Any):Unit = parent.rcv(key,stack.result)
override def rcv(x:Any):Unit = stack+=x
override def asT = { val r=read(); if (r==null) null else adapt.asTraversable(r.asInstanceOf[adapt.C]) }
}
}
private class Map(adapt:CollectionAdapter.Adapt,depth:Int,parent:Analyze) extends Col(adapt,depth,parent) { //used for mapped collection
final val kType = adapt.asInstanceOf[CollectionAdapter[_]#MapAdapter[_,_]].czKey
protected[this] var kConvert:(Any)=>Any = null
override def newInstance(on:AnyRef,parent:I):Instance = new Instance(on,parent)
class Instance(on:AnyRef,parent:I) extends super.Instance(on,parent) {
override def set(x:Any) = x match {
case a:Assoc[_,_] => rcv(a.key,convert(a.value))
case _ => throw new IllegalStateException(s"a map must receive a ${classOf[Assoc[_,_]]} as data")
}
override def rcv(x:Any) = throw new IllegalStateException()
override def rcv(key:Any,value:Any):Unit = {
if (kConvert==null) kConvert = getSolver(key.getClass,kType)
super.rcv(kConvert(key) -> value)
}
}
}
override def build(on:AnyRef):I = new Analyze(0,null).newInstance(on,null)
}
/** Factory that builds a Binder with a given DataActor */
final def apply(what:DataActor,solver:ConversionSolver,fd:AutoConvertData,isCol:Boolean):Binder = {
if (isCol) new CollectionBinder(what,solver,fd)
else new Binder(what,solver,fd)
}
  /** A class that lets you use Binders easily to enter values directly.
* See the test code for example of use.
* - method o is for specifying final inputs
* - method u is for specifying layers
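   *  A hypothetical sketch, assuming `bndr` is a Binder built for a collection field and
   *  `target` is the object to fill (both names are illustrative):
   *  {{{
   *    import Binder._, Binder.Helper._
   *    val inst = bndr(target)     // bind to the target object
   *    inst.u(o(1, 2, 3))          // fill the collection with converted values, then close it
   *  }}}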
*/
implicit final class Helper(val x: Binder#Analyze#Instance) {
@inline private def sub(f: Helper=>Unit*) = { val c:Helper=x.subInstance; f.foreach(_(c)); c.x }
@inline final def u(f: Helper=>Unit*):Unit = sub(f:_*).close()
@inline final def read = x.read()
}
object Helper {
@inline final def o(v:Any*):Helper=>Unit = xh=>v.foreach(xh.x.set(_))
@inline final def u(f:Helper=>Unit*):Helper=>Unit = _.u(f:_*)
@inline final def u(key:Any)(f:Helper=>Unit*):Helper=>Unit = _.sub(f:_*).close(key)
}
}
| Y-P-/data-processing-binding | Utils/src/utils/reflect/Binder.scala | Scala | gpl-3.0 | 10,957 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils.tf.loaders
import com.intel.analytics.bigdl.tensor.Tensor
class MinimumSpec extends BinaryOpBaseSpec {
  override def getOpName: String = "Minimum"
override def getInputs: Seq[Tensor[_]] =
Seq(Tensor[Float](4).rand(), Tensor[Float](4).rand())
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/utils/tf/loaders/MinimumSpec.scala | Scala | apache-2.0 | 896 |
package org.squeryl.framework
import org.squeryl.{Session, Query}
import org.squeryl.test.PrimitiveTypeModeForTests._
import org.scalatest.matchers.should.Matchers
trait QueryTester extends Matchers {
var logQueries = false
var validateFirstAndExit = -1
var dumpAst = false
var doNotExecute = false
def activateWorkbenchMode = {
logQueries = true
dumpAst = true
validateFirstAndExit = 0
}
def loggerOn =
Session.currentSession.setLogger((s:String) => println(s))
def log(queryName: String, query:Query[_]) = {
println(queryName + " :")
println(query)
for(r <- query.asInstanceOf[Query[Any]])
println("-->" + r)
}
def validateQuery[R,S](name: String, q:Query[R], mapFunc: R=>S, expected: List[S]): Unit =
validateQuery[R,S](logQueries, name, q, mapFunc, expected)
def validateQuery[R,S](logFirst: Boolean, name: String, q:Query[R], mapFunc: R=>S, expected: List[S]): Unit = {
if(validateFirstAndExit >= 1)
return
// if(dumpAst)
// println(q.dumpAst)
if(logFirst || logQueries)
log(name, q)
if(doNotExecute)
return
val r = q.toList.map(mapFunc)
r should equal(expected)
// if(r == expected)
// println("query " + name + " passed.")
// else {
// val msg =
// "query : " + name + " failed,\n" +
// "expected " + expected + " got " + r + " \n query " + name +
// " was : \n" + q
// org.squeryl.internals.Utils.org.squeryl.internals.Utils.throwError(msg)
// }
if(validateFirstAndExit >= 0)
validateFirstAndExit += 1
}
}
object SingleTestRun extends org.scalatest.Tag("SingleTestRun")
| xuwei-k/Squeryl | src/test/scala/org/squeryl/framework/QueryTester.scala | Scala | apache-2.0 | 1,734 |
/*
Copyright (C) 2013-2019 Expedia Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.hotels.styx.client
import com.github.tomakehurst.wiremock.client.WireMock._
import com.github.tomakehurst.wiremock.client.{ValueMatchingStrategy, WireMock}
import com.hotels.styx.api.HttpRequest.get
import com.hotels.styx.api.HttpResponseStatus._
import com.hotels.styx.support.ResourcePaths.fixturesHome
import com.hotels.styx.support.backends.FakeHttpServer
import com.hotels.styx.support.configuration._
import com.hotels.styx.utils.StubOriginHeader.STUB_ORIGIN_INFO
import com.hotels.styx.{StyxClientSupplier, StyxProxySpec}
import org.scalatest.{FunSpec, SequentialNestedSuiteExecution}
import java.nio.charset.StandardCharsets.UTF_8
class TlsVersionSpec extends FunSpec
with StyxProxySpec
with StyxClientSupplier
with SequentialNestedSuiteExecution {
val logback = fixturesHome(this.getClass, "/conf/logback/logback-debug-stdout.xml")
val appOriginTlsv11 = FakeHttpServer.HttpsStartupConfig(
appId = "appTls11",
originId = "appTls11-01",
protocols = Seq("TLSv1.1")
)
.start()
.stub(WireMock.get(urlMatching("/.*")), originResponse("App TLS v1.1"))
val appOriginTlsv12B = FakeHttpServer.HttpsStartupConfig(
appId = "appTls11B",
originId = "appTls11B-01",
protocols = Seq("TLSv1.2")
)
.start()
.stub(WireMock.get(urlMatching("/.*")), originResponse("App TLS v1.2 B"))
val appOriginTlsDefault = FakeHttpServer.HttpsStartupConfig(
appId = "appTlsDefault",
originId = "appTlsDefault-01",
protocols = Seq("TLSv1.1", "TLSv1.2")
)
.start()
.stub(WireMock.get(urlMatching("/.*")), originResponse("App TLS v1.1"))
val appOriginTlsv12 = FakeHttpServer.HttpsStartupConfig(
appId = "appTls12",
originId = "appTls12-02",
protocols = Seq("TLSv1.2")
)
.start()
.stub(WireMock.get(urlMatching("/.*")), originResponse("App TLS v1.2"))
override val styxConfig = StyxYamlConfig(
"""
|proxy:
| connectors:
| http:
| port: 0
|admin:
| connectors:
| http:
| port: 0
|services:
| factories: {}
""".stripMargin,
logbackXmlLocation = logback)
override protected def beforeAll(): Unit = {
super.beforeAll()
styxServer.setBackends(
"/tls11/" -> HttpsBackend(
"appTls11",
Origins(appOriginTlsv11),
TlsSettings(authenticate = false, sslProvider = "JDK", protocols = List("TLSv1.1"))),
"/tlsDefault/" -> HttpsBackend(
"appTlsDefault",
Origins(appOriginTlsDefault),
TlsSettings(authenticate = false, sslProvider = "JDK", protocols = List("TLSv1.1", "TLSv1.2"))),
"/tls12" -> HttpsBackend(
"appTls12",
Origins(appOriginTlsv12),
TlsSettings(authenticate = false, sslProvider = "JDK", protocols = List("TLSv1.2"))),
"/tls11-to-tls12" -> HttpsBackend(
"appTls11B",
Origins(appOriginTlsv12B),
TlsSettings(authenticate = false, sslProvider = "JDK", protocols = List("TLSv1.1")))
)
}
override protected def afterAll(): Unit = {
appOriginTlsv11.stop()
appOriginTlsv12.stop()
super.afterAll()
}
def httpRequest(path: String) = get(styxServer.routerURL(path)).build()
def valueMatchingStrategy(matches: String) = {
val matchingStrategy = new ValueMatchingStrategy()
matchingStrategy.setMatches(matches)
matchingStrategy
}
describe("Backend Service TLS Protocol Setting") {
it("Proxies to TLSv1.1 origin when TLSv1.1 support enabled.") {
val response1 = decodedRequest(httpRequest("/tls11/a"))
assert(response1.status() == OK)
assert(response1.bodyAs(UTF_8) == "Hello, World!")
appOriginTlsv11.verify(
getRequestedFor(
urlEqualTo("/tls11/a"))
.withHeader("X-Forwarded-Proto", valueMatchingStrategy("http")))
val response2 = decodedRequest(httpRequest("/tlsDefault/a2"))
assert(response2.status() == OK)
assert(response2.bodyAs(UTF_8) == "Hello, World!")
appOriginTlsDefault.verify(
getRequestedFor(
urlEqualTo("/tlsDefault/a2"))
.withHeader("X-Forwarded-Proto", valueMatchingStrategy("http")))
}
it("Proxies to TLSv1.2 origin when TLSv1.2 support is enabled.") {
val response1 = decodedRequest(httpRequest("/tlsDefault/b1"))
assert(response1.status() == OK)
assert(response1.bodyAs(UTF_8) == "Hello, World!")
appOriginTlsDefault.verify(
getRequestedFor(urlEqualTo("/tlsDefault/b1"))
.withHeader("X-Forwarded-Proto", valueMatchingStrategy("http")))
val response2 = decodedRequest(httpRequest("/tls12/b2"))
assert(response2.status() == OK)
assert(response2.bodyAs(UTF_8) == "Hello, World!")
appOriginTlsv12.verify(
getRequestedFor(urlEqualTo("/tls12/b2"))
.withHeader("X-Forwarded-Proto", valueMatchingStrategy("http")))
}
it("Refuses to connect to TLSv1.1 origin when TLSv1.1 is disabled") {
val response = decodedRequest(httpRequest("/tls11-to-tls12/c"))
assert(response.status() == BAD_GATEWAY)
assert(response.bodyAs(UTF_8) == "Site temporarily unavailable.")
appOriginTlsv12B.verify(0, getRequestedFor(urlEqualTo("/tls11-to-tls12/c")))
}
}
def originResponse(appId: String) = aResponse
.withStatus(OK.code())
.withHeader(STUB_ORIGIN_INFO.toString, appId)
.withBody("Hello, World!")
}
| mikkokar/styx | system-tests/e2e-suite/src/test/scala/com/hotels/styx/client/TlsVersionSpec.scala | Scala | apache-2.0 | 5,998 |
import cats.effect.{ IO, IOApp, ExitCode }
import cats.implicits._
import io.circe.Json
import org.http4s.HttpRoutes
import org.http4s.circe._
import org.http4s.syntax._
import org.http4s.dsl.io._
import org.http4s.implicits._
import org.http4s.server.blaze._
object Main extends IOApp {
val helloWorldService = HttpRoutes.of[IO] {
case GET -> Root / "hello" / name =>
Ok(Json.obj("message" -> Json.fromString(s"Hello, ${name}")))
}.orNotFound
def run(args: List[String]): IO[ExitCode] =
BlazeServerBuilder[IO]
.bindHttp(8081, "localhost")
.withHttpApp(helloWorldService)
.serve
.compile
.drain
.as(ExitCode.Success)
}
| t-mochizuki/scala-study | http4s-example/src/main/scala/Main.scala | Scala | mit | 678 |
// goseumdochi: experiments with incarnation
// Copyright 2016 John V. Sichi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.goseumdochi.vision
import org.goseumdochi.common._
import org.bytedeco.javacpp.opencv_core._
import scala.collection._
trait VisionAnalyzer extends AutoCloseable
{
private val debugImages = new mutable.ArrayBuffer[IplImage]
type OverylayRenderFunc = (RetinalOverlay) => Unit
type Debugger = (OverylayRenderFunc) => Unit
protected def newDebugger(inputImg : IplImage) : Debugger =
{
if (settings.Test.visualize) {
val newImage = inputImg.clone
debugImages += newImage
val overlay = new OpenCvRetinalOverlay(
newImage, xform, RetinalPos(newImage.width, newImage.height))
def newDebugger(overlayRenderFunc : OverylayRenderFunc) {
overlayRenderFunc(overlay)
}
newDebugger
} else {
def nullDebugger(func : OverylayRenderFunc) {
}
nullDebugger
}
}
def getDebugImages = debugImages.toIndexedSeq
def analyzeFrame(
imageDeck : ImageDeck, frameTime : TimePoint,
hintBodyPos : Option[PlanarPos])
: Iterable[VisionActor.AnalyzerResponseMsg]
def settings : Settings
def retinalTransformProvider : RetinalTransformProvider
def xform = retinalTransformProvider.getRetinalTransform
override def close() {}
def isLongLived() : Boolean = false
}
class NullVisionAnalyzer(
val settings : Settings,
val retinalTransformProvider : RetinalTransformProvider)
extends VisionAnalyzer
{
override def analyzeFrame(
imageDeck : ImageDeck, frameTime : TimePoint,
hintBodyPos : Option[PlanarPos])
: Iterable[VisionActor.AnalyzerResponseMsg] =
{
None
}
}
| lingeringsocket/goseumdochi | base/src/main/scala/org/goseumdochi/vision/VisionAnalyzer.scala | Scala | apache-2.0 | 2,242 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.json
import org.apache.spark.sql.catalyst.{StructFilters, StructFiltersSuite}
import org.apache.spark.sql.sources
import org.apache.spark.sql.types.StructType
class JsonFiltersSuite extends StructFiltersSuite {
override def createFilters(filters: Seq[sources.Filter], schema: StructType): StructFilters = {
new JsonFilters(filters, schema)
}
}
| mahak/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/json/JsonFiltersSuite.scala | Scala | apache-2.0 | 1,194 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package controller.api
import controllers.KafkaManagerContext
import controllers.api.KafkaHealthCheck
import kafka.manager.utils.{CuratorAwareTest, KafkaServerInTest}
import kafka.test.SeededBroker
import play.api.Play
import play.api.libs.json.Json
import play.api.test.Helpers._
import play.api.test.{FakeApplication, FakeRequest}
import play.mvc.Http.Status.{BAD_REQUEST, OK}
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try
class TestKafkaHealthCheck extends CuratorAwareTest with KafkaServerInTest {
private[this] val broker = new SeededBroker("controller-api-test",4)
override val kafkaServerZkPath = broker.getZookeeperConnectionString
private[this] val duration = FiniteDuration(10,SECONDS)
private[this] val testClusterName = "kafka-hc-test-cluster"
private[this] val testTopicName = "kafka-hc-test-topic"
override protected def beforeAll() : Unit = {
super.beforeAll()
lazy val app : FakeApplication = {
FakeApplication(additionalConfiguration = Map("kafka-manager.zkhosts" -> kafkaServerZkPath))
}
Play.start(app)
createCluster()
createTopic()
Thread.sleep(10000)
}
override protected def afterAll(): Unit = {
disableCluster()
deleteCluster()
Play.stop()
Try(broker.shutdown())
super.afterAll()
}
private[this] def createCluster() = {
val future = KafkaManagerContext.getKafkaManager.addCluster(testClusterName,"0.8.2.0",kafkaServerZkPath, jmxEnabled = false, filterConsumers = true)
val result = Await.result(future,duration)
result.toEither.left.foreach(apiError => sys.error(apiError.msg))
Thread.sleep(3000)
}
private[this] def createTopic() = {
val future = KafkaManagerContext.getKafkaManager.createTopic(testClusterName,testTopicName,4,1)
val result = Await.result(future,duration)
result.toEither.left.foreach(apiError => sys.error(apiError.msg))
}
private[this] def deleteTopic() = {
val future = KafkaManagerContext.getKafkaManager.deleteTopic(testClusterName,testTopicName)
val result = Await.result(future,duration)
}
private[this] def disableCluster() = {
val future = KafkaManagerContext.getKafkaManager.disableCluster(testClusterName)
Await.result(future, duration)
Thread.sleep(3000)
}
private[this] def deleteCluster() = {
val future = KafkaManagerContext.getKafkaManager.deleteCluster(testClusterName)
Await.result(future,duration)
Thread.sleep(3000)
}
test("get available brokers") {
val future = KafkaHealthCheck.availableBrokers(testClusterName).apply(FakeRequest())
assert(status(future) === OK)
assert(contentAsJson(future) === Json.obj("availableBrokers" -> Seq(0)))
}
test("get available brokers in non-existing cluster") {
val future = KafkaHealthCheck.availableBrokers("non-existent").apply(FakeRequest())
assert(status(future) === BAD_REQUEST)
}
test("get under-replicated partitions") {
val future = KafkaHealthCheck.underReplicatedPartitions(testClusterName, testTopicName).apply(FakeRequest())
assert(status(future) === OK)
assert(contentAsJson(future) === Json.obj("topic" -> testTopicName, "underReplicatedPartitions" -> Seq.empty[Int]))
}
test("get under-replicated partitions of non-existing topic in non-existing cluster") {
val future = KafkaHealthCheck.underReplicatedPartitions("non-existent", "weird").apply(FakeRequest())
assert(status(future) === BAD_REQUEST)
}
test("get unavailable partitions") {
val future = KafkaHealthCheck.unavailablePartitions(testClusterName, testTopicName).apply(FakeRequest())
assert(status(future) == OK)
assert(contentAsJson(future) == Json.obj("topic" -> testTopicName, "unavailablePartitions" -> Seq.empty[Int]))
}
test("get unavailable partitions of non-existing topic in non-existing cluster") {
val future = KafkaHealthCheck.unavailablePartitions("non-existent", "weird").apply(FakeRequest())
assert(status(future) === BAD_REQUEST)
}
}
| sdgdsffdsfff/kafka-manager | test/controller/api/TestKafkaHealthCheck.scala | Scala | apache-2.0 | 4,134 |
package com.github.aselab.activerecord.io
import com.github.aselab.activerecord._
import reflections._
import java.util.{Date, UUID, TimeZone}
import java.sql.Timestamp
import com.github.nscala_time.time.Imports._
import org.joda.time.format.ISODateTimeFormat
trait Converter[A, B] {
def serialize(v: Any): B
def deserialize(s: B): A
}
trait FormConverter[T] extends Converter[T, String] {
override def serialize(v: Any): String = Option(v).map(_.toString).orNull
}
object FormConverter extends PrimitiveHandler[FormConverter[_]] {
val stringHandler = new FormConverter[String] {
def deserialize(s: String): String = s
}
val intHandler = new FormConverter[java.lang.Integer] {
def deserialize(s: String): java.lang.Integer = s.toInt
}
val longHandler = new FormConverter[java.lang.Long] {
def deserialize(s: String): java.lang.Long = s.toLong
}
val doubleHandler = new FormConverter[java.lang.Double] {
def deserialize(s: String): java.lang.Double = s.toDouble
}
val booleanHandler = new FormConverter[java.lang.Boolean] {
def deserialize(s: String): java.lang.Boolean = s.toBoolean
}
val bigDecimalHandler = new FormConverter[BigDecimal] {
def deserialize(s: String): BigDecimal = BigDecimal(s)
}
val floatHandler = new FormConverter[java.lang.Float] {
def deserialize(s: String): java.lang.Float = s.toFloat
}
val timestampHandler = new FormConverter[Timestamp] {
override def serialize(v: Any): String = {
new DateTime(v, Config.timeZone).toString(Config.datetimeFormatter)
}
def deserialize(s: String): Timestamp =
new Timestamp(Config.datetimeFormatter.parseDateTime(s).getMillis)
}
val uuidHandler = new FormConverter[UUID] {
def deserialize(s: String): UUID = UUID.fromString(s)
}
val dateHandler = new FormConverter[Date] {
override def serialize(v: Any): String =
new DateTime(v, Config.timeZone).toString(Config.dateFormatter)
def deserialize(s: String): Date =
Config.dateFormatter.parseDateTime(s).toDate
}
}
| xdougx/scala-activerecord | activerecord/src/main/scala/io/converters.scala | Scala | mit | 2,059 |
package com.rasterfoundry.backsplash.server
import com.rasterfoundry.backsplash.HistogramStore
import com.rasterfoundry.backsplash.HistogramStore.ToHistogramStoreOps
import com.rasterfoundry.database.LayerAttributeDao
import cats.effect.IO
import cats.implicits._
import com.typesafe.scalalogging.LazyLogging
import doobie.Transactor
import geotrellis.raster.histogram._
import geotrellis.raster.io.json._
import java.util.UUID
trait HistogramStoreImplicits
extends ToHistogramStoreOps
with HistogramJsonFormats
with LazyLogging {
val xa: Transactor[IO]
@SuppressWarnings(Array("TraversableHead"))
private def mergeHistsForBands(
bands: List[Int],
hists: List[Array[Histogram[Double]]]): Array[Histogram[Double]] = {
val combinedHistogram = hists.foldLeft(
Array.fill(hists.head.length)(
StreamingHistogram(255): Histogram[Double]))(
(histArr1: Array[Histogram[Double]],
histArr2: Array[Histogram[Double]]) => {
histArr1 zip histArr2 map {
case (h1, h2) => h1 merge h2
}
}
)
bands.toArray map { band =>
combinedHistogram(band)
}
}
private def handleBandsOutOfRange(
result: Either[Throwable, Array[Histogram[Double]]],
layerId: UUID,
subsetBands: List[Int]) = result match {
case Left(_: ArrayIndexOutOfBoundsException) =>
logger.warn(
s"Requested bands not available in layer $layerId: $subsetBands")
IO { Array.empty[Histogram[Double]] }
case Left(e) =>
IO.raiseError(e)
case Right(hists) =>
IO.pure { hists }
}
implicit val layerAttributeHistogramStore: HistogramStore[LayerAttributeDao] =
new HistogramStore[LayerAttributeDao] {
def layerHistogram(self: LayerAttributeDao,
layerId: UUID,
subsetBands: List[Int]) = {
self
.getHistogram(layerId, xa)
.map({ (hists: Array[Histogram[Double]]) =>
subsetBands.toArray map { band =>
hists(band)
}
})
.attempt
} flatMap { handleBandsOutOfRange(_, layerId, subsetBands) }
def projectLayerHistogram(
self: LayerAttributeDao,
projectLayerId: UUID,
subsetBands: List[Int]
): IO[Array[Histogram[Double]]] = {
self
.getProjectLayerHistogram(projectLayerId, xa)
.map({ hists =>
mergeHistsForBands(subsetBands, hists)
})
.attempt flatMap {
handleBandsOutOfRange(_, projectLayerId, subsetBands)
}
}
}
}
| azavea/raster-foundry | app-backend/backsplash-server/src/main/scala/com/rasterfoundry/backsplash/implicits/HistogramStoreImplicits.scala | Scala | apache-2.0 | 2,612 |
/**
* (c) Copyright 2013 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.schema.shell.ddl
import java.io.File
import org.kiji.schema.shell.DDLException
import org.kiji.schema.shell.LocalJarFile
import com.google.common.io.Files
class TestUseJarCommand extends CommandTestCase {
"UseJarCommand" should {
"should validate the jar file exists" in {
val command = new UseJarCommand(env, LocalJarFile("/does/not/exist.jar"))
command.validateJar() must throwAn[DDLException](
"The path you specified for the jar does not exist.")
}
"should validate the jar file is not a directory" in {
val tempDir = Files.createTempDir()
val jarLocation = LocalJarFile(tempDir.getAbsolutePath())
val command = new UseJarCommand(env, jarLocation)
command.validateJar() must throwA[DDLException](
"The path you specified for the jar points to a directory.")
}
"should validate the jar file ends in .jar" in {
val tempDir = Files.createTempDir()
val jarFile = new File(tempDir, "myFile.txt")
jarFile.createNewFile()
val jarLocation = LocalJarFile(jarFile.getAbsolutePath())
val command = new UseJarCommand(env, jarLocation)
command.validateJar() must throwA[DDLException](
"You must specify the path to a file with extension .jar or .JAR")
}
"should add a jar file to the environment" in {
val tempDir = Files.createTempDir()
val jarFile = new File(tempDir, "myJar.jar")
jarFile.createNewFile()
val jarLocation = LocalJarFile(jarFile.getAbsolutePath())
val command = new UseJarCommand(env, jarLocation)
val envWithJar = command.exec()
envWithJar.libJars.size must beEqualTo(1)
val LocalJarFile(actualPath) = envWithJar.libJars(0)
jarFile.getAbsolutePath() must beEqualTo(actualPath)
}
}
}
| kijiproject/kiji-schema-shell | src/test/scala/org/kiji/schema/shell/ddl/TestUseJarCommand.scala | Scala | apache-2.0 | 2,525 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.persistence.jdbc
import akka.actor.ActorSystem
import com.lightbend.lagom.internal.persistence.jdbc.SlickDbProvider
import com.lightbend.lagom.internal.persistence.jdbc.SlickOffsetStore
import com.lightbend.lagom.internal.persistence.jdbc.SlickProvider
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.JdbcPersistentEntityRegistry
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.JdbcReadSideImpl
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.JdbcSessionImpl
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.OffsetTableConfiguration
import com.lightbend.lagom.scaladsl.persistence.PersistenceComponents
import com.lightbend.lagom.scaladsl.persistence.PersistentEntityRegistry
import com.lightbend.lagom.scaladsl.persistence.ReadSidePersistenceComponents
import com.lightbend.lagom.scaladsl.persistence.WriteSidePersistenceComponents
import com.lightbend.lagom.spi.persistence.OffsetStore
import play.api.db.DBComponents
import scala.concurrent.ExecutionContext
/**
* Persistence JDBC components (for compile-time injection).
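 *
 * A hypothetical wiring sketch (the application class and the HikariCP pool component are
 * assumptions, not defined in this file):
 * {{{
 * abstract class MyApplication(context: LagomApplicationContext)
 *   extends LagomApplication(context)
 *   with JdbcPersistenceComponents
 *   with HikariCPComponents {
 *   // ...
 * }
 * }}}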
*/
trait JdbcPersistenceComponents
extends PersistenceComponents
with ReadSideJdbcPersistenceComponents
with WriteSideJdbcPersistenceComponents
private[lagom] trait SlickProviderComponents extends DBComponents {
def actorSystem: ActorSystem
def executionContext: ExecutionContext
lazy val slickProvider: SlickProvider = {
// Ensures JNDI bindings are made before we build the SlickProvider
SlickDbProvider.buildAndBindSlickDatabases(dbApi, actorSystem.settings.config, applicationLifecycle)
new SlickProvider(actorSystem)(executionContext)
}
}
/**
* Write-side persistence JDBC components (for compile-time injection).
*/
trait WriteSideJdbcPersistenceComponents extends WriteSidePersistenceComponents with SlickProviderComponents {
def actorSystem: ActorSystem
def executionContext: ExecutionContext
override lazy val persistentEntityRegistry: PersistentEntityRegistry =
new JdbcPersistentEntityRegistry(actorSystem, slickProvider)
}
/**
* Read-side persistence JDBC components (for compile-time injection).
*/
trait ReadSideJdbcPersistenceComponents extends ReadSidePersistenceComponents with SlickProviderComponents {
lazy val offsetTableConfiguration: OffsetTableConfiguration = new OffsetTableConfiguration(
configuration.underlying,
readSideConfig
)
private[lagom] lazy val slickOffsetStore: SlickOffsetStore =
new SlickOffsetStore(actorSystem, slickProvider, offsetTableConfiguration)
lazy val offsetStore: OffsetStore = slickOffsetStore
lazy val jdbcReadSide: JdbcReadSide = new JdbcReadSideImpl(slickProvider, slickOffsetStore)(executionContext)
lazy val jdbcSession: JdbcSession = new JdbcSessionImpl(slickProvider)
}
| rcavalcanti/lagom | persistence-jdbc/scaladsl/src/main/scala/com/lightbend/lagom/scaladsl/persistence/jdbc/JdbcPersistenceComponents.scala | Scala | apache-2.0 | 2,911 |
/*
* Copyright 2014 - 2015 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package slamdata.engine.physical.mongodb
import slamdata.Predef._
import scalaz._
import Scalaz._
import slamdata.engine.fp._
import slamdata.engine.{RenderTree, Terminal, NonTerminal}
import slamdata.engine.javascript._
import IdHandling._
/**
A WorkflowTask approximately represents one request to MongoDB.
*/
sealed trait WorkflowTask
object WorkflowTask {
import slamdata.engine.physical.mongodb.expression._
import Workflow._
type Pipeline = List[PipelineOp]
implicit def WorkflowTaskRenderTree(implicit RC: RenderTree[Collection], RO: RenderTree[WorkflowF[Unit]], RJ: RenderTree[Js], RS: RenderTree[Selector]) =
new RenderTree[WorkflowTask] {
val WorkflowTaskNodeType = "WorkflowTask" :: "Workflow" :: Nil
def render(task: WorkflowTask) = task match {
case ReadTask(value) => RC.render(value).copy(nodeType = "ReadTask" :: WorkflowTaskNodeType)
case PipelineTask(source, pipeline) =>
val nt = "PipelineTask" :: WorkflowTaskNodeType
NonTerminal(nt, None,
render(source) ::
NonTerminal("Pipeline" :: nt, None, pipeline.map(RO.render(_))) ::
Nil)
case FoldLeftTask(head, tail) =>
NonTerminal("FoldLeftTask" :: WorkflowTaskNodeType, None,
render(head) ::
tail.map(render(_)).toList)
case MapReduceTask(source, MapReduce(map, reduce, outOpt, selectorOpt, sortOpt, limitOpt, finalizerOpt, scopeOpt, jsModeOpt, verboseOpt)) =>
val nt = "MapReduceTask" :: WorkflowTaskNodeType
NonTerminal(nt, None,
render(source) ::
RJ.render(map) ::
RJ.render(reduce) ::
Terminal("Out" :: nt, Some(outOpt.toString)) ::
selectorOpt.map(RS.render(_)).getOrElse(Terminal("None" :: Nil, None)) ::
sortOpt.map(keys => NonTerminal("Sort" :: nt, None,
(keys.map { case (expr, ot) => Terminal("Key" :: "Sort" :: nt, Some(expr.toString + " -> " + ot)) } ).toList)).getOrElse(Terminal("None" :: Nil, None)) ::
Terminal("Limit" :: nt, Some(limitOpt.toString)) ::
finalizerOpt.map(RJ.render(_)).getOrElse(Terminal("None" :: Nil, None)) ::
Terminal("Scope" :: nt, Some(scopeOpt.toString)) ::
Terminal("JsMode" :: nt, Some(jsModeOpt.toString)) ::
Nil)
case _ => Terminal(WorkflowTaskNodeType, Some(task.toString))
}
}
/**
Run once a task is known to be completely built.
*/
def finish(base: DocVar, task: WorkflowTask):
(DocVar, WorkflowTask) = task match {
case PipelineTask(src, pipeline) =>
// possibly toss duplicate `_id`s created by `Unwind`s
val uwIdx = pipeline.lastIndexWhere {
case $Unwind(_, _) => true;
case _ => false
}
// we’re fine if there’s no `Unwind`, or some existing op fixes the `_id`s
if (uwIdx == -1 ||
pipeline.indexWhere(
{ case $Group(_, _, _) => true
case $Project(_, _, ExcludeId) => true
case _ => false
},
uwIdx) != -1)
(base, task)
else shape(pipeline) match {
case Some(names) =>
(DocVar.ROOT(),
PipelineTask(
src,
pipeline :+
$Project((),
Reshape(names.map(n => n -> -\\/($var(DocField(n)))).toListMap),
ExcludeId)))
case None =>
(Workflow.ExprVar,
PipelineTask(
src,
pipeline :+
$Project((),
Reshape(ListMap(Workflow.ExprName -> -\\/($var(base)))),
ExcludeId)))
}
case _ => (base, task)
}
private def shape(p: Pipeline): Option[List[BsonField.Name]] = {
def src = shape(p.dropRight(1))
p.lastOption.flatMap(_ match {
case op: ShapePreservingF[_] => src
case $Project((), Reshape(shape), _) => Some(shape.keys.toList)
case $Group((), Grouped(shape), _) => Some(shape.keys.map(_.toName).toList)
case $Unwind((), _) => src
case $Redact((), _) => None
case $GeoNear((), _, _, _, _, _, _, _, _, _) => src.map(_ :+ BsonField.Name("dist"))
})
}
/**
* A task that returns a necessarily small amount of raw data.
*/
final case class PureTask(value: Bson) extends WorkflowTask
/**
* A task that merely sources data from some specified collection.
*/
final case class ReadTask(value: Collection) extends WorkflowTask
/**
* A task that executes a Mongo read query.
*/
final case class QueryTask(
source: WorkflowTask,
query: FindQuery,
skip: Option[Int],
limit: Option[Int])
extends WorkflowTask
/**
* A task that executes a Mongo pipeline aggregation.
*/
final case class PipelineTask(source: WorkflowTask, pipeline: Pipeline)
extends WorkflowTask
/**
* A task that executes a Mongo map/reduce job.
*/
final case class MapReduceTask(source: WorkflowTask, mapReduce: MapReduce)
extends WorkflowTask
/**
* A task that executes a sequence of other tasks, one at a time, collecting
* the results in the same collection. The first task must produce a new
* collection, and the remaining tasks must be able to merge their results
* into an existing collection, hence the types.
*/
final case class FoldLeftTask(head: WorkflowTask, tail: NonEmptyList[MapReduceTask])
extends WorkflowTask
/**
* A task that evaluates some code on the server. The JavaScript function
* must accept two parameters: the source collection, and the destination
* collection.
*/
// final case class EvalTask(source: WorkflowTask, code: Js.FuncDecl)
// extends WorkflowTask
}
| wemrysi/quasar | core/src/main/scala/slamdata/engine/physical/mongodb/workflowtask.scala | Scala | apache-2.0 | 6,502 |
package com.nefariouszhen.alert
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import io.dropwizard.Bundle
import io.dropwizard.setup.{Environment, Bootstrap}
class ScalaBundle extends Bundle {
def initialize(bootstrap: Bootstrap[_]): Unit = {
bootstrap.getObjectMapper.registerModule(DefaultScalaModule)
}
def run(environment: Environment): Unit = {
// Do Nothing
}
}
| nbauernfeind/xmpp-alert | src/main/scala/com/nefariouszhen/alert/ScalaBundle.scala | Scala | apache-2.0 | 402 |
/*
* Copyright (C) 2013 Bernhard Berger
*
* Based on pegdown (C) 2010-2011 Mathias Doenitz and
* peg-markdown (C) 2008-2010 John MacFarlane.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package info.hocdoc.markdown
import scala.util.Random
/**
* Holds simple HTML encoding logic.
 * Despite its name, it may no longer be as fast as the original Java FastEncoder.
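 * For example, `encode("<a & b>")` yields `"&lt;a &amp; b&gt;"`, while `obfuscate`
 * rewrites an e-mail address as a random mix of decimal and hex character references.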
*/
object FastEncoder {
def encode(string: String): String = string.map(c => encode(c)).mkString
def encode(c: Char): String = c match {
case '&' => "&"
case '<' => "<"
case '>' => ">"
case '"' => """
case '\'' => "'"
case _ => c.toString
}
private val random = new Random(0x2626);
def obfuscate(email: String): String = {
email.map(c => random.nextInt(5) match {
case 0 | 1 => "&#" + c.toInt.toString + ";"
case 2 | 3 => "&#x" + c.toInt.toHexString + ";"
case _ => encode(c)
}).mkString
}
} | Hocdoc/pegdownScala | src/main/scala/info/hocdoc/markdown/FastEncoder.scala | Scala | apache-2.0 | 1,465 |
package chandu0101.scalajs.react.components
package materialui
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import scala.scalajs.js
import scala.scalajs.js.`|`
/**
* This file is generated - submit issues instead of PR against it
*/
class Ampm_24hr(val value: String) extends AnyVal
object Ampm_24hr {
val ampm = new Ampm_24hr("ampm")
val _24hr = new Ampm_24hr("24hr")
val values = List(ampm, _24hr)
}
class BeforeAfter(val value: String) extends AnyVal
object BeforeAfter {
val before = new BeforeAfter("before")
val after = new BeforeAfter("after")
val values = List(before, after)
}
class DeterminateIndeterminate(val value: String) extends AnyVal
object DeterminateIndeterminate {
val determinate = new DeterminateIndeterminate("determinate")
val indeterminate = new DeterminateIndeterminate("indeterminate")
val values = List(determinate, indeterminate)
}
class DialogInline(val value: String) extends AnyVal
object DialogInline {
val dialog = new DialogInline("dialog")
val inline = new DialogInline("inline")
val values = List(dialog, inline)
}
class LeftRight(val value: String) extends AnyVal
object LeftRight {
val left = new LeftRight("left")
val right = new LeftRight("right")
val values = List(left, right)
}
class LeftRightCenter(val value: String) extends AnyVal
object LeftRightCenter {
val left = new LeftRightCenter("left")
val right = new LeftRightCenter("right")
val center = new LeftRightCenter("center")
val values = List(left, right, center)
}
class NoneFocusedKeyboard_focused(val value: String) extends AnyVal
object NoneFocusedKeyboard_focused {
val none = new NoneFocusedKeyboard_focused("none")
val focused = new NoneFocusedKeyboard_focused("focused")
val keyboard_focused = new NoneFocusedKeyboard_focused("keyboard-focused")
val values = List(none, focused, keyboard_focused)
}
class PortraitLandscape(val value: String) extends AnyVal
object PortraitLandscape {
val portrait = new PortraitLandscape("portrait")
val landscape = new PortraitLandscape("landscape")
val values = List(portrait, landscape)
}
class ReadyLoadingHide(val value: String) extends AnyVal
object ReadyLoadingHide {
val ready = new ReadyLoadingHide("ready")
val loading = new ReadyLoadingHide("loading")
val hide = new ReadyLoadingHide("hide")
val values = List(ready, loading, hide)
}
class TopBottom(val value: String) extends AnyVal
object TopBottom {
val top = new TopBottom("top")
val bottom = new TopBottom("bottom")
val values = List(top, bottom)
}
class _1_2(val value: String) extends AnyVal
object _1_2 {
val _1 = new _1_2("1")
val _2 = new _1_2("2")
val values = List(_1, _2)
}
| elacin/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/materialui/gen-types.scala | Scala | apache-2.0 | 2,718 |
package ru.primetalk.typed.expressions
import org.scalatest.FunSuite
import ru.primetalk.language.russian.RussianNumerals
import ru.primetalk.language.russian.morphology.LemmaGrammarCategory
/**
* @author zhizhelev, 21.10.14.
*/
class ParserTests extends FunSuite {
trait MyParsers extends Parsers {
override type Lemma = LemmaGrammarCategory.Lemma
override def wordToLemma(word: String) = RussianNumerals.toLemma(word)
override def lemmasForNumber(n: Long): LemmaStream =
Seq(LemmaGrammarCategory.NumericalLemma(n))
val p: Parser[Long]
lazy val parser: SimpleParser[Long] = p
def parse(text: String): Long = parser(text)
}
test("simple direct mapping") {
new MyParsers {
val p = backTrackingParser(`[0]`)
val res = p(Seq("ноль").map(wordToLemma))
assert(res === Success(0L, Seq()))
}
}
test("Small numbers") {
new MyParsers {
val p = backTrackingParser(`[1..19]`)
val res = parse("пять")
assert(res === 5L)
}
}
test("two digit numbers") {
new MyParsers {
val p = backTrackingParser(`[1..99]`)
assert(parse("двадцать семь") === 27L)
assert(parse("тридцать") === 30L)
}
}
test("thousands") {
new MyParsers {
val p = backTrackingParser(`[1..999 999]`)
assert(parse("двадцать семь тысяч двести одиннадцать") === 27211L)
assert(parse("одна тысяча") === 1000L)
}
}
test("millions") {
new MyParsers {
val p = backTrackingParser(range1To999Order(1e6.toLong))
assert(parse("двадцать семь миллионов три тысячи двести сорок пять") === 27003245L)
}
}
case class A[T](value: T)
case class B[T](value: T)
case class MyPair[T <: Int](a: A[T], b: B[T])
def swap[T](p: Any): (A[Int], B[Int]) =
p match {
case MyPair(a, b) =>
(A(b.value), B(a.value))
}
test("case ") {
val x = MyPair[Int](A(1), B(2))
val y = swap(x)
assert(y._1.value === 2)
}
}
| Primetalk/synapse-expressions | synapse-typed-expressions/src/test/scala/ru/primetalk/typed/expressions/ParserTests.scala | Scala | bsd-2-clause | 2,100 |
package x7c1.wheat.splicer.core
trait StringLike[A]{
def from(x: A): String
}
object StringLike {
implicit object str extends StringLike[String]{
override def from(x: String): String = x
}
}
| x7c1/Wheat | wheat-splicer/src/test/scala/x7c1/wheat/splicer/core/StringLike.scala | Scala | mit | 203 |
package coursier.cli.launch
import caseapp.{ExtraName => Short, HelpMessage => Help, ValueDescription => Value, _}
import coursier.cli.install.SharedChannelOptions
import coursier.cli.jvm.SharedJavaOptions
import coursier.cli.options.SharedLaunchOptions
import coursier.install.RawAppDescriptor
// format: off
@ArgsName("org:name:version|app-name[:version]*")
final case class LaunchOptions(
@Recurse
sharedOptions: SharedLaunchOptions = SharedLaunchOptions(),
@Recurse
sharedJavaOptions: SharedJavaOptions = SharedJavaOptions(),
@Recurse
channelOptions: SharedChannelOptions = SharedChannelOptions(),
@Help("Add Java command-line options, when forking")
@Value("option")
javaOpt: List[String] = Nil,
fetchCacheIKnowWhatImDoing: Option[String] = None,
@Help("Launch child application via execve (replaces the coursier process)")
execve: Option[Boolean] = None,
json: Boolean = false, // move to SharedLaunchOptions? (and handle it from the other commands too)
jep: Boolean = false
) {
// format: on
def addApp(app: RawAppDescriptor): LaunchOptions =
copy(
sharedOptions = sharedOptions.addApp(app)
)
def app: RawAppDescriptor =
sharedOptions.app
}
object LaunchOptions {
implicit val parser = Parser[LaunchOptions]
implicit val help = caseapp.core.help.Help[LaunchOptions]
}
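// Hedged usage note (added for illustration, not from the original source): caseapp derives
// kebab-case command-line flags from the field names above, so `javaOpt` should surface as
// `--java-opt`. A hypothetical invocation could look like
//   cs launch org.scalameta:scalafmt-cli:latest.release --java-opt -Xmx256m
// App coordinates and flag names here are assumptions; `cs launch --help` is authoritative.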
| alexarchambault/coursier | modules/cli/src/main/scala/coursier/cli/launch/LaunchOptions.scala | Scala | apache-2.0 | 1,356 |
/**
* @author Yuuto
*/
package yuuto.enhancedinventories.client.renderer
import java.awt.Color
import net.minecraft.block.Block
import net.minecraft.client.model.ModelChest
import net.minecraft.client.model.ModelLargeChest
import net.minecraft.client.renderer.Tessellator
import net.minecraft.client.renderer.texture.TextureMap
import net.minecraft.client.renderer.tileentity.TileEntitySpecialRenderer
import net.minecraft.tileentity.TileEntity
import net.minecraft.util.IIcon
import net.minecraft.util.ResourceLocation
import net.minecraftforge.common.util.ForgeDirection
import org.lwjgl.opengl.GL11
import org.lwjgl.opengl.GL12
import yuuto.enhancedinventories.client.renderer.model.ModelImprovedChestCore
import yuuto.enhancedinventories.client.renderer.model.ModelImprovedChestDoubleCore
import yuuto.enhancedinventories.materials.FrameMaterial
import yuuto.enhancedinventories.materials.FrameMaterials
import yuuto.enhancedinventories.tile.TileImprovedChest;
import yuuto.enhancedinventories.ref.ReferenceEI
object TileImprovedChestRenderer{
final val instance:TileImprovedChestRenderer = new TileImprovedChestRenderer();
}
class TileImprovedChestRenderer extends TileEntitySpecialRenderer{
//Textures
val woolTexture:ResourceLocation = new ResourceLocation(ReferenceEI.MOD_ID.toLowerCase(), "textures/uvs/normalChestWool.png");
val woolTexture2:ResourceLocation = new ResourceLocation(ReferenceEI.MOD_ID.toLowerCase(), "textures/uvs/doubleChestWool.png");
val frameTextures:Array[ResourceLocation] = Array(
new ResourceLocation(ReferenceEI.MOD_ID.toLowerCase(), "textures/uvs/normalChestFrame.png"),
new ResourceLocation(ReferenceEI.MOD_ID.toLowerCase(), "textures/uvs/normalChestFrameStone.png"),
new ResourceLocation(ReferenceEI.MOD_ID.toLowerCase(), "textures/uvs/normalChestFrameObsidian.png")
);
val frameTexturesDouble:Array[ResourceLocation] = Array(
new ResourceLocation(ReferenceEI.MOD_ID.toLowerCase(), "textures/uvs/doubleChestFrame.png"),
new ResourceLocation(ReferenceEI.MOD_ID.toLowerCase(), "textures/uvs/doubleChestFrameStone.png"),
new ResourceLocation(ReferenceEI.MOD_ID.toLowerCase(), "textures/uvs/doubleChestFrameObsidian.png")
);
//Models
val modelSingleCore:ModelImprovedChestCore = new ModelImprovedChestCore();
val modelDoubleCore:ModelImprovedChestDoubleCore = new ModelImprovedChestDoubleCore();
val modelChest:ModelChest = new ModelChest();
val modelDoubleChest:ModelLargeChest = new ModelLargeChest();
//Icons
val icons:Array[IIcon] = new Array[IIcon](6);
override def renderTileEntityAt(tile:TileEntity, x:Double,
y:Double, z:Double, partialTick:Float) {
if(tile == null || !tile.isInstanceOf[TileImprovedChest])
return;
val chest:TileImprovedChest = tile.asInstanceOf[TileImprovedChest];
if(chest.decor == null)
return;
GL11.glPushMatrix();
//finds rotation
var angle:Float = 0;
chest.facing match{
case ForgeDirection.SOUTH=>angle = 180f;
case ForgeDirection.WEST=>angle = 90f;
case ForgeDirection.EAST=>angle = -90f;
case default=>{}
}
//finds door rotaion
var f1:Float = chest.prevLidAngle + (chest.lidAngle - chest.prevLidAngle) * partialTick;
f1 = 1.0F - f1;
f1 = 1.0F - f1 * f1 * f1;
f1 = (f1 * Math.PI.asInstanceOf[Float]/ 2.0F);
//Set Gl11 state
GL11.glTranslated(x+0.5, y+0.5, z+0.5);
GL11.glRotatef(angle, 0, 1, 0);
GL11.glTranslated(-0.5, -0.5, -0.5);
//render connected
if(chest.isConnected()){
if(chest.isMain()){
val x1:Int = if(chest.facing == ForgeDirection.EAST || chest.facing == ForgeDirection.NORTH){1}else{0};
renderCore(chest, x1, 0, 0, partialTick, f1, true);
renderFrame(chest, x1, 0, 0, partialTick, f1, true);
renderWool(chest, x1, 0, 0, partialTick, f1, true);
}
//render single
}else{
renderCore(chest, 0, 0, 0, partialTick, f1, false);
renderFrame(chest, 0, 0, 0, partialTick, f1, false);
renderWool(chest, 0, 0, 0, partialTick, f1, false);
}
GL11.glPopMatrix();
}
def renderCore(inv:TileImprovedChest, x:Double,
y:Double, z:Double, partialTick:Float, doorAngle:Float, doubleChest:Boolean){
GL11.glPushMatrix();
GL11.glEnable(GL12.GL_RESCALE_NORMAL);
GL11.glColor4f(1, 1, 1, 1);
GL11.glTranslated(x, y, z);
//Render core block
val t:Tessellator = Tessellator.instance;
if(inv.decor.coreBlock != null){
val b:Block = inv.decor.coreBlock;
val meta:Int = inv.decor.coreMetadata;
bindTexture(TextureMap.locationBlocksTexture);
for(i <-0 until icons.length){
icons(i) = b.getIcon(i, meta);
}
if(doubleChest){
modelDoubleCore.rotaionAngle = doorAngle;
modelDoubleCore.lightValue = b.getMixedBrightnessForBlock(inv.getWorldObj(), inv.xCoord, inv.yCoord, inv.zCoord);
modelDoubleCore.makeChestCoreList(icons, t);
}else{
modelSingleCore.rotaionAngle = doorAngle;
modelSingleCore.lightValue = b.getMixedBrightnessForBlock(inv.getWorldObj(), inv.xCoord, inv.yCoord, inv.zCoord);
modelSingleCore.drawChestCore(icons, t);
}
}
GL11.glPopMatrix();
GL11.glDisable(GL12.GL_RESCALE_NORMAL);
GL11.glColor4f(1, 1, 1, 1);
}
def renderFrame(inv:TileImprovedChest, x:Double,
y:Double, z:Double, partialTick:Float, doorAngle:Float, doubleChest:Boolean){
//Get frame material and color
var mat:FrameMaterial = inv.decor.frameMaterial;
if(inv.isReinforced() && !inv.isPainted())
mat = FrameMaterials.Obsidian;
if(mat == null)
mat = FrameMaterials.Stone;
val c:Color = mat.color();
GL11.glPushMatrix();
GL11.glEnable(GL12.GL_RESCALE_NORMAL);
GL11.glTranslated(x+1, y+1, z);
GL11.glColor4f(c.getRed()/255f, c.getGreen()/255f, c.getBlue()/255f, c.getAlpha()/255f);
//render chest frame
if(doubleChest){
GL11.glScalef(-1, -1, 1);
this.modelDoubleChest.chestLid.rotateAngleX = -doorAngle;
this.bindTexture(frameTexturesDouble(mat.getTextureIndex()));
this.modelDoubleChest.renderAll();
}else{
GL11.glRotatef(180, 0, 0, 1);
this.modelChest.chestLid.rotateAngleX = -doorAngle;//-(doorAngle * (float)Math.PI / 2.0F);
this.bindTexture(frameTextures(mat.getTextureIndex()));
this.modelChest.renderAll();
}
GL11.glPopMatrix();
GL11.glDisable(GL12.GL_RESCALE_NORMAL);
GL11.glColor4f(1, 1, 1, 1);
}
def renderWool(inv:TileImprovedChest, x:Double,
y:Double, z:Double, partialTick:Float, doorAngle:Float, doubleChest:Boolean){
val c:Color = inv.decor.decColor.getColor();
GL11.glPushMatrix();
GL11.glEnable(GL12.GL_RESCALE_NORMAL);
GL11.glColor4f(c.getRed()/255f, c.getGreen()/255f, c.getBlue()/255f, c.getAlpha()/255f);
//Render chest wool
if(doubleChest){
GL11.glTranslated(x+1, y+1, z);
GL11.glScalef(-1, -1, 1);
this.bindTexture(woolTexture2);
this.modelDoubleChest.chestLid.rotateAngleX = -doorAngle;//-(f1 * (float)Math.PI / 2.0F);
this.modelDoubleChest.renderAll();
}else{
GL11.glTranslated(x+1, y+1, z);
GL11.glRotatef(180, 0, 0, 1);
this.bindTexture(woolTexture);
this.modelChest.chestLid.rotateAngleX = -doorAngle;//-(f1 * (float)Math.PI / 2.0F);
this.modelChest.renderAll();
}
GL11.glPopMatrix();
GL11.glDisable(GL12.GL_RESCALE_NORMAL);
GL11.glColor4f(1, 1, 1, 1);
}
} | AnimeniacYuuto/EnhancedInventories | src/main/scala/yuuto/enhancedinventories/client/renderer/TileImprovedChestRenderer.scala | Scala | gpl-2.0 | 7,548 |
package com.datastax.spark.connector.rdd
import com.datastax.spark.connector.cql._
import com.datastax.spark.connector.rdd.ClusteringOrder.{Ascending, Descending}
import com.datastax.spark.connector.rdd.reader._
import com.datastax.spark.connector.types.TypeConverter
import com.datastax.spark.connector.util.ConfigCheck
import com.datastax.spark.connector.{ColumnSelector, SomeColumns, _}
import org.apache.spark.rdd.RDD
import org.apache.spark.{Dependency, SparkContext}
import scala.language.existentials
import scala.reflect.ClassTag
abstract class CassandraRDD[R : ClassTag](
sc: SparkContext,
dep: Seq[Dependency[_]])
extends RDD[R](sc, dep) {
/** This is slightly different than Scala this.type.
* this.type is the unique singleton type of an object which is not compatible with other
* instances of the same type, so returning anything other than `this` is not really possible
* without lying to the compiler by explicit casts.
   * Here `Self` is used to return a copy of the object - a different instance of the same type */
type Self <: CassandraRDD[R]
ConfigCheck.checkConfig(sc.getConf)
protected def keyspaceName: String
protected def tableName: String
protected def columnNames: ColumnSelector
protected def where: CqlWhereClause
protected def readConf: ReadConf
protected def limit: Option[Long]
require(limit.isEmpty || limit.get > 0, "Limit must be greater than 0")
protected def clusteringOrder: Option[ClusteringOrder]
protected def connector: CassandraConnector
def toEmptyCassandraRDD: EmptyCassandraRDD[R]
/** Allows to copy this RDD with changing some of the properties */
protected def copy(
columnNames: ColumnSelector = columnNames,
where: CqlWhereClause = where,
limit: Option[Long] = limit,
clusteringOrder: Option[ClusteringOrder] = None,
readConf: ReadConf = readConf,
connector: CassandraConnector = connector): Self
/** Allows to set custom read configuration, e.g. consistency level or fetch size. */
def withReadConf(readConf: ReadConf): Self =
copy(readConf = readConf)
/** Returns a copy of this Cassandra RDD with specified connector */
def withConnector(connector: CassandraConnector): Self = {
copy(connector = connector)
}
/** Adds a CQL `WHERE` predicate(s) to the query.
* Useful for leveraging secondary indexes in Cassandra.
* Implicitly adds an `ALLOW FILTERING` clause to the WHERE clause,
* however beware that some predicates might be rejected by Cassandra,
* particularly in cases when they filter on an unindexed, non-clustering column. */
def where(cql: String, values: Any*): Self = {
copy(where = where and CqlWhereClause(Seq(cql), values))
}
/** Narrows down the selected set of columns.
* Use this for better performance, when you don't need all the columns in the result RDD.
* When called multiple times, it selects the subset of the already selected columns, so
* after a column was removed by the previous `select` call, it is not possible to
* add it back.
*
* The selected columns are [[ColumnRef]] instances. This type allows to specify columns for
* straightforward retrieval and to read TTL or write time of regular columns as well. Implicit
* conversions included in [[com.datastax.spark.connector]] package make it possible to provide
* just column names (which is also backward compatible) and optional add `.ttl` or `.writeTime`
* suffix in order to create an appropriate [[ColumnRef]] instance.
*/
def select(columns: ColumnRef*): Self = {
copy(columnNames = SomeColumns(narrowColumnSelection(columns): _*))
}
/** Adds the limit clause to CQL select statement. The limit will be applied for each created
* Spark partition. In other words, unless the data are fetched from a single Cassandra partition
* the number of results is unpredictable.
*
* The main purpose of passing limit clause is to fetch top n rows from a single Cassandra
* partition when the table is designed so that it uses clustering keys and a partition key
* predicate is passed to the where clause. */
def limit(rowLimit: Long): Self = {
copy(limit = Some(rowLimit))
}
/** Adds a CQL `ORDER BY` clause to the query.
* It can be applied only in case there are clustering columns and primary key predicate is
* pushed down in `where`.
* It is useful when the default direction of ordering rows within a single Cassandra partition
* needs to be changed. */
def clusteringOrder(order: ClusteringOrder): Self = {
copy(clusteringOrder = Some(order))
}
def withAscOrder: Self = clusteringOrder(Ascending)
def withDescOrder: Self = clusteringOrder(Descending)
override def take(num: Int): Array[R] = {
limit match {
case Some(_) => super.take(num)
case None => limit(num).take(num)
}
}
protected def narrowColumnSelection(columns: Seq[ColumnRef]): Seq[ColumnRef]
// Needed to be public for JavaAPI
val selectedColumnRefs: Seq[ColumnRef]
def selectedColumnNames: Seq[String] = selectedColumnRefs.map(_.cqlValueName)
// convertTo must be implemented for classes which wish to support `.as`
protected def convertTo[B : ClassTag : RowReaderFactory]: CassandraRDD[B] =
throw new NotImplementedError(s"convertTo not implemented for this class")
/** Maps each row into object of a different type using provided function taking column
* value(s) as argument(s). Can be used to convert each row to a tuple or a case class object:
* {{{
* sc.cassandraTable("ks", "table")
* .select("column1")
* .as((s: String) => s) // yields CassandraRDD[String]
*
* sc.cassandraTable("ks", "table")
* .select("column1", "column2")
* .as((_: String, _: Long)) // yields CassandraRDD[(String, Long)]
*
* case class MyRow(key: String, value: Long)
* sc.cassandraTable("ks", "table")
* .select("column1", "column2")
* .as(MyRow) // yields CassandraRDD[MyRow]
* }}} */
def as[B: ClassTag, A0: TypeConverter](f: A0 => B): CassandraRDD[B] = {
implicit val ft = new FunctionBasedRowReader1(f)
convertTo[B]
}
def as[B: ClassTag, A0: TypeConverter, A1: TypeConverter](f: (A0, A1) => B): CassandraRDD[B] = {
implicit val ft = new FunctionBasedRowReader2(f)
convertTo[B]
}
def as[B: ClassTag, A0: TypeConverter, A1: TypeConverter, A2: TypeConverter]
(f: (A0, A1, A2) => B): CassandraRDD[B] = {
implicit val ft = new FunctionBasedRowReader3(f)
convertTo[B]
}
def as[B: ClassTag, A0: TypeConverter, A1: TypeConverter, A2: TypeConverter,
A3: TypeConverter](f: (A0, A1, A2, A3) => B) = {
implicit val ft = new FunctionBasedRowReader4(f)
convertTo[B]
}
def as[B: ClassTag, A0: TypeConverter, A1: TypeConverter, A2: TypeConverter, A3: TypeConverter,
A4: TypeConverter](f: (A0, A1, A2, A3, A4) => B) = {
implicit val ft = new FunctionBasedRowReader5(f)
convertTo[B]
}
def as[B: ClassTag, A0: TypeConverter, A1: TypeConverter, A2: TypeConverter, A3: TypeConverter,
A4: TypeConverter, A5: TypeConverter](f: (A0, A1, A2, A3, A4, A5) => B) = {
implicit val ft = new FunctionBasedRowReader6(f)
convertTo[B]
}
def as[B: ClassTag, A0: TypeConverter, A1: TypeConverter, A2: TypeConverter, A3: TypeConverter,
A4: TypeConverter, A5: TypeConverter, A6: TypeConverter](f: (A0, A1, A2, A3, A4, A5, A6) => B) = {
implicit val ft = new FunctionBasedRowReader7(f)
convertTo[B]
}
def as[B: ClassTag, A0: TypeConverter, A1: TypeConverter, A2: TypeConverter, A3: TypeConverter,
A4: TypeConverter, A5: TypeConverter, A6: TypeConverter,
A7: TypeConverter](f: (A0, A1, A2, A3, A4, A5, A6, A7) => B) = {
implicit val ft = new FunctionBasedRowReader8(f)
convertTo[B]
}
def as[B: ClassTag, A0: TypeConverter, A1: TypeConverter, A2: TypeConverter, A3: TypeConverter,
A4: TypeConverter, A5: TypeConverter, A6: TypeConverter, A7: TypeConverter,
A8: TypeConverter](f: (A0, A1, A2, A3, A4, A5, A6, A7, A8) => B) = {
implicit val ft = new FunctionBasedRowReader9(f)
convertTo[B]
}
def as[B: ClassTag, A0: TypeConverter, A1: TypeConverter, A2: TypeConverter, A3: TypeConverter,
A4: TypeConverter, A5: TypeConverter, A6: TypeConverter, A7: TypeConverter,
A8: TypeConverter, A9: TypeConverter](f: (A0, A1, A2, A3, A4, A5, A6, A7, A8, A9) => B) = {
implicit val ft = new FunctionBasedRowReader10(f)
convertTo[B]
}
def as[B: ClassTag, A0: TypeConverter, A1: TypeConverter, A2: TypeConverter, A3: TypeConverter,
A4: TypeConverter, A5: TypeConverter, A6: TypeConverter, A7: TypeConverter, A8: TypeConverter,
A9: TypeConverter, A10: TypeConverter](f: (A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10) => B) = {
implicit val ft = new FunctionBasedRowReader11(f)
convertTo[B]
}
def as[B: ClassTag, A0: TypeConverter, A1: TypeConverter, A2: TypeConverter, A3: TypeConverter,
A4: TypeConverter, A5: TypeConverter, A6: TypeConverter, A7: TypeConverter, A8: TypeConverter,
A9: TypeConverter, A10: TypeConverter, A11: TypeConverter](
f: (A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11) => B) = {
implicit val ft = new FunctionBasedRowReader12(f)
convertTo[B]
}
}
object CassandraRDD {
def apply[T](sc: SparkContext, keyspaceName: String, tableName: String)
(implicit ct: ClassTag[T], rrf: RowReaderFactory[T]): CassandraRDD[T] =
new CassandraTableScanRDD[T](
sc,
CassandraConnector(sc.getConf),
keyspaceName,
tableName,
AllColumns,
CqlWhereClause.empty,
None,
None,
ReadConf.fromSparkConf(sc.getConf)
)
def apply[K, V](sc: SparkContext, keyspaceName: String, tableName: String)
(implicit keyCT: ClassTag[K], valueCT: ClassTag[V], rrf: RowReaderFactory[(K, V)]): CassandraRDD[(K, V)] =
new CassandraTableScanRDD[(K, V)](
sc,
CassandraConnector(sc.getConf),
keyspaceName,
tableName,
AllColumns,
CqlWhereClause.empty,
None,
None,
ReadConf.fromSparkConf(sc.getConf)
)
}
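/* Illustrative usage sketch added for documentation purposes only (not part of the original
 * source). Keyspace, table and column names are assumptions; the snippet relies on the
 * `CassandraRDD.apply` factory defined above and on the connector's standard implicits being
 * in scope at the call site.
 * {{{
 *   def topRows(sc: SparkContext): Array[CassandraRow] =
 *     CassandraRDD[CassandraRow](sc, "test_ks", "kv")
 *       .select("key", "value")          // narrow the selected columns
 *       .where("key = ?", "some-key")    // pushed-down CQL predicate
 *       .limit(10)                       // per Spark-partition CQL LIMIT
 *       .take(10)                        // take() applies the limit when none was set
 * }}}
 */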
| chbatey/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/rdd/CassandraRDD.scala | Scala | apache-2.0 | 10,232 |
package com.github.aalbul.irc.domain
import java.util.UUID
import org.pircbotx.Channel
/**
* Created by nuru on 04.01.14.
*/
case class IrcChannel(id: UUID, name: String, mode: String, topic: String, topicTimestamp: Long, createdTimestamp: Long,
topicSetter: String, moderated: Boolean, noExternalMessages: Boolean, inviteOnly: Boolean,
secret: Boolean, channelPrivate: Boolean, topicProtection: Boolean, channelLimit: Int,
channelKey: String)
object IrcChannel {
def apply(channel: Channel): IrcChannel = new IrcChannel(
channel.getChannelId, channel.getName, channel.getMode, channel.getTopic, channel.getTopicTimestamp,
channel.getCreateTimestamp, channel.getTopicSetter, channel.isModerated, channel.isNoExternalMessages,
channel.isInviteOnly, channel.isSecret, channel.isChannelPrivate, channel.hasTopicProtection,
channel.getChannelLimit, channel.getChannelKey
)
} | aalbul/reactive-irc | src/main/scala/com/github/aalbul/irc/domain/IrcChannel.scala | Scala | gpl-3.0 | 961 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet.util
import scala.language.implicitConversions
object OptionConversion {
implicit def someWrapper[A](noSome : A) : Option[A] = Option(noSome)
}
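// Illustrative sketch (added for documentation; not part of the original source): with the
// conversion imported, a plain value can be passed where an Option is expected. The object and
// method names below are assumptions used only to demonstrate the behaviour.
object OptionConversionSketch {
  import OptionConversion._
  def describe(name: Option[String]): String = name.getOrElse("unknown")
  val described: String = describe("mxnet") // implicitly wrapped as Some("mxnet")
}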
| eric-haibin-lin/mxnet | scala-package/core/src/main/scala/org/apache/mxnet/util/OptionConversion.scala | Scala | apache-2.0 | 975 |
package controllers
import akka.pattern.ask
import model.command.ShortenUrl
import model.event._
import model.query._
import play.api.mvc._
import play.api.libs.json.Json
import play.api.Logger
import plugins.{ DomainAsker, Domain }
import scala.concurrent.Future
abstract class ShortUrl extends Controller with PlayLogging with DomainAsker {
def shorten = Action.async(parse.json) { implicit request =>
(request.body \\ "target").validate[String].fold(
errors => Future.successful(jsonErrors2BadRequest(errors, log)),
target => (domain.shortUrlRegistry ? ShortenUrl(target)) map {
case ShortUrlCreated(shortUrl) => Ok(controllers.routes.ShortUrl.resolve(shortUrl.token).absoluteURL(sslEnabled))
case m =>
log.debug(s"Unable to create a short url for $target. Domain returned $m")
BadRequest
}
)
}
def resolve(token: String) = Action.async { request =>
(domain.shortUrlRegistry ? ResolveToken(token)) map {
case ShortUrlNotFound => NotFound
case ShortUrlFound(shortUrl) => TemporaryRedirect(shortUrl.target)
case m =>
log.debug(s"Unable to resolve a target url for $token. Domain returned $m")
BadRequest
}
}
def stats(token: String) = Action.async { request =>
(domain.shortUrlRegistry ? ReadTokenStats(token)) map {
case UrlStatNotFound => NotFound
case UrlStatFound(accessCount) =>
Ok(Json.obj("accessCount" -> accessCount))
case m =>
log.debug(s"Unable to resolve a target url for $token. Domain returned $m")
BadRequest
}
}
}
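// Request/response sketch (added for documentation; paths and host are assumptions derived from
// the reverse routes used above): POST-ing {"target": "http://example.org/very/long/url"} to the
// shorten action answers with the absolute URL of routes.ShortUrl.resolve(token); GET-ting that
// URL answers with a temporary redirect to the target, and the stats action answers with
// {"accessCount": <n>} or 404 when the token is unknown.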
object ShortUrl extends ShortUrl() {
import play.api.Play.current
import play.api.PlayException
import play.api
  override def domain = current.plugin[Domain].getOrElse(throw new PlayException("Domain plugin is not initialized", "Make sure the Domain plugin is defined in conf/play.plugins and is enabled in application.conf"))
override def log = Logger("application.controllers.ShortUrl")
override protected implicit def app: api.Application = current
} | jeantil/courtly | app/controllers/ShortUrl.scala | Scala | apache-2.0 | 2,070 |
/* Copyright 2017-19, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.ops
import org.platanios.tensorflow.api._
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.junit.JUnitSuite
import org.junit.Test
/**
* @author Emmanouil Antonios Platanios
*/
class NNSpec extends JUnitSuite with Matchers {
@Test def testLogSoftmax(): Unit = {
val tensor = Tensor(Tensor(Tensor(2, 3), Tensor(0, 0), Tensor(5, 7)),
Tensor(Tensor(1, 23), Tensor(4, -5), Tensor(7, 9)),
Tensor(Tensor(56, 1), Tensor(-2, -4), Tensor(-7, -9)))
val constant = tf.constant(tensor).toFloat
val logSoftmaxLastAxis = tf.logSoftmax(constant, axis = -1)
val logSoftmaxPenultimateAxis = tf.logSoftmax(constant, axis = 1)
val session = Session()
assertApproximatelyEqual(
session.run(fetches = logSoftmaxLastAxis).toArray,
Array(
-1.3132616f, -0.31326163f, -0.6931472f, -0.6931472f, -2.126928f, -0.12692805f,
-22.0f, 0.0f, -1.23374e-4f, -9.000123f, -2.126928f, -0.12692805f, 0.0f, -55.0f,
-0.12692805f, -2.126928f, -0.12692805f, -2.126928f,
),
)
assertApproximatelyEqual(
session.run(fetches = logSoftmaxPenultimateAxis).toArray,
Array(
-3.0549853f, -4.019045f, -5.054985f, -7.019045f, -0.054985214f, -0.019044992f,
-6.0509458f, -8.344647e-7f, -3.0509458f, -28.0f, -0.05094571f, -14.000001f, 0.0f,
-0.0067604627f, -58.0f, -5.0067606f, -63.0f, -10.006761f,
),
)
}
def assertApproximatelyEqual(x: Array[Float], y: Array[Float]): Unit = {
x.zip(y).foreach { case (xElement, yElement) =>
assert(xElement === yElement +- 1e-6f)
}
}
}
| eaplatanios/tensorflow_scala | modules/api/src/test/scala/org/platanios/tensorflow/api/ops/NNSpec.scala | Scala | apache-2.0 | 2,269 |
/**
* Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
*/
package akka.dispatch
import akka.actor.{ Actor, ActorRef }
import akka.actor.newUuid
import akka.config.Config._
import akka.util.{ Duration, ReflectiveAccess }
import akka.config.Configuration
import java.util.concurrent.TimeUnit
/**
* Scala API. Dispatcher factory.
* <p/>
* Example usage:
* <pre/>
* val dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher("name")
* dispatcher
* .withNewThreadPoolWithLinkedBlockingQueueWithCapacity(100)
* .setCorePoolSize(16)
* .setMaxPoolSize(128)
* .setKeepAliveTimeInMillis(60000)
* .setRejectionPolicy(new CallerRunsPolicy)
* .build
* </pre>
* <p/>
* Java API. Dispatcher factory.
* <p/>
* Example usage:
* <pre/>
* MessageDispatcher dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher("name");
* dispatcher
* .withNewThreadPoolWithLinkedBlockingQueueWithCapacity(100)
* .setCorePoolSize(16)
* .setMaxPoolSize(128)
* .setKeepAliveTimeInMillis(60000)
* .setRejectionPolicy(new CallerRunsPolicy())
* .build();
* </pre>
* <p/>
*
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
*/
object Dispatchers {
val THROUGHPUT = config.getInt("akka.actor.throughput", 5)
val DEFAULT_SHUTDOWN_TIMEOUT = config.getLong("akka.actor.dispatcher-shutdown-timeout").
map(time => Duration(time, TIME_UNIT)).
getOrElse(Duration(1000, TimeUnit.MILLISECONDS))
val MAILBOX_CAPACITY = config.getInt("akka.actor.default-dispatcher.mailbox-capacity", -1)
val MAILBOX_PUSH_TIME_OUT = Duration(config.getInt("akka.actor.default-dispatcher.mailbox-push-timeout-time", 10), TIME_UNIT)
val THROUGHPUT_DEADLINE_TIME = Duration(config.getInt("akka.actor.throughput-deadline-time", -1), TIME_UNIT)
val THROUGHPUT_DEADLINE_TIME_MILLIS = THROUGHPUT_DEADLINE_TIME.toMillis.toInt
val MAILBOX_TYPE: MailboxType = if (MAILBOX_CAPACITY < 1) UnboundedMailbox() else BoundedMailbox()
lazy val defaultGlobalDispatcher = {
config.getSection("akka.actor.default-dispatcher").flatMap(from).getOrElse(globalExecutorBasedEventDrivenDispatcher)
}
object globalExecutorBasedEventDrivenDispatcher extends ExecutorBasedEventDrivenDispatcher("global", THROUGHPUT, THROUGHPUT_DEADLINE_TIME_MILLIS, MAILBOX_TYPE)
/**
   * Creates a thread-based dispatcher serving a single actor through the same single thread.
* Uses the default timeout
* <p/>
* E.g. each actor consumes its own thread.
*/
def newThreadBasedDispatcher(actor: ActorRef) = new ThreadBasedDispatcher(actor)
/**
   * Creates a thread-based dispatcher serving a single actor through the same single thread.
* Uses the default timeout
* If capacity is negative, it's Integer.MAX_VALUE
* <p/>
* E.g. each actor consumes its own thread.
*/
def newThreadBasedDispatcher(actor: ActorRef, mailboxCapacity: Int) = new ThreadBasedDispatcher(actor, mailboxCapacity)
/**
   * Creates a thread-based dispatcher serving a single actor through the same single thread.
* If capacity is negative, it's Integer.MAX_VALUE
* <p/>
* E.g. each actor consumes its own thread.
*/
def newThreadBasedDispatcher(actor: ActorRef, mailboxCapacity: Int, pushTimeOut: Duration) =
new ThreadBasedDispatcher(actor, mailboxCapacity, pushTimeOut)
/**
* Creates an executor-based event-driven dispatcher serving multiple (millions) of actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenDispatcher(name: String) =
ThreadPoolConfigDispatcherBuilder(config => new ExecutorBasedEventDrivenDispatcher(name, config), ThreadPoolConfig())
/**
* Creates an executor-based event-driven dispatcher serving multiple (millions) of actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenDispatcher(name: String, throughput: Int, mailboxType: MailboxType) =
ThreadPoolConfigDispatcherBuilder(config =>
new ExecutorBasedEventDrivenDispatcher(name, throughput, THROUGHPUT_DEADLINE_TIME_MILLIS, mailboxType, config), ThreadPoolConfig())
/**
* Creates an executor-based event-driven dispatcher serving multiple (millions) of actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenDispatcher(name: String, throughput: Int, throughputDeadlineMs: Int, mailboxType: MailboxType) =
ThreadPoolConfigDispatcherBuilder(config =>
new ExecutorBasedEventDrivenDispatcher(name, throughput, throughputDeadlineMs, mailboxType, config), ThreadPoolConfig())
/**
* Creates an executor-based event-driven dispatcher, with work-stealing, serving multiple (millions) of actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenWorkStealingDispatcher(name: String) =
ThreadPoolConfigDispatcherBuilder(config => new ExecutorBasedEventDrivenWorkStealingDispatcher(name, config), ThreadPoolConfig())
/**
* Creates an executor-based event-driven dispatcher, with work-stealing, serving multiple (millions) of actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenWorkStealingDispatcher(name: String, throughput: Int) =
ThreadPoolConfigDispatcherBuilder(config =>
new ExecutorBasedEventDrivenWorkStealingDispatcher(name, throughput, THROUGHPUT_DEADLINE_TIME_MILLIS, MAILBOX_TYPE, config), ThreadPoolConfig())
/**
* Creates an executor-based event-driven dispatcher, with work-stealing, serving multiple (millions) of actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenWorkStealingDispatcher(name: String, throughput: Int, mailboxType: MailboxType) =
ThreadPoolConfigDispatcherBuilder(config =>
new ExecutorBasedEventDrivenWorkStealingDispatcher(name, throughput, THROUGHPUT_DEADLINE_TIME_MILLIS, mailboxType, config), ThreadPoolConfig())
/**
* Creates an executor-based event-driven dispatcher, with work-stealing, serving multiple (millions) of actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenWorkStealingDispatcher(name: String, throughput: Int, throughputDeadlineMs: Int, mailboxType: MailboxType) =
ThreadPoolConfigDispatcherBuilder(config =>
new ExecutorBasedEventDrivenWorkStealingDispatcher(name, throughput, throughputDeadlineMs, mailboxType, config), ThreadPoolConfig())
/**
* Utility function that tries to load the specified dispatcher config from the akka.conf
* or else use the supplied default dispatcher
*/
def fromConfig(key: String, default: => MessageDispatcher = defaultGlobalDispatcher): MessageDispatcher =
config getSection key flatMap from getOrElse default
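  // Illustrative example (added; the config key "my-service.worker-dispatcher" is an assumption):
  //   val dispatcher = Dispatchers.fromConfig("my-service.worker-dispatcher")
  // The call falls back to the default global dispatcher when that section is absent from akka.conf.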
/*
   * Creates or obtains a dispatcher from a ConfigMap according to the format below
*
* default-dispatcher {
* type = "GlobalExecutorBasedEventDriven" # Must be one of the following, all "Global*" are non-configurable
* # (ExecutorBasedEventDrivenWorkStealing), ExecutorBasedEventDriven,
* # GlobalExecutorBasedEventDriven
* # A FQCN to a class inheriting MessageDispatcherConfigurator with a no-arg visible constructor
* keep-alive-time = 60 # Keep alive time for threads
* core-pool-size-factor = 1.0 # No of core threads ... ceil(available processors * factor)
* max-pool-size-factor = 4.0 # Max no of threads ... ceil(available processors * factor)
* executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded
* allow-core-timeout = on # Allow core threads to time out
* rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard
* throughput = 5 # Throughput for ExecutorBasedEventDrivenDispatcher
* }
* ex: from(config.getConfigMap(identifier).get)
*
* Gotcha: Only configures the dispatcher if possible
* Returns: None if "type" isn't specified in the config
   * Throws: IllegalArgumentException if the value of "type" is not valid
   *         IllegalArgumentException if the configurator class cannot be instantiated
*/
def from(cfg: Configuration): Option[MessageDispatcher] = {
cfg.getString("type") map {
case "ExecutorBasedEventDriven" => new ExecutorBasedEventDrivenDispatcherConfigurator()
case "ExecutorBasedEventDrivenWorkStealing" => new ExecutorBasedEventDrivenWorkStealingDispatcherConfigurator()
case "GlobalExecutorBasedEventDriven" => GlobalExecutorBasedEventDrivenDispatcherConfigurator
case fqn =>
ReflectiveAccess.getClassFor[MessageDispatcherConfigurator](fqn) match {
case r: Right[_, Class[MessageDispatcherConfigurator]] =>
ReflectiveAccess.createInstance[MessageDispatcherConfigurator](r.b, Array[Class[_]](), Array[AnyRef]()) match {
case r: Right[Exception, MessageDispatcherConfigurator] => r.b
case l: Left[Exception, MessageDispatcherConfigurator] =>
throw new IllegalArgumentException("Cannot instantiate MessageDispatcherConfigurator type [%s], make sure it has a default no-args constructor" format fqn, l.a)
}
case l: Left[Exception, _] =>
throw new IllegalArgumentException("Unknown MessageDispatcherConfigurator type [%s]" format fqn, l.a)
}
} map {
_ configure cfg
}
}
}
object GlobalExecutorBasedEventDrivenDispatcherConfigurator extends MessageDispatcherConfigurator {
def configure(config: Configuration): MessageDispatcher = Dispatchers.globalExecutorBasedEventDrivenDispatcher
}
class ExecutorBasedEventDrivenDispatcherConfigurator extends MessageDispatcherConfigurator {
def configure(config: Configuration): MessageDispatcher = {
configureThreadPool(config, threadPoolConfig => new ExecutorBasedEventDrivenDispatcher(
config.getString("name", newUuid.toString),
config.getInt("throughput", Dispatchers.THROUGHPUT),
config.getInt("throughput-deadline-time", Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS),
mailboxType(config),
threadPoolConfig)).build
}
}
class ExecutorBasedEventDrivenWorkStealingDispatcherConfigurator extends MessageDispatcherConfigurator {
def configure(config: Configuration): MessageDispatcher = {
configureThreadPool(config, threadPoolConfig => new ExecutorBasedEventDrivenWorkStealingDispatcher(
config.getString("name", newUuid.toString),
config.getInt("throughput", Dispatchers.THROUGHPUT),
config.getInt("throughput-deadline-time", Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS),
mailboxType(config),
threadPoolConfig)).build
}
}
| felixmulder/scala | test/disabled/presentation/akka/src/akka/dispatch/Dispatchers.scala | Scala | bsd-3-clause | 11,088 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.reactive.Observable
import scala.concurrent.duration._
object DebounceRepeatedSuite extends BaseOperatorSuite {
def createObservable(sourceCount: Int) = Some {
val o = Observable.now(1L).delayOnComplete(1.day)
.debounceRepeated(1.second)
.take(sourceCount)
val count = sourceCount
val sum = sourceCount
Sample(o, count, sum, 1.second, 1.second)
}
def observableInError(sourceCount: Int, ex: Throwable) = None
def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = None
override def cancelableObservables(): Seq[Sample] = {
val debouncePeriod = 1.second + 200.millis
val sample1 = Observable.interval(2.seconds).map(_ => 1L)
.debounceRepeated(debouncePeriod)
val sample2 = Observable.now(1L).delayOnComplete(1.minute)
.debounceRepeated(1.second)
Seq(
Sample(sample1, 0, 0, 0.seconds, 0.seconds),
Sample(sample1, 1, 1, debouncePeriod, 0.seconds),
Sample(sample1, 1, 1, 2.seconds, 0.seconds),
Sample(sample1, 2, 2, 2.seconds + debouncePeriod, 0.seconds),
Sample(sample2, 0, 0, 0.seconds, 0.seconds),
Sample(sample2, 1, 1, 1.seconds, 0.seconds)
)
}
}
| Wogan/monix | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/DebounceRepeatedSuite.scala | Scala | apache-2.0 | 1,901 |
package scaldi.util
object JvmTestUtil {
  // It is not pretty at all, but if you know a better way to get the JVM shutdown hook count, I would be happy to use it :)
def shutdownHookCount = {
val hooksField = Class.forName("java.lang.ApplicationShutdownHooks").getDeclaredField("hooks")
if (!hooksField.isAccessible) hooksField.setAccessible(true)
hooksField.get(null).asInstanceOf[java.util.Map[_, _]].size
}
}
| Mironor/scaldi | src/test/scala/scaldi/util/JvmTestUtil.scala | Scala | apache-2.0 | 426 |
package cc.factorie.app.nlp.phrase
import cc.factorie.app.nlp._
import scala.util.parsing.combinator.{ImplicitConversions, Parsers}
import cc.factorie.app.nlp.pos.PennPosTag
import scala.util.parsing.input.{Reader, Position}
import java.util.GregorianCalendar
import scala.collection.mutable.ArrayBuffer
import cc.factorie.app.nlp.lemma.TokenLemma
/**
 * Finds and parses many common date formats in a document. The basic formats were taken from http://en.wikipedia.org/wiki/Calendar_date.
 * The implementation assumes DeterministicTokenizer as the tokenizer and is built on Scala's parser combinators.
* @author Dirk Weissenborn
*/
object DatePhraseFinder extends DocumentAnnotator with Parsers with ImplicitConversions {
type Elem = Token
def prereqAttrs = List(classOf[TokenLemma])
def postAttrs: Iterable[Class[_]] = List()
implicit val err: (Elem) => String = _.lemmaString + "was unexpected!"
implicit def toInt(tokenAndInt: (Token, Int)) = tokenAndInt._2
implicit def toToken(tokenAndInt: (Token, Int)) = tokenAndInt._1
val monthToNr = "Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec".toLowerCase.split("\\\\|").zipWithIndex.toMap.mapValues(_ + 1)
val weekDayNr = "Mon|Tue|Wed|Thu|Fri|Sat|Sun".toLowerCase.split("\\\\|").zipWithIndex.toMap
val nrToMonth = Array.ofDim[String](monthToNr.size);
monthToNr.foreach(el => nrToMonth(el._2 - 1) = el._1)
val nrToWeekDay = Array.ofDim[String](weekDayNr.size);
weekDayNr.foreach(el => nrToWeekDay(el._2) = el._1)
val dayOfMonthRegex = "([1-3]0|[0-3]?1(st)?|[0-2]?(2(nd)?|3(rd)?|[4-9](th)?))"
val monthAbbrRegex: String = "(Jan|Feb|Mar|Apr|Jun|Jul|Aug|Sep|Sept|Oct|Nov|Dec)\\\\.?"
val monthRegex = "(January|February|March|April|May|June|July|August|September|October|November|December)"
val monthAbbr: Parser[Token] = acceptIf(_.string.toLowerCase.matches(monthAbbrRegex.toLowerCase))(err)
val monthParser: Parser[Token] = acceptIf(_.string.toLowerCase.matches(monthRegex.toLowerCase))(err)
  val monthNumber: Parser[Token] = "(0?[1-9]|1[0-2])" // a bare [1-12] character class would only match '1' or '2'
val monthDayNumber: Parser[(Token, Int)] = hasString(dayOfMonthRegex) ^^ { case dayToken =>
(dayToken, dayToken.string.replaceAll("[sthrnd]+", "").toInt)
}
val weekDayAbbr: Parser[Token] = acceptIf(_.string.toLowerCase.matches("(Mon|Tue|Tues|Wed|Thu|Thurs|Fri)\\\\.?".toLowerCase))(err)
val weekDay: Parser[(Token, Int)] = (weekDayAbbr | acceptIf(_.string.toLowerCase.matches("Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday".toLowerCase))(err)) ^^ {
case weekDayToken => (weekDayToken, weekDayNr(weekDayToken.string.take(3).toLowerCase))
}
val temporalPreps: Parser[Token] = hasLemma("in|from|to|until|since")
val digits: Parser[(Token, Int)] = acceptIf(_.isDigits)(err) ^^ { case t => (t, t.string.toInt)}
val bcAd: Parser[Token] = "B\\\\.?C\\\\.?|A\\\\.?D\\\\.?"
val year: Parser[(Token, Int)] = bcAd.? ~ digits ~ bcAd.? ^^ { case bcAdOpt1 ~ y ~ bcAdOpt2 =>
val bcAdOpt = if (bcAdOpt1.isDefined) bcAdOpt1 else bcAdOpt2
(y._1,
if (bcAdOpt.isDefined)
if (bcAdOpt.exists(_.string.startsWith("B"))) -y
else y._2
else normalizeYear(y._2))
}
def hasLemma(lemmaRegex: String): Parser[Token] = acceptIf(_.lemmaString.matches(lemmaRegex))(err)
implicit def hasString(stringRegex: String): Parser[Token] = acceptIf(_.string.matches(stringRegex))(err)
val simpleSep = hasString("[\\\\-/]")
val yearOnly = (temporalPreps | hasLemma("year")) ~> year <~ acceptIf(!_.attr[PennPosTag].isNoun)(err) ^^ { case y => new DatePhrase(y._1, year = y)}
val onlyMonth: Parser[(Token, Int)] = (monthParser | monthAbbr) ^^ { case monthToken => (monthToken, monthToNr(monthToken.string.substring(0, 3).toLowerCase))}
val monthOnly: Parser[DatePhrase] = onlyMonth ^^ { case m => new DatePhrase(m._1, month = m._2)}
def l(t1: Token, t2: Token) = t2.positionInSection - t1.positionInSection + 1
// Real date parsers with Date as output
val monthYear = onlyMonth ~ hasLemma("of|,|/").? ~ year ^^ { case m ~ _ ~ y => new DatePhrase(m._1, length = l(m._1, y._1), month = m, year = y)}
val yearMonth = year ~ hasLemma(",").? ~ onlyMonth ^^ { case y ~ _ ~ m => new DatePhrase(y._1, length = l(y._1, m._1), month = m, year = y)}
def normalizeYear(y: Int) = {
if (y < 100)
if (y < 20) y + 2000
else y + 1900
else y
}
//DeterministicTokenizer adaptions
//2003-11-09
val yearMonthDayFromTokenNr = "(19|20)?[0-9]{2}[\\\\-/][0-3]?[0-9][\\\\-/][0-3]?[0-9]" ^^ { case ymdToken =>
val split = ymdToken.string.split("\\\\-|/")
val y = normalizeYear(split(0).toInt)
new DatePhrase(ymdToken, year = y, month = split(1).toInt, day = split(2).toInt)
}
//the US way
  //11-29-2003 (the month uses an alternation; a bare [1-12] character class would only match '1' or '2')
  val monthDayYearFromTokenNr = "(0?[1-9]|1[0-2])[\\\\-/][0-3]?[0-9][\\\\-/][0-9]{2,}" ^^ { case mdyToken =>
val split = mdyToken.string.split("\\\\-|/")
val y = normalizeYear(split(2).toInt)
new DatePhrase(mdyToken, year = y, month = split(0).toInt, day = split(1).toInt)
}
//2003 November 9, 2003-Nov-9, 2003-Nov-09, 2003-Nov-9, Sunday
val yearMonthDay = year ~ simpleSep.? ~ onlyMonth ~ simpleSep.? ~ monthDayNumber ~ ("," ~ weekDay).? ^^ { case y ~ _ ~ m ~ _ ~ d ~ wOpt =>
val w = wOpt.fold(-1)(_._2._2)
new DatePhrase(y._1, length = l(y._1, wOpt.fold(d._1)(_._2._1)), month = m, day = d, year = y, weekDay = w)
}
//due to tokenization of DeterministicTokenizer (factorie) a small adaption
val yearMonthDayAdap = year ~ "-".? ~ s"($monthRegex|$monthAbbrRegex)-?$dayOfMonthRegex" ~ ("," ~ weekDay).? ^^ { case y ~ _ ~ mdToken ~ wOpt =>
val w = wOpt.fold(-1)(_._2._2)
val split = if (mdToken.string.contains("-")) {
val Array(a, b) = mdToken.string.split("-"); (a, b)
} else mdToken.string.splitAt(mdToken.string.indexWhere(_.isDigit))
val m = monthToNr(split._1.substring(0, 3).toLowerCase)
val d = split._2.toInt
new DatePhrase(y._1, length = l(y._1, wOpt.fold(mdToken)(_._2._1)), month = m, day = d, year = y, weekDay = w)
}
//2003Nov9, 2003Nov09
val yearMonthDayString = year ~ hasString(monthAbbrRegex + dayOfMonthRegex) ^^ { case y ~ mdToken =>
val dStart = mdToken.string.indexWhere(_.isDigit)
val m = monthToNr(mdToken.string.substring(0, dStart).take(3).toLowerCase)
new DatePhrase(y._1, length = 2, year = y._2, month = m, day = mdToken.string.substring(dStart).toInt)
}
//Sunday, November 9, 2003; November 9, 2003; Nov. 9, 2003; November 9
val monthDayYear = (weekDay ~ ",".?).? ~ onlyMonth ~ monthDayNumber ~ (",".? ~ year).? ^^ {
case wdOpt ~ m ~ d ~ yOpt =>
val startToken = wdOpt.fold(m._1)(_._1._1)
val endToken = yOpt.fold(d._1)(_._2._1)
new DatePhrase(startToken, length = l(startToken, endToken), day = d, month = m, year = yOpt.fold(Int.MinValue)(_._2._2), weekDay = wdOpt.fold(-1)(_._1._2))
}
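  // Worked example (added as documentation): for "Sunday, November 9, 2003" the parser binds
  // weekDay ("sun" -> 6), onlyMonth ("nov" -> 11), monthDayNumber (9) and year (2003), so the
  // resulting DatePhrase covers the span from "Sunday" through "2003" with day = 9, month = 11,
  // year = 2003 and weekDay = 6.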
//08-Nov-2003, [The] 8th [of] November 2003, 08/Nov/2003, Sunday, 8 November 2003
val dayMonthYear = (weekDay ~ ",".?).? ~ "the".? ~ monthDayNumber ~ "of".? ~ onlyMonth ~ (",".? ~ year).? ^^ {
case wdOpt ~ _ ~ d ~ _ ~ m ~ yOpt =>
val startToken = wdOpt.fold(d._1)(_._1._1)
val endToken = yOpt.fold(m._1)(_._2._1)
new DatePhrase(startToken, length = l(startToken, endToken), day = d, month = m, year = yOpt.fold(Int.MinValue)(_._2._2), weekDay = wdOpt.fold(-1)(_._1._2))
}
//due to tokenization of DeterministicTokenizer (factorie) a small adaption
val dayMonthYearAdap = monthDayNumber ~ "-".? ~ s"($monthRegex|$monthAbbrRegex)-?[0-9]{2,}" ~ ("," ~ weekDay).? ^^ { case d ~ _ ~ myToken ~ wOpt =>
val w = wOpt.fold(-1)(_._2._2)
val split = if (myToken.string.contains("-")) {
val Array(a, b) = myToken.string.split("-"); (a, b)
} else myToken.string.splitAt(myToken.string.indexWhere(_.isDigit))
val m = monthToNr(split._1.substring(0, 3).toLowerCase)
val y = split._2.toInt
new DatePhrase(d._1, length = l(d._1, wOpt.fold(myToken)(_._2._1)), month = m, day = d._2, year = y, weekDay = w)
}
//9Nov2003
val dayMonthYearString = monthDayNumber ~ hasString(dayOfMonthRegex + monthAbbrRegex + "[0-9]{4}") ^^ { case d ~ myToken =>
val yStart = myToken.string.indexWhere(_.isDigit)
val y = normalizeYear(myToken.string.substring(yStart).toInt)
val m = monthToNr(myToken.string.substring(0, yStart).take(3).toLowerCase)
new DatePhrase(d, length = 2, year = y, month = m, day = d._2)
}
//order is important
val parser: Parser[DatePhrase] = dayMonthYear | monthDayYear | yearMonthDay | yearMonthDayAdap | yearMonthDayString | dayMonthYearAdap | dayMonthYearString | monthDayYearFromTokenNr | yearMonthDayFromTokenNr | yearOnly | monthOnly
def reader(ts: Iterable[Token]): scala.util.parsing.input.Reader[Token] = new scala.util.parsing.input.Reader[Token] {
override def first: Token = ts.head
override def atEnd: Boolean = ts.isEmpty
override def pos: Position = new Position {
override def column: Int = if (atEnd) Int.MaxValue else first.position
override def line: Int = 0
override protected def lineContents: String = ""
}
override def rest: Reader[Token] = if (atEnd) reader(ts) else reader(ts.tail)
}
def process(document: Document) = {
val mentions = parseAll(document.tokens)
document.attr += new DatePhraseList(mentions)
document
}
override def tokenAnnotationString(token: Token): String = token.document.attr[DatePhraseList].find(phrase => phrase.contains(token)).fold("")("Date: " + _.asInstanceOf[DatePhrase].toString())
/** A collection of Phrases that are noun phrases. Typically used as an attribute of a Section or a Document. */
class DatePhraseList(phrases: Iterable[DatePhrase]) extends PhraseList(phrases)
class DatePhrase(startToken: Token, length: Int = 1, val day: Int = -1, val month: Int = -1, val year: Int = Int.MinValue, val weekDay: Int = -1)
extends Phrase(startToken.section, startToken.positionInSection, length, 0) {
def toJavaDate: java.util.Date = new GregorianCalendar(year, month, day).getTime
override def toString(): String = {
var s = ""
if (weekDay >= 0) s += nrToWeekDay(weekDay) + ", "
if (day >= 0) s += day + " "
if (month >= 0) s += nrToMonth(month - 1) + " "
if (year >= 0) s += year
s.trim
}
}
def parseAll(tokens: Iterable[Token]) = {
var r = reader(tokens)
val mentions = ArrayBuffer[DatePhrase]()
while (r != null && !r.atEnd)
parser.apply(r) match {
case Success(s, rest) =>
mentions += s
r = rest
case Failure(_, rest) => r = rest.rest
case Error(_, rest) => r = rest.rest
}
mentions
}
}
| malcolmgreaves/factorie | src/main/scala/cc/factorie/app/nlp/phrase/DatePhraseFinder.scala | Scala | apache-2.0 | 10,667 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Sun Aug 23 15:42:06 EDT 2015
* @see LICENSE (MIT style license file).
*/
package scalation.linalgebra
import reflect.ClassTag
import scalation.math.{Complex, Rational, Real}
import scalation.math.StrO.StrNum
import scalation.stat.{vectorC2StatVector, vectorD2StatVector, vectorI2StatVector,
vectorL2StatVector, vectorQ2StatVector, vectorR2StatVector}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Vec` trait establishes a common base type for all vectors (e.g., VectorD).
*/
trait Vec
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the size (number of elements) of the vector.
*/
def size: Int
} // Vec trait
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Vec` object provides a minimal set of functions that apply across all
* types of vectors.
* @see `scalation.relalgebra.Relation`
*/
object Vec
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the 'i'th element.
* @param i the index position
*/
def apply (x: Vec, i: Int): Any =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC] (i)
case _: VectorD => x.asInstanceOf [VectorD] (i)
case _: VectorI => x.asInstanceOf [VectorI] (i)
case _: VectorL => x.asInstanceOf [VectorL] (i)
case _: VectorQ => x.asInstanceOf [VectorQ] (i)
case _: VectorR => x.asInstanceOf [VectorR] (i)
case _: VectorS => x.asInstanceOf [VectorS] (i)
case _ => println ("apply: vector type not supported"); 0
} // match
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate vectors 'x' and 'y'.
* @param x the first vector
* @param y the second vector
*/
def ++ (x: Vec, y: Vec): Vec =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC] ++ y.asInstanceOf [VectorC]
case _: VectorD => x.asInstanceOf [VectorD] ++ y.asInstanceOf [VectorD]
case _: VectorI => x.asInstanceOf [VectorI] ++ y.asInstanceOf [VectorI]
case _: VectorL => x.asInstanceOf [VectorL] ++ y.asInstanceOf [VectorL]
case _: VectorQ => x.asInstanceOf [VectorQ] ++ y.asInstanceOf [VectorQ]
case _: VectorR => x.asInstanceOf [VectorR] ++ y.asInstanceOf [VectorR]
case _: VectorS => x.asInstanceOf [VectorS] ++ y.asInstanceOf [VectorS]
case _ => println ("++ vector type not supported"); null
} // match
} // ++
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Copy of vector 'x' with scalar 's' appended.
* @param x the vector
* @param y the scalar to append
*/
def :+ [T <: Any] (x: Vec, s: T): Vec =
{
s match {
case _: Complex => if (x == null) VectorC (s.asInstanceOf [Complex])
else x.asInstanceOf [VectorC] ++ s.asInstanceOf [Complex]
case _: Double => if (x == null) VectorD (s.asInstanceOf [Double])
else x.asInstanceOf [VectorD] ++ s.asInstanceOf [Double]
case _: Int => if (x == null) VectorI (s.asInstanceOf [Int])
else x.asInstanceOf [VectorI] ++ s.asInstanceOf [Int]
case _: Long => if (x == null) VectorL (s.asInstanceOf [Long])
else x.asInstanceOf [VectorL] ++ s.asInstanceOf [Long]
case _: Rational => if (x == null) VectorQ (s.asInstanceOf [Rational])
else x.asInstanceOf [VectorQ] ++ s.asInstanceOf [Rational]
case _: Real => if (x == null) VectorR (s.asInstanceOf [Real])
else x.asInstanceOf [VectorR] ++ s.asInstanceOf [Real]
case _: StrNum => if (x == null) VectorS (s.asInstanceOf [StrNum])
else x.asInstanceOf [VectorS] ++ s.asInstanceOf [StrNum]
case _: String => if (x == null) VectorS (StrNum (s.asInstanceOf [String]))
else x.asInstanceOf [VectorS] ++ StrNum (s.asInstanceOf [String])
case _ => println (":+ vector type not supported"); null
} // match
} // :+
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Filter vector 'x' based on predicate 'p', returning a new vector.
* @param x the vector to filter
* @param p the predicate (Boolean function) to apply
*/
def filter [T: ClassTag: Numeric] (x: Vec, p: T => Boolean): Vec =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC].filter (p.asInstanceOf [Complex => Boolean])
case _: VectorD => x.asInstanceOf [VectorD].filter (p.asInstanceOf [Double => Boolean])
case _: VectorI => x.asInstanceOf [VectorI].filter (p.asInstanceOf [Int => Boolean])
case _: VectorL => x.asInstanceOf [VectorL].filter (p.asInstanceOf [Long => Boolean])
case _: VectorQ => x.asInstanceOf [VectorQ].filter (p.asInstanceOf [Rational => Boolean])
case _: VectorR => x.asInstanceOf [VectorR].filter (p.asInstanceOf [Real => Boolean])
case _: VectorS => x.asInstanceOf [VectorS].filter (p.asInstanceOf [StrNum => Boolean])
case _ => println ("filter: vector type not supported"); null
} // match
} // filter
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Filter vector 'x' based on predicate 'p', returning the positions in the vector.
* @param x the vector to filter
* @param p the predicate (Boolean function) to apply
*/
def filterPos [T: ClassTag: Numeric] (x: Vec, p: T => Boolean): Array [Int] =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC].filterPos (p.asInstanceOf [Complex => Boolean])
case _: VectorD => x.asInstanceOf [VectorD].filterPos (p.asInstanceOf [Double => Boolean])
case _: VectorI => x.asInstanceOf [VectorI].filterPos (p.asInstanceOf [Int => Boolean])
case _: VectorL => x.asInstanceOf [VectorL].filterPos (p.asInstanceOf [Long => Boolean])
case _: VectorQ => x.asInstanceOf [VectorQ].filterPos (p.asInstanceOf [Rational => Boolean])
case _: VectorR => x.asInstanceOf [VectorR].filterPos (p.asInstanceOf [Real => Boolean])
case _: VectorS => x.asInstanceOf [VectorS].filterPos (p.asInstanceOf [StrNum => Boolean])
case _ => println ("filterPos: vector type not supported"); null
} // match
} // filterPos
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Select elements from vector 'x' at the given index positions.
* @param x the vector to select from
* @param pos the positions to select
*/
def select (x: Vec, pos: Seq [Int]): Vec =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC].select (pos.toArray)
case _: VectorD => x.asInstanceOf [VectorD].select (pos.toArray)
case _: VectorI => x.asInstanceOf [VectorI].select (pos.toArray)
case _: VectorL => x.asInstanceOf [VectorL].select (pos.toArray)
case _: VectorQ => x.asInstanceOf [VectorQ].select (pos.toArray)
case _: VectorR => x.asInstanceOf [VectorR].select (pos.toArray)
case _: VectorS => x.asInstanceOf [VectorS].select (pos.toArray)
case _ => println ("select: vector type not supported"); null
} // match
} // select
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert a vector of a different type to `VectorI`.
* @param x the vector to convert
*/
def toInt (x: Vec) =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC].toInt
case _: VectorD => x.asInstanceOf [VectorD].toInt
case _: VectorI => x.asInstanceOf [VectorI]
case _: VectorL => x.asInstanceOf [VectorL].toInt
case _: VectorQ => x.asInstanceOf [VectorQ].toInt
case _: VectorR => x.asInstanceOf [VectorR].toInt
case _: VectorS => x.asInstanceOf [VectorS].toInt
case _ => println ("toInt: vector type not supported"); null
} // match
} // toInt
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert a vector of a different type to `VectorD`.
* @param x the vector to convert
*/
def toDouble (x: Vec) =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC].toDouble
case _: VectorD => x.asInstanceOf [VectorD]
case _: VectorI => x.asInstanceOf [VectorI].toDouble
case _: VectorL => x.asInstanceOf [VectorL].toDouble
case _: VectorQ => x.asInstanceOf [VectorQ].toDouble
case _: VectorR => x.asInstanceOf [VectorR].toDouble
case _: VectorS => x.asInstanceOf [VectorS].toDouble
case _ => println ("toDouble: vector type not supported"); null
} // match
} // toDouble
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the minimum of vector 'x'.
* @param x the vector whose min is sought
*/
def min (x: Vec): Any =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC].min ()
case _: VectorD => x.asInstanceOf [VectorD].min ()
case _: VectorI => x.asInstanceOf [VectorI].min ()
case _: VectorL => x.asInstanceOf [VectorL].min ()
case _: VectorQ => x.asInstanceOf [VectorQ].min ()
case _: VectorR => x.asInstanceOf [VectorR].min ()
case _: VectorS => x.asInstanceOf [VectorS].min ()
case _ => println ("min: vector type not supported"); null
} // match
} // min
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the maximum of vector 'x'.
* @param x the vector whose max is sought
*/
def max (x: Vec): Any =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC].max ()
case _: VectorD => x.asInstanceOf [VectorD].max ()
case _: VectorI => x.asInstanceOf [VectorI].max ()
case _: VectorL => x.asInstanceOf [VectorL].max ()
case _: VectorQ => x.asInstanceOf [VectorQ].max ()
case _: VectorR => x.asInstanceOf [VectorR].max ()
case _: VectorS => x.asInstanceOf [VectorS].max ()
case _ => println ("max: vector type not supported"); null
} // match
} // max
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the sum of vector 'x'.
* @param x the vector whose sum is sought
*/
def sum (x: Vec): Any =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC].sum
case _: VectorD => x.asInstanceOf [VectorD].sum
case _: VectorI => x.asInstanceOf [VectorI].sum
case _: VectorL => x.asInstanceOf [VectorL].sum
case _: VectorQ => x.asInstanceOf [VectorQ].sum
case _: VectorR => x.asInstanceOf [VectorR].sum
case _: VectorS => x.asInstanceOf [VectorS].sum
case _ => println ("sum: vector type not supported"); null
} // match
} // sum
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the mean of vector 'x'.
* @param x the vector whose mean is sought
*/
def mean (x: Vec): Any =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC].mean
case _: VectorD => x.asInstanceOf [VectorD].mean
case _: VectorI => x.asInstanceOf [VectorI].mean
case _: VectorL => x.asInstanceOf [VectorL].mean
case _: VectorQ => x.asInstanceOf [VectorQ].mean
case _: VectorR => x.asInstanceOf [VectorR].mean
case _: VectorS => x.asInstanceOf [VectorS].mean
case _ => println ("mean: vector type not supported"); null
} // match
} // mean
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the variance of vector 'x'.
* @param x the vector whose variance is sought
*/
def variance (x: Vec): Any =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC].variance
case _: VectorD => x.asInstanceOf [VectorD].variance
case _: VectorI => x.asInstanceOf [VectorI].variance
case _: VectorL => x.asInstanceOf [VectorL].variance
case _: VectorQ => x.asInstanceOf [VectorQ].variance
case _: VectorR => x.asInstanceOf [VectorR].variance
case _: VectorS => x.asInstanceOf [VectorS].variance
case _ => println ("variance: vector type not supported"); null
} // match
} // variance
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the correlation between vectors 'x' and 'y'.
* @param x the first vector
* @param y the second vector
*/
def corr (x: Vec, y: Vec): Double =
{
x match {
case _: VectorC => x.asInstanceOf [VectorC].corr (y.asInstanceOf [VectorC].toDouble)
case _: VectorD => x.asInstanceOf [VectorD].corr (y.asInstanceOf [VectorD])
case _: VectorI => x.asInstanceOf [VectorI].corr (y.asInstanceOf [VectorI].toDouble)
case _: VectorL => x.asInstanceOf [VectorL].corr (y.asInstanceOf [VectorL].toDouble)
case _: VectorQ => x.asInstanceOf [VectorQ].corr (y.asInstanceOf [VectorQ].toDouble)
case _: VectorR => x.asInstanceOf [VectorR].corr (y.asInstanceOf [VectorR].toDouble)
case _ => println ("corr: vector type not supported"); Double.NaN
} // match
} // corr
} // Vec object
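//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
// Illustrative usage sketch (not part of the original file; the 'VectorD'
// companion 'apply' constructor from scalation.linalgebra is assumed):
//
//   val x = VectorD (1.0, 2.0, 3.0)
//   val y = VectorD (2.0, 4.0, 6.0)
//   Vec.mean (x)          // 2.0
//   Vec.max (y)           // 6.0
//   Vec.corr (x, y)       // 1.0 - perfectly linearly correlated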
| mvnural/scalation | src/main/scala/scalation/linalgebra/Vec.scala | Scala | mit | 13,919 |
package argon.codegen.chiselgen
import argon.core._
import argon.nodes._
trait ChiselGenIfThenElse extends ChiselCodegen {
override protected def emitNode(lhs: Sym[_], rhs: Op[_]): Unit = rhs match {
case IfThenElse(cond, thenp, elsep) =>
open(src"val $lhs = {")
open(src"if ($cond) { ")
emitBlock(thenp)
close("}")
open("else {")
emitBlock(elsep)
close("}")
close("}")
case _ => super.emitNode(lhs, rhs)
}
}
| stanford-ppl/spatial-lang | spatial/core/src/spatial/codegen/chiselgen/ChiselGenIfThenElse.scala | Scala | mit | 474 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin.flink
import java.io.File
import java.net.URLClassLoader
import java.util.Properties
import org.apache.zeppelin.interpreter.InterpreterContext
import org.apache.zeppelin.interpreter.thrift.InterpreterCompletion
import scala.tools.nsc.Settings
import scala.tools.nsc.interpreter.{IMain, JPrintWriter}
class FlinkScala212Interpreter(override val properties: Properties,
override val flinkScalaClassLoader: URLClassLoader)
extends FlinkScalaInterpreter(properties, flinkScalaClassLoader) {
override def completion(buf: String,
cursor: Int,
context: InterpreterContext): java.util.List[InterpreterCompletion] = {
val completions = scalaCompletion.complete(buf.substring(0, cursor), cursor).candidates
.map(e => new InterpreterCompletion(e, e, null))
scala.collection.JavaConversions.seqAsJavaList(completions)
}
override def createIMain(settings: Settings, out: JPrintWriter): IMain = new IMain(settings, out)
override def createSettings(): Settings = {
val settings = new Settings()
settings.embeddedDefaults(flinkScalaClassLoader)
settings.usejavacp.value = true
settings.Yreplsync.value = true
settings.classpath.value = userJars.mkString(File.pathSeparator)
settings
}
}
| fogbeam/zeppelin_mirror | flink/flink-scala-2.12/src/main/scala/org/apache/zeppelin/flink/FlinkScala212Interpreter.scala | Scala | apache-2.0 | 2,146 |
package graycode
import org.scalacheck.Gen
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import org.scalatest.{FlatSpec, Matchers}
import graycode.GrayCode._
class GrayCodeSpec extends FlatSpec with GeneratorDrivenPropertyChecks with Matchers {
behavior of "binary reflected Gray code"
val numbersLessThan128 = Gen.posNum[Int].suchThat(_ < 128)
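  // NOTE: the property below is an unimplemented placeholder in the original source -
  // it has an empty description and its body always fails.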
it should "" in {
forAll(numbersLessThan128) { (x: Int) =>
fail
}
}
}
| HolyHaddock/scalacheck-example | src/test/scala/graycode/GrayCodeSpec.scala | Scala | mit | 454 |
/*******************************************************************************
* Copyright 2010 Maxime Lévesque
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
***************************************************************************** */
package org.squeryl.internals
import java.lang.annotation.Annotation
import java.lang.reflect.{Field, Method, Constructor, InvocationTargetException, Type, ParameterizedType}
import java.sql.ResultSet
import scala.annotation.tailrec
import org.squeryl.annotations.{ColumnBase, Column}
import collection.mutable.{HashMap, HashSet}
import org.squeryl.Session
import org.squeryl.dsl.CompositeKey
import org.squeryl.customtypes.CustomType
import org.json4s.scalap.scalasig._
import java.lang.reflect.Member
import org.squeryl.dsl.ast.ConstantTypedExpression
import org.squeryl.customtypes.CustomType
class FieldMetaData(
val parentMetaData: PosoMetaData[_],
val nameOfProperty:String,
val fieldType: Class[_], // if isOption, this fieldType is the type param of Option, i.e. the T in Option[T]
val wrappedFieldType: Class[_], //in primitive type mode fieldType == wrappedFieldType, in custom type mode wrappedFieldType is the 'real'
// type, i.e. the (primitive) type that jdbc understands
val customTypeFactory: Option[AnyRef => Product1[Any] with AnyRef],
val isOption: Boolean,
getter: Option[Method],
setter: Option[Method],
field: Option[Field],
columnAnnotation: Option[Column],
val isOptimisticCounter: Boolean,
val sampleValue: Any) {
def nativeJdbcType =
this.schema.fieldMapper.nativeJdbcTypeFor(wrappedFieldType)
/**
* None if this FieldMetaData is not an enumeration, Some(theParentEnumeration) otherwise
*/
val enumeration: Option[Enumeration] =
sampleValue match {
case Some(e: Enumeration#Value) => Some(Utils.enumerationForValue(e))
case e: Enumeration#Value => Some(Utils.enumerationForValue(e))
case _ => None
}
def canonicalEnumerationValueFor(id: Int) =
if(sampleValue == null)
org.squeryl.internals.Utils.throwError("classes with Enumerations must have a zero param constructor that assigns a sample to the enumeration field")
else
enumeration flatMap { e: Enumeration =>
e.values find { _.id == id }
} get
/**
* This field is mutable only by the Schema trait, and only during the Schema instantiation,
* so it can safely be considered immutable (read only) by the columnAttributes accessor
*/
private val _columnAttributes = new HashSet[ColumnAttribute]
private [squeryl] def _clearColumnAttributes = {
_columnAttributes.clear
}
private [squeryl] def _addColumnAttribute(ca: ColumnAttribute) =
_columnAttributes.add(ca)
/**
   * In some circumstances (like in the test suite) a Schema instance must run on multiple database types;
   * this Map keeps the sequence names per database adapter
*/
private val _sequenceNamePerDBAdapter = new HashMap[Class[_],String]
def sequenceName: String = {
val ai = _columnAttributes.collectFirst{case a: AutoIncremented => a}.
getOrElse(org.squeryl.internals.Utils.throwError(this + " is not declared as autoIncremented, hence it has no sequenceName"))
if(ai.nameOfSequence != None) {
return ai.nameOfSequence.get
}
synchronized {
val c = Session.currentSession.databaseAdapter.getClass
val s = _sequenceNamePerDBAdapter.get(c)
if(s != None)
return s.get
val s0 = Session.currentSession.databaseAdapter.createSequenceName(this)
_sequenceNamePerDBAdapter.put(c, s0)
return s0
}
}
def isIdFieldOfKeyedEntity: Boolean =
parentMetaData.viewOrTable.ked.exists(_.idPropertyName == nameOfProperty)
if(isIdFieldOfKeyedEntity && ! classOf[CompositeKey].isAssignableFrom(wrappedFieldType)) {
schema.defaultColumnAttributesForKeyedEntityId(wrappedFieldType).foreach(ca => {
if(ca.isInstanceOf[AutoIncremented] && ! (wrappedFieldType.isAssignableFrom(classOf[java.lang.Long]) || wrappedFieldType.isAssignableFrom(classOf[java.lang.Integer])))
        org.squeryl.internals.Utils.throwError("Schema " + schema.getClass.getName + " has method defaultColumnAttributesForKeyedEntityId returning AutoIncremented \\nfor " +
          " all KeyedEntity tables, while class " + parentMetaData.clasz.getName +
          "\\n has its id field of type " + fieldType.getName + ", which is neither an Int nor a Long, \\n the only two types that can " +
"be auto incremented")
_addColumnAttribute(ca)
})
}
private [squeryl] var _defaultValue: Option[ConstantTypedExpression[_,_]] = None
def columnAttributes: Iterable[ColumnAttribute] = _columnAttributes
def defaultValue: Option[ConstantTypedExpression[_,_]] = _defaultValue
/**
   * The db column type declaration overridden in the schema; if None, the default column type for
   * the adapter is used (see Correspondence of field types to database column types http://squeryl.org/schema-definition.html)
*/
def explicitDbTypeDeclaration: Option[String] = {
_columnAttributes.collectFirst{case d: DBType => d.declaration}
}
/**
   * True if an explicit db type cast has been requested
*/
def explicitDbTypeCast: Boolean = _columnAttributes.collectFirst{case d: DBType => d.explicit}.getOrElse(false)
def isTransient =
_columnAttributes.exists(_.isInstanceOf[IsTransient])
def isCustomType = customTypeFactory != None
/**
* @return the length defined in org.squeryl.annotations.Column.length
* if it is defined, or the default length for Java primitive types.
   * The unit of the length depends on the type; the convention is
   * that numeric types have a length in bytes, boolean in bits,
   * date has -1, and for strings the length is in chars.
* double,long -> 8, float,int -> 4, byte -> 1, boolean -> 1
* java.util.Date -> -1.
*
* The use of this field is to help custom schema generators select
* the most appropriate column type
*/
def length: Int =
if(columnAnnotation == None || columnAnnotation.get.length == -1) {
FieldMetaData.defaultFieldLength(wrappedFieldType, this)
}
else
columnAnnotation.get.length
def scale: Int =
if(columnAnnotation == None || columnAnnotation.get.scale == -1)
schema.defaultSizeOfBigDecimal._2
else
columnAnnotation.get.scale
def schema = parentMetaData.schema
/**
* The name of the database column
*/
def columnName =
if(columnAnnotation == None) {
val nameDefinedInSchema = _columnAttributes.collectFirst{case n: Named => n.name}
parentMetaData.schema.columnNameFromPropertyName(nameDefinedInSchema.getOrElse(nameOfProperty))
}
else {
val ca = columnAnnotation.get
var res = ca.name
if(res == "")
res = ca.value
if(res == "")
parentMetaData.schema.columnNameFromPropertyName(nameOfProperty)
else
res
}
protected def createResultSetHandler =
this.schema.fieldMapper.resultSetHandlerFor(wrappedFieldType)
val resultSetHandler = createResultSetHandler
if(!isCustomType)
assert(fieldType == wrappedFieldType,
"expected fieldType == wrappedFieldType in primitive type mode, got "+
fieldType.getName + " != " + wrappedFieldType.getName)
override def toString =
parentMetaData.clasz.getSimpleName + "." + columnName + ":" + displayType
def isStringType =
wrappedFieldType.isAssignableFrom(classOf[String])
def displayType =
(if(isOption)
"Option[" + fieldType.getName + "]"
else
fieldType.getName)
/**
   * When true, will cause Schema generation to declare the column as PrimaryKey. Note that for
   * KeyedEntity[]s, declaredAsPrimaryKeyInSchema is always true and cannot be made otherwise;
   * the inverse is not true: a field can be declared as primary key in the Schema without it being the
   * id of a KeyedEntity[], ex. :
*
* <pre>
* on(myTable)(t =>declare(
* myField.is(primaryKey) // myField doesn't need to be a KeyedEntity.id
* ))
* </pre>
*
* <pre>
* on(myKeyedEntityTable)(t =>declare(
   *   id.is(autoIncremented) // omitting primaryKey here has no effect, it is equivalent to id.is(primaryKey,autoIncremented)
* ))
* </pre>
*/
def declaredAsPrimaryKeyInSchema =
columnAttributes.exists(_.isInstanceOf[PrimaryKey])
def isAutoIncremented =
columnAttributes.exists(_.isInstanceOf[AutoIncremented])
/**
* Inserts will only set values for a column if isInsertable is true
*/
def isInsertable =
!columnAttributes.exists(_.isInstanceOf[Uninsertable])
/**
* Updates will only set values for a column if isUpdatable is true
*/
def isUpdatable =
!columnAttributes.exists(_.isInstanceOf[Unupdatable])
/**
* gets the value of the field from the object.
* Note that it will unwrap Option[] and return null instead of None, i.e.
   * it converts None and Some to null and some.get respectively
* @param o the object that owns the field
*/
def get(o:AnyRef): AnyRef =
try {
val res =
if(getter != None)
_getFromGetter(o)
else
_getFromField(o)
if(isOption) {
if(res == None)
null
else
res.asInstanceOf[Option[_]].get.asInstanceOf[AnyRef]
}
else
res
}
catch {
case e: IllegalArgumentException => org.squeryl.internals.Utils.throwError(wrappedFieldType.getName + " used on " + o.getClass.getName)
}
def getNativeJdbcValue(o:AnyRef): AnyRef = {
val r = get(o)
schema.fieldMapper.nativeJdbcValueFor(wrappedFieldType, r)
}
def setFromResultSet(target: AnyRef, rs: ResultSet, index: Int) = {
val v = resultSetHandler(rs, index)
set(target, v)
}
/**
* Sets the value 'v' to the object, the value will be converted to Some or None
* if the field is an Option[], (if isOption).
*/
def set(target: AnyRef, v: AnyRef): Unit = {
try {
val v0: AnyRef =
if(v == null)
null
else if(enumeration != None)
canonicalEnumerationValueFor(v.asInstanceOf[java.lang.Integer].intValue)
else if(customTypeFactory == None)
v
else {
val f = customTypeFactory.get
v match {
case r: CustomType[_] =>
f(if(r == null) null else r._1.asInstanceOf[AnyRef])
case _ =>
f(v)
}
}
val actualValue =
if(!isOption)
v0
else
Option(v0)
if(setter != None)
_setWithSetter(target, actualValue)
else
_setWithField(target, actualValue)
}
catch {
case e: Exception => {
val typeOfV = if(v == null) "null" else v.getClass.getCanonicalName
org.squeryl.internals.Utils.throwError(
this + " was invoked with value '" + v + "' of type " + typeOfV + " on object of type " + target.getClass.getName + " \\n" + e)
}
}
}
private def _getFromGetter(o:AnyRef) =
getter.get.invoke(o)
private def _setWithSetter(target: AnyRef, v: AnyRef) =
setter.get.invoke(target, v)
private def _getFromField(o:AnyRef) =
field.get.get(o)
private def _setWithField(target: AnyRef, v: AnyRef) =
field.get.set(target, v)
}
trait FieldMetaDataFactory {
def hideFromYieldInspection(o: AnyRef, f: Field): Boolean = false
def build(parentMetaData: PosoMetaData[_], name: String, property: (Option[Field], Option[Method], Option[Method], Set[Annotation]), sampleInstance4OptionTypeDeduction: AnyRef, isOptimisticCounter: Boolean): FieldMetaData
def createPosoFactory(posoMetaData: PosoMetaData[_]): ()=>AnyRef
}
object FieldMetaData {
private val _EMPTY_ARRAY = new Array[Object](0)
var factory = new FieldMetaDataFactory {
def createPosoFactory(posoMetaData: PosoMetaData[_]): ()=>AnyRef =
() => {
val c = posoMetaData.constructor
c._1.newInstance(c._2 :_*).asInstanceOf[AnyRef];
}
def build(parentMetaData: PosoMetaData[_], name: String, property: (Option[Field], Option[Method], Option[Method], Set[Annotation]), sampleInstance4OptionTypeDeduction: AnyRef, isOptimisticCounter: Boolean) = {
val fieldMapper = parentMetaData.schema.fieldMapper
val field = property._1
val getter = property._2
val setter = property._3
val annotations = property._4
val colAnnotation = annotations.collectFirst{case a: ColumnBase => a}
/*
* Retrieve the member in use, its class and its generic type
*/
val (member, clsOfField, typeOfField) =
(setter.map(s => (s: Member, s.getParameterTypes.head, s.getGenericParameterTypes.head))
.orElse(getter.map(g => (g: Member, g.getReturnType, g.getGenericReturnType)))
.orElse(field.map(f => (f: Member, f.getType, f.getType)))
.getOrElse(org.squeryl.internals.Utils.throwError("invalid field group")))
/*
* Look for a value in the sample type. If one exists and
* it is not None, we can use it to deduce the Option type.
*/
var v: AnyRef =
if(sampleInstance4OptionTypeDeduction != null) {
field flatMap { f =>
f.get(sampleInstance4OptionTypeDeduction) match {
case a: AnyRef => Some(a)
case _ => None
}
} orElse {
getter flatMap { _.invoke(sampleInstance4OptionTypeDeduction, _EMPTY_ARRAY : _*) match {
case a: AnyRef => Some(a)
case _ => None
}
}
} getOrElse
createDefaultValue(fieldMapper, member, clsOfField, Some(typeOfField), colAnnotation)
}
else null
      if(v != null && v == None) // can't deduce the type from None, keep trying
v = null
val constructorSuppliedDefaultValue = v
var customTypeFactory: Option[AnyRef=>Product1[Any] with AnyRef] = None
if(classOf[Product1[Any]].isAssignableFrom(clsOfField))
customTypeFactory = _createCustomTypeFactory(fieldMapper, parentMetaData.clasz, clsOfField)
if(customTypeFactory != None) {
val f = customTypeFactory.get
v = f(null) // this creates a dummy (sample) field
}
if(v == null)
/*
* If we have not yet been able to deduce the value of the field, delegate to createDefaultValue
* in order to do so.
*/
v = createDefaultValue(fieldMapper, member, clsOfField, Some(typeOfField), colAnnotation)
val deductionFailed =
v match {
case Some(None) => true
case null => true
case a:Any => false
}
if(deductionFailed) {
val errorMessage = "Could not deduce Option[] type of field '" + name + "' of class " + parentMetaData.clasz.getName
org.squeryl.internals.Utils.throwError(errorMessage)
}
val isOption = v.isInstanceOf[Some[_]]
val typeOfFieldOrTypeOfOption = v match {
case Some(x) =>
x.getClass
case _ =>
v.getClass
}
val primitiveFieldType = v match {
case p: Product1[_] =>
p._1.getClass
case Some(x: Product1[_]) =>
//if we get here, customTypeFactory has not had a chance to get created
customTypeFactory = _createCustomTypeFactory(fieldMapper, parentMetaData.clasz, typeOfFieldOrTypeOfOption)
x._1.asInstanceOf[AnyRef].getClass
case _ =>
typeOfFieldOrTypeOfOption
}
if(typeOfFieldOrTypeOfOption == None.getClass) {
Utils.throwError(
"class " + parentMetaData.clasz.getCanonicalName +" used in table " +
parentMetaData.viewOrTable.name +
", needs a zero arg constructor with sample values for Option[] field " +
name
)
}
new FieldMetaData(
parentMetaData,
name,
typeOfFieldOrTypeOfOption,
primitiveFieldType,
customTypeFactory,
isOption,
getter,
setter,
field,
colAnnotation,
isOptimisticCounter,
constructorSuppliedDefaultValue)
}
}
/**
   * Creates a closure that takes a java.lang primitive wrapper (ex.: java.lang.Integer) and
   * creates an instance of a custom type with it; the factory accepts null to create
   * default values for non nullable primitive types (int, long, etc...)
*/
private def _createCustomTypeFactory(fieldMapper: FieldMapper, ownerClass: Class[_], typeOfField: Class[_]): Option[AnyRef=>Product1[Any] with AnyRef] = {
// run through the given class hierarchy and return the first method
// which is called "value" and doesn't return java.lang.Object
@tailrec
def find(c: Class[_]): Option[Method] =
if(c != null)
c.getMethods.find(m => m.getName == "value" && m.getReturnType != classOf[java.lang.Object]) match {
case Some(m) => Some(m)
case None => find(c.getSuperclass)
}
else None
// invoke the given constructor and expose possible exceptions to the caller.
def invoke(c: Constructor[_], value: AnyRef) =
try {
c.newInstance(value).asInstanceOf[Product1[Any] with AnyRef]
} catch {
case ex: InvocationTargetException =>
throw ex.getTargetException
}
find(typeOfField) flatMap(m => {
val pType = m.getReturnType
assert(fieldMapper.isSupported(pType),
"enclosed type %s of CustomType %s is not a supported field type!"
.format(pType.getName, typeOfField.getName))
val c = typeOfField.getConstructor(pType)
val defaultValue = createDefaultValue(fieldMapper, c, pType, None, None)
if(defaultValue == null) None
else
Some((i: AnyRef) =>
if(i == null) invoke(c, defaultValue)
else invoke(c, i)
)
})
}
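  // Illustrative sketch (not part of the original file; the exact CustomType constructor is an
  // assumption): a wrapper that the factory above can instantiate, because it has a one-argument
  // constructor over a supported primitive type and exposes it through a 'value' accessor that
  // does not return java.lang.Object, e.g. something like
  //
  //   class Email(val value: String) extends CustomType[String](value)
  //
  // find(classOf[Email]) then locates 'value', and the returned closure wraps a raw String
  // (or a default sample for null) into an Email via that constructor.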
def defaultFieldLength(fieldType: Class[_], fmd: FieldMetaData) = {
if(classOf[String].isAssignableFrom(fieldType))
fmd.schema.defaultLengthOfString
else if(classOf[java.math.BigDecimal].isAssignableFrom(fieldType) || classOf[scala.math.BigDecimal].isAssignableFrom(fieldType)) {
fmd.schema.defaultSizeOfBigDecimal._1
}
else {
fmd.schema.fieldMapper.defaultColumnLength(fieldType)
}
}
def optionTypeFromScalaSig(member: Member): Option[Class[_]] = {
val scalaSigOption = ScalaSigParser.parse(member.getDeclaringClass())
scalaSigOption flatMap { scalaSig =>
val result = scalaSig.symbols.filter { sym =>
member.getName == sym.name
}.collect {
case sym: MethodSymbol => sym.infoType
}.collect {
case tpe: NullaryMethodType => tpe.resultType
}.collect {
case TypeRefType(_, _, Seq(TypeRefType(_, tpe, _))) =>
PartialFunction.condOpt(tpe.name){
case "Int" => classOf[scala.Int]
case "Short" => classOf[scala.Short]
case "Long" => classOf[scala.Long]
case "Double" => classOf[scala.Double]
case "Float" => classOf[scala.Float]
case "Boolean" => classOf[scala.Boolean]
case "Byte" => classOf[scala.Byte]
case "Char" => classOf[scala.Char]
}
}
assert(result.size <= 1)
result.headOption.flatten
}
}
def createDefaultValue(fieldMapper: FieldMapper, member: Member, p: Class[_], t: Option[Type], optionFieldsInfo: Option[Column]): Object = {
if (p.isAssignableFrom(classOf[Option[Any]])) {
/*
* First we'll look at the annotation if it exists as it's the lowest cost.
*/
optionFieldsInfo.flatMap(ann =>
if(ann.optionType != classOf[Object])
Some(createDefaultValue(fieldMapper, member, ann.optionType, None, None))
else None).orElse{
/*
* Next we'll try the Java generic type. This will fail if the generic parameter is a primitive as
* we'll see Object instead of scala.X
*/
t match {
case Some(pt: ParameterizedType) => {
pt.getActualTypeArguments.toList match {
case oType :: Nil => {
if(classOf[Class[_]].isInstance(oType)) {
/*
* Primitive types are seen by Java reflection as classOf[Object],
* if that's what we find then we need to get the real value from @ScalaSignature
*/
val trueTypeOption =
if (classOf[Object] == oType) optionTypeFromScalaSig(member)
else Some(oType.asInstanceOf[Class[_]])
trueTypeOption flatMap { trueType =>
val deduced = createDefaultValue(fieldMapper, member, trueType, None, optionFieldsInfo)
Option(deduced) // Couldn't create default for type param if null
}
} else{
None //Type parameter is not a Class
}
}
case _ => None //Not a single type parameter
}
}
case _ => None //Not a parameterized type
}
}
}
else {
fieldMapper.trySampleValueFor(p)
}
}
}
| ccap/Squeryl | src/main/scala/org/squeryl/internals/FieldMetaData.scala | Scala | apache-2.0 | 21,974 |
package fb
import scala.collection.mutable.HashMap
import scala.collection.mutable.ListBuffer
import Models._
object Data {
val users: HashMap[String, User] = new HashMap
val publicKeys: HashMap[String, String] = new HashMap
val pages: HashMap[String, Page] = new HashMap
val posts: HashMap[String, Post] = new HashMap
val friends: HashMap[String, ListBuffer[String]] = new HashMap
}
| BabakAp/ProjectFB | src/main/scala/fb/Data.scala | Scala | apache-2.0 | 396 |
package colossus.testkit
import akka.actor._
import akka.testkit.TestProbe
import colossus.core._
import scala.concurrent.duration._
trait MockConnection extends Connection with MockChannelActions {
/**
* Simulate event-loop iterations, calling readyForData until this buffer
* fills or everything is written. This can be used to test backpressure
* situations
*
* Be aware you need to call clearBuffer yourself
*/
def iterate[T](outputBufferSize: Int)(f: => T): T = {
val res = f
if (writeReadyEnabled && handleWrite(new DynamicOutBuffer(outputBufferSize))) {}
res
}
/**
* keep performing event loop iterations until the output buffer fills or
* there's no more to write
*/
def loop(outputBufferSize: Int = 100) {
while (writeReadyEnabled && handleWrite(new DynamicOutBuffer(outputBufferSize))) {}
}
/**
* Simulates event loop iteration, clearing the buffer on each iteration to avoid any backpressure
*/
def iterateAndClear(outputBufferSize: Int = 100) {
val buf = new DynamicOutBuffer(outputBufferSize)
while (writeReadyEnabled) {
buf.reset()
handleWrite(buf)
clearBuffer()
}
}
def iterate(bsize: Int = 100) = iterate[Unit](bsize)({})
def disrupt() {
close(DisconnectCause.Closed)
}
def testWrite(d: DataBuffer): WriteStatus = write(d)
def workerProbe: TestProbe
def serverProbe: Option[TestProbe]
/**
* checks to see if the connection handler has attempted to close the
* connection.
*/
def expectDisconnectAttempt() {
workerProbe.expectMsg(100.milliseconds, WorkerCommand.Disconnect(id))
}
}
trait TypedMockConnection[T <: ConnectionHandler] extends MockConnection {
def typedHandler: T
}
object MockConnection {
def server[T <: ServerConnectionHandler](handlerF: ServerContext => T, _maxWriteSize: Int = 1024)(
implicit sys: ActorSystem): ServerConnection with TypedMockConnection[T] = {
val (_serverProbe, server) = FakeIOSystem.fakeServerRef
val fw = FakeIOSystem.fakeWorker
val ctx = ServerContext(server, fw.worker.generateContext())
val _handler = handlerF(ctx)
_handler.setBind()
new ServerConnection(_handler.context.id, _handler, server, fw.worker) with TypedMockConnection[T] {
def maxWriteSize = _maxWriteSize
def workerProbe = fw.probe
def serverProbe = Some(_serverProbe)
def typedHandler = _handler
}
}
def client[T <: ClientConnectionHandler](_handler: T, fakeworker: FakeWorker, _maxWriteSize: Int)(
implicit sys: ActorSystem): ClientConnection with TypedMockConnection[T] = {
new ClientConnection(_handler.id, _handler, fakeworker.worker) with TypedMockConnection[T] {
def maxWriteSize = _maxWriteSize
def workerProbe = fakeworker.probe
def serverProbe = None
def typedHandler =
_handler //don't rename _handler to handler, since Connection already has a member with that name
}
}
def client[T <: ClientConnectionHandler](handlerF: Context => T, _maxWriteSize: Int = 1024)(
implicit sys: ActorSystem): ClientConnection with TypedMockConnection[T] = {
val fakeworker = FakeIOSystem.fakeWorker
val ctx = fakeworker.worker.generateContext()
val handler = handlerF(ctx)
handler.setBind()
client(handler, fakeworker, _maxWriteSize)
}
/**
* This is intended to be used when creating protocol clients. The underlying
* handler and the user-facing client API are returned
*/
def apiClient[T](handlerF: WorkerRef => T, _maxWriteSize: Int = 1024)(
implicit sys: ActorSystem): (ClientConnection with MockConnection, T) = {
val fakeworker = FakeIOSystem.fakeWorker
val clnt = handlerF(fakeworker.worker)
val handler = fakeworker.probe.receiveOne(1.second) match {
case WorkerCommand.Bind(c: ClientConnectionHandler) => c
case other => throw new Exception("Invalid worker command received by probe")
}
handler.setBind()
(client(handler, fakeworker, _maxWriteSize), clnt)
}
}
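// Minimal usage sketch (not part of the original file; `MyHandler` and the implicit
// ActorSystem are assumptions, and the handler's own API is elided):
//
//   implicit val system = ActorSystem("spec")
//   val con = MockConnection.client(ctx => new MyHandler(ctx))
//   con.typedHandler.connected(con)   // hand the mock endpoint to the handler
//   con.iterateAndClear()             // drain pending writes without simulating backpressure
//   con.expectDisconnectAttempt()     // assert the handler asked its worker to disconnect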
| tumblr/colossus | colossus-testkit/src/main/scala/colossus.testkit/MockConnection.scala | Scala | apache-2.0 | 4,172 |
package nl.dekkr.feedfrenzy.backend.extractor.action
import java.time.format.{DateTimeFormatter, DateTimeFormatterBuilder, DateTimeParseException}
import java.time.{LocalDate, LocalTime, OffsetDateTime, ZoneOffset}
import java.util.Locale
import nl.dekkr.feedfrenzy.backend.model.DateParser
class ParseDateAction extends BaseAction {
def execute(vars : VariableMap, a: DateParser): List[String] = {
val inputVar = getVariable(a.inputVariable, vars)
try {
val locale: Locale = new Locale(a.locale)
val formatter = createFormatter(a.pattern, locale)
      inputVar map { raw =>
        try {
          val date = LocalDate.parse(raw, formatter)
          val time = try {
            LocalTime.parse(raw, formatter)
} catch {
case e: DateTimeParseException =>
if (a.padTime) {
LocalTime.now()
} else {
LocalTime.of(0, 0)
}
}
OffsetDateTime.of(date, time, ZoneOffset.UTC).format(DateTimeFormatter.ISO_OFFSET_DATE_TIME)
} catch {
case e: DateTimeParseException =>
val response = s"ERROR: ${e.getMessage} - Format [${a.pattern}], locale [${a.locale}]"
logger.debug(response)
response
}
}
} catch {
case e: IllegalArgumentException => logger.debug(s"Could not create date format from format [${a.pattern}] and locale [${a.locale}] - ${e.getMessage}")
inputVar
}
}
  /**
    * Builds a case-insensitive date/time formatter for the given pattern and locale.
* @param pattern - Pattern to use for parsing
* @param locale - The locale to be used
*/
private def createFormatter(pattern: String, locale: Locale): DateTimeFormatter =
new DateTimeFormatterBuilder()
.parseCaseInsensitive()
.appendPattern(pattern)
.toFormatter
.withLocale(locale)
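  // For example (illustrative): createFormatter("dd MMM yyyy", Locale.ENGLISH) accepts both
  // "03 Jan 2016" and "03 JAN 2016", because the builder above is marked case-insensitive.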
}
| dekkr/feedfrenzy-backend | src/main/scala/nl/dekkr/feedfrenzy/backend/extractor/action/ParseDateAction.scala | Scala | mit | 1,885 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import org.scalatest.PrivateMethodTester
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.{SchedulerBackend, TaskScheduler, TaskSchedulerImpl}
import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
import org.apache.spark.scheduler.local.LocalSchedulerBackend
class SparkContextSchedulerCreationSuite
extends SparkFunSuite with LocalSparkContext with PrivateMethodTester with Logging {
def createTaskScheduler(master: String): TaskSchedulerImpl =
createTaskScheduler(master, "client")
def createTaskScheduler(master: String, deployMode: String): TaskSchedulerImpl =
createTaskScheduler(master, deployMode, new SparkConf())
def createTaskScheduler(
master: String,
deployMode: String,
conf: SparkConf): TaskSchedulerImpl = {
// Create local SparkContext to setup a SparkEnv. We don't actually want to start() the
// real schedulers, so we don't want to create a full SparkContext with the desired scheduler.
sc = new SparkContext("local", "test", conf)
val createTaskSchedulerMethod =
PrivateMethod[Tuple2[SchedulerBackend, TaskScheduler]]('createTaskScheduler)
val (_, sched) = SparkContext invokePrivate createTaskSchedulerMethod(sc, master, deployMode)
sched.asInstanceOf[TaskSchedulerImpl]
}
test("bad-master") {
val e = intercept[SparkException] {
createTaskScheduler("localhost:1234")
}
assert(e.getMessage.contains("Could not parse Master URL"))
}
test("local") {
val sched = createTaskScheduler("local")
sched.backend match {
case s: LocalSchedulerBackend => assert(s.totalCores === 1)
case _ => fail()
}
}
test("local-*") {
val sched = createTaskScheduler("local[*]")
sched.backend match {
case s: LocalSchedulerBackend =>
assert(s.totalCores === Runtime.getRuntime.availableProcessors())
case _ => fail()
}
}
test("local-n") {
val sched = createTaskScheduler("local[5]")
assert(sched.maxTaskFailures === 1)
sched.backend match {
case s: LocalSchedulerBackend => assert(s.totalCores === 5)
case _ => fail()
}
}
test("local-*-n-failures") {
val sched = createTaskScheduler("local[* ,2]")
assert(sched.maxTaskFailures === 2)
sched.backend match {
case s: LocalSchedulerBackend =>
assert(s.totalCores === Runtime.getRuntime.availableProcessors())
case _ => fail()
}
}
test("local-n-failures") {
val sched = createTaskScheduler("local[4, 2]")
assert(sched.maxTaskFailures === 2)
sched.backend match {
case s: LocalSchedulerBackend => assert(s.totalCores === 4)
case _ => fail()
}
}
test("bad-local-n") {
val e = intercept[SparkException] {
createTaskScheduler("local[2*]")
}
assert(e.getMessage.contains("Could not parse Master URL"))
}
test("bad-local-n-failures") {
val e = intercept[SparkException] {
createTaskScheduler("local[2*,4]")
}
assert(e.getMessage.contains("Could not parse Master URL"))
}
test("local-default-parallelism") {
val conf = new SparkConf().set("spark.default.parallelism", "16")
val sched = createTaskScheduler("local", "client", conf)
sched.backend match {
case s: LocalSchedulerBackend => assert(s.defaultParallelism() === 16)
case _ => fail()
}
}
test("local-cluster") {
createTaskScheduler("local-cluster[3, 14, 1024]").backend match {
case s: StandaloneSchedulerBackend => // OK
case _ => fail()
}
}
}
| akopich/spark | core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala | Scala | apache-2.0 | 4,378 |
/*
* Copyright 2015 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package controllers.s
import play.api.test.{FakeRequest, PlaySpecification, WithApplication}
/**
* The class MultipleTasksSampleSpec is a specification class for [[MultipleTasksSample]].
*
* @author Yinan Ding ([email protected])
*/
class MultipleTasksSampleSpec extends PlaySpecification {
"The MultipleTasksSample" should {
"respond to GET demo" in new WithApplication {
val result = route(app, FakeRequest(GET, routes.MultipleTasksSample.demo().url)).get
// Assert the status and the content
status(result) must equalTo(OK)
contentType(result) must beSome("text/plain")
contentAsString(result).toInt must greaterThan(0)
}
}
}
| linkedin/play-parseq | sample/test/controllers/s/MultipleTasksSampleSpec.scala | Scala | apache-2.0 | 1,164 |
package extractors.exercises
object Exercise2 {
object At {
def unapply(s: String): Option[(String, String)] = sys.error("TODO")
}
}
| julienrf/scala-lessons | highlights/extractors/code/src/main/scala/extractors/exercises/Exercise2.scala | Scala | mit | 146 |
package io.skysail.api.osgi.bundlerepository.domain
import org.osgi.framework.Version
case class Resource(symbolicName: String, version: Version)
| evandor/skysail-server | skysail.api/src/io/skysail/api/osgi/bundlerepository/domain/Resource.scala | Scala | apache-2.0 | 148 |
package webpageparser
import model.estate.WebPage
import model.DomainLocalhost
import model.estate.Estate
import model.Url
class ParserTestLocalhost(webPage: WebPage[DomainLocalhost]) extends WebPageParser[DomainLocalhost] {
def parse(): Option[Estate[DomainLocalhost]] = {
None
}
def getEstateUrls: List[Url] = List()
} | tiborbotos/domino | domino-crawler/src/main/scala/webpageparser/ParserTestLocalhost.scala | Scala | lgpl-3.0 | 337 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon.integration.solvers
import leon.test._
import leon.test.helpers._
import leon.purescala.Common._
import leon.purescala.Definitions._
import leon.purescala.ExprOps._
import leon.purescala.Constructors._
import leon.purescala.Expressions._
import leon.purescala.Types._
import leon.LeonContext
import leon.solvers._
import leon.solvers.smtlib._
import leon.solvers.unrolling._
import leon.solvers.z3._
class GlobalVariablesSuite extends LeonTestSuiteWithProgram with ExpressionsDSL {
val sources = List(
"""|import leon.lang._
|import leon.annotation._
|
|object GlobalVariables {
|
| def test(i: BigInt): BigInt = {
| 0 // will be replaced
| }
|} """.stripMargin
)
val solverNames: Seq[String] = {
(if (SolverFactory.hasNativeZ3) Seq("fairz3") else Nil) ++
(if (SolverFactory.hasZ3) Seq("smt-z3") else Nil) ++
(if (SolverFactory.hasCVC4) Seq("smt-cvc4") else Nil)
}
  // Check that global variables (free variables injected into function bodies) are correctly handled by each solver
for (sname <- solverNames) {
test(s"Global Variables in $sname") { implicit fix =>
val ctx = fix._1
val pgm = fix._2
pgm.lookup("GlobalVariables.test") match {
case Some(fd: FunDef) =>
val b0 = FreshIdentifier("B", BooleanType);
fd.body = Some(IfExpr(b0.toVariable, bi(1), bi(-1)))
val cnstr = LessThan(FunctionInvocation(fd.typed, Seq(bi(42))), bi(0))
val solver = SolverFactory.getFromName(ctx, pgm)(sname).getNewSolver()
solver.assertCnstr(And(b0.toVariable, cnstr))
try {
if (solver.check != Some(false)) {
fail("Global variables not correctly handled.")
}
} finally {
solver.free()
}
case _ =>
fail("Function with global body not found")
}
}
}
}
| regb/leon | src/test/scala/leon/integration/solvers/GlobalVariablesSuite.scala | Scala | gpl-3.0 | 1,939 |
package be.ellefant.cloudr
import roboguice.activity.RoboPreferenceActivity
import android.os.Bundle
class AccountPreferencesActivity extends RoboPreferenceActivity
with Base.CloudrActivity {
override def onCreate(savedInstance: Bundle) {
super.onCreate(savedInstance)
addPreferencesFromResource(R.xml.account_preferences_activity)
}
} | sdb/cloudr | app/src/be/ellefant/cloudr/AccountPreferencesActivity.scala | Scala | gpl-3.0 | 355 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import org.apache.spark.sql.catalyst.expressions.{And, Expression, NamedExpression, ProjectionOverSchema, SubqueryExpression}
import org.apache.spark.sql.catalyst.planning.ScanOperation
import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, Project}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.connector.read.{Scan, V1Scan}
import org.apache.spark.sql.execution.datasources.DataSourceStrategy
import org.apache.spark.sql.sources
import org.apache.spark.sql.types.StructType
object V2ScanRelationPushDown extends Rule[LogicalPlan] {
import DataSourceV2Implicits._
override def apply(plan: LogicalPlan): LogicalPlan = plan transformDown {
case ScanOperation(project, filters, relation: DataSourceV2Relation) =>
val scanBuilder = relation.table.asReadable.newScanBuilder(relation.options)
val normalizedFilters = DataSourceStrategy.normalizeExprs(filters, relation.output)
val (normalizedFiltersWithSubquery, normalizedFiltersWithoutSubquery) =
normalizedFilters.partition(SubqueryExpression.hasSubquery)
// `pushedFilters` will be pushed down and evaluated in the underlying data sources.
// `postScanFilters` need to be evaluated after the scan.
// `postScanFilters` and `pushedFilters` can overlap, e.g. the parquet row group filter.
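      // For example (illustrative), given SELECT a FROM t WHERE a > 1 AND b IN (SELECT ...),
      // `a > 1` may become a pushed filter evaluated by the source, while the subquery
      // predicate stays in `postScanFilters` and is evaluated in a Filter node above the scan.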
val (pushedFilters, postScanFiltersWithoutSubquery) = PushDownUtils.pushFilters(
scanBuilder, normalizedFiltersWithoutSubquery)
val postScanFilters = postScanFiltersWithoutSubquery ++ normalizedFiltersWithSubquery
val normalizedProjects = DataSourceStrategy
.normalizeExprs(project, relation.output)
.asInstanceOf[Seq[NamedExpression]]
val (scan, output) = PushDownUtils.pruneColumns(
scanBuilder, relation, normalizedProjects, postScanFilters)
logInfo(
s"""
|Pushing operators to ${relation.name}
|Pushed Filters: ${pushedFilters.mkString(", ")}
|Post-Scan Filters: ${postScanFilters.mkString(",")}
|Output: ${output.mkString(", ")}
""".stripMargin)
val wrappedScan = scan match {
case v1: V1Scan =>
val translated = filters.flatMap(DataSourceStrategy.translateFilter(_, true))
V1ScanWrapper(v1, translated, pushedFilters)
case _ => scan
}
val scanRelation = DataSourceV2ScanRelation(relation.table, wrappedScan, output)
val projectionOverSchema = ProjectionOverSchema(output.toStructType)
val projectionFunc = (expr: Expression) => expr transformDown {
case projectionOverSchema(newExpr) => newExpr
}
val filterCondition = postScanFilters.reduceLeftOption(And)
val newFilterCondition = filterCondition.map(projectionFunc)
val withFilter = newFilterCondition.map(Filter(_, scanRelation)).getOrElse(scanRelation)
val withProjection = if (withFilter.output != project) {
val newProjects = normalizedProjects
.map(projectionFunc)
.asInstanceOf[Seq[NamedExpression]]
Project(newProjects, withFilter)
} else {
withFilter
}
withProjection
}
}
// A wrapper for v1 scan to carry the translated filters and the handled ones. This is required by
// the physical v1 scan node.
case class V1ScanWrapper(
v1Scan: V1Scan,
translatedFilters: Seq[sources.Filter],
handledFilters: Seq[sources.Filter]) extends Scan {
override def readSchema(): StructType = v1Scan.readSchema()
}
| dbtsai/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala | Scala | apache-2.0 | 4,373 |
/*
* Copyright 2013-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.parse.rst
import laika.parse.core.Parser
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import org.scalatest.Matchers
import laika.parse.helper.DefaultParserHelpers
import laika.parse.helper.ParseResultHelpers
import laika.tree.helper.ModelBuilder
import laika.parse.rst.Elements._
import laika.tree.Elements._
import laika.parse.rst.TextRoles.RoleDirectivePart
import laika.parse.rst.Directives.DirectivePart
class ListParsersSpec extends FlatSpec
with Matchers
with BlockParsers
with InlineParsers
with ParseResultHelpers
with DefaultParserHelpers[RootElement]
with ModelBuilder {
val defaultParser: Parser[RootElement] = rootElement
def blockDirective (name: String): Option[DirectivePart[Block]] = None
def spanDirective (name: String): Option[DirectivePart[Span]] = None
def textRole (name: String): Option[RoleDirectivePart[String => Span]] = None
def fp (content: String) = ForcedParagraph(List(Text(content)))
def ss (content: String) = SpanSequence(List(Text(content)))
def fl (fields: Field*) = FieldList(fields.toList)
def field (name: String, blocks: Block*) = Field(List(Text(name)), blocks.toList)
def oli (name: String, value: Block*) = OptionListItem(List(ProgramOption(name, None)), value.toList)
def oli (name: String, value: String) = OptionListItem(List(ProgramOption(name, None)), List(p(value)))
def oli (name: String, argDelim: String, arg: String, value: String) =
OptionListItem(List(ProgramOption(name, Some(OptionArgument(arg,argDelim)))), List(p(value)))
def optL (items: OptionListItem*) = OptionList(items.toList)
"The bullet list parser" should "parse items that are not separated by blank lines" in {
val input = """* aaa
|* bbb
|* ccc""".stripMargin
Parsing (input) should produce (root( bulletList() + "aaa" + "bbb" + "ccc"))
}
it should "parse items that are separated by blank lines" in {
val input = """* aaa
|
|* bbb
|
|* ccc""".stripMargin
Parsing (input) should produce (root( bulletList() + "aaa" + "bbb" + "ccc"))
}
it should "parse items starting with a '+' the same way as those starting with a '*'" in {
val input = """+ aaa
|+ bbb
|+ ccc""".stripMargin
Parsing (input) should produce (root( bulletList("+") + "aaa" + "bbb" + "ccc"))
}
it should "parse items starting with a '-' the same way as those starting with a '*'" in {
val input = """- aaa
|- bbb
|- ccc""".stripMargin
Parsing (input) should produce (root( bulletList("-") + "aaa" + "bbb" + "ccc"))
}
it should "parse items containing multiple paragraphs in a single item" in {
val input = """* aaa
|
| bbb
| bbb
|
|* ccc
|
|* ddd""".stripMargin
Parsing (input) should produce (root( bulletList() + (p("aaa"), p("bbb\\nbbb")) + fp("ccc") + fp("ddd")))
}
it should "parse nested items indented by spaces" in {
val input = """* aaa
|
| * bbb
|
| * ccc""".stripMargin
val list3 = bulletList() + "ccc"
val list2 = bulletList() + (ss("bbb"), list3)
val list1 = bulletList() + (ss("aaa"), list2)
Parsing (input) should produce (root(list1))
}
it should "ignore items when the second line is not indented" in {
val input = """* aaa
|bbb""".stripMargin
Parsing (input) should produce (root(p("* aaa\\nbbb")))
}
it should "parse a literal block after the first line of a list item" in {
val input = """* aaa::
|
| bbb
| bbb
|
|* ccc
|
|* ddd""".stripMargin
Parsing (input) should produce (root( bulletList() + (p("aaa:"), litBlock("bbb\\nbbb")) + fp("ccc") + fp("ddd")))
}
"The enumerated list parser" should "parse items with arabic enumeration style" in {
val input = """1. aaa
|2. bbb
|3. ccc""".stripMargin
Parsing (input) should produce (root(enumList(EnumFormat(Arabic, "", ".")) + "aaa" + "bbb" + "ccc"))
}
it should "parse items with lowercase alphabetic enumeration style" in {
val input = """a. aaa
|b. bbb
|c. ccc""".stripMargin
Parsing (input) should produce (root(enumList(EnumFormat(LowerAlpha, "", ".")) + "aaa" + "bbb" + "ccc"))
}
it should "parse items with uppercase alphabetic enumeration style" in {
val input = """A. aaa
|B. bbb
|C. ccc""".stripMargin
Parsing (input) should produce (root(enumList(EnumFormat(UpperAlpha, "", ".")) + "aaa" + "bbb" + "ccc"))
}
it should "parse items with lowercase Roman enumeration style" in {
val input = """i. aaa
|ii. bbb
|iii. ccc""".stripMargin
Parsing (input) should produce (root(enumList(EnumFormat(LowerRoman, "", ".")) + "aaa" + "bbb" + "ccc"))
}
it should "parse items with uppercase Roman enumeration style" in {
val input = """I. aaa
|II. bbb
|III. ccc""".stripMargin
Parsing (input) should produce (root(enumList(EnumFormat(UpperRoman, "", ".")) + "aaa" + "bbb" + "ccc"))
}
it should "keep the right start value for arabic enumeration style" in {
val input = """4. aaa
|5. bbb""".stripMargin
Parsing (input) should produce (root(enumList(EnumFormat(Arabic, "", "."), 4) + "aaa" + "bbb"))
}
it should "keep the right start value for lowercase alphabetic enumeration style" in {
val input = """d. aaa
|e. bbb""".stripMargin
Parsing (input) should produce (root(enumList(EnumFormat(LowerAlpha, "", "."), 4) + "aaa" + "bbb"))
}
it should "keep the right start value for uppercase alphabetic enumeration style" in {
val input = """D. aaa
|E. bbb""".stripMargin
Parsing (input) should produce (root(enumList(EnumFormat(UpperAlpha, "", "."), 4) + "aaa" + "bbb"))
}
it should "keep the right start value for lowercase Roman enumeration style" in {
val input = """iv. aaa
|v. bbb""".stripMargin
Parsing (input) should produce (root(enumList(EnumFormat(LowerRoman, "", "."), 4) + "aaa" + "bbb"))
}
it should "keep the right start value for uppercase Roman enumeration style" in {
val input = """IV. aaa
|V. bbb""".stripMargin
Parsing (input) should produce (root(enumList(EnumFormat(UpperRoman, "", "."), 4) + "aaa" + "bbb"))
}
it should "not try to parse a Roman Numeral in a normal paragraph (issue #19)" in {
val input = "imp"
Parsing (input) should produce (root(p("imp")))
}
it should "parse items suffixed by right-parenthesis" in {
val input = """1) aaa
|2) bbb
|3) ccc""".stripMargin
Parsing (input) should produce (root(enumList(EnumFormat(Arabic, "", ")")) + "aaa" + "bbb" + "ccc"))
}
it should "parse items surrounded by parenthesis" in {
val input = """(1) aaa
|(2) bbb
|(3) ccc""".stripMargin
Parsing (input) should produce(root(enumList(EnumFormat(Arabic, "(", ")")) + "aaa" + "bbb" + "ccc"))
}
it should "parse items that are separated by blank lines" in {
val input = """1. aaa
|
|2. bbb
|
|3. ccc""".stripMargin
Parsing (input) should produce (root(enumList(EnumFormat(Arabic)) + "aaa" + "bbb" + "ccc"))
}
it should "parse items containing multiple paragraphs in a single item" in {
val input = """1. aaa
|
| bbb
| bbb
|
|2. ccc
|
|3. ddd""".stripMargin
Parsing (input) should produce (root( enumList() + (p("aaa"), p("bbb\\nbbb")) + fp("ccc") + fp("ddd")))
}
it should "parse nested items indented by spaces" in {
val input = """1. aaa
|
| 1. bbb
|
| 1. ccc""".stripMargin
val list3 = enumList() + "ccc"
val list2 = enumList() + (ss("bbb"), list3)
val list1 = enumList() + (ss("aaa"), list2)
Parsing (input) should produce (root(list1))
}
it should "parse items with different enumeration patterns into separate lists" in {
val input = """1. aaa
|
|2. bbb
|
|1) ccc
|
|2) ddd""".stripMargin
val f = EnumFormat(Arabic,"",")")
Parsing (input) should produce (root(enumList() + "aaa" + "bbb", enumList(f) + "ccc" + "ddd"))
}
"The definition list parser" should "parse items that are not separated by blank lines" in {
val input = """term 1
| aaa
|term 2
| bbb""".stripMargin
Parsing (input) should produce (root( defList + ("term 1", p("aaa")) + ("term 2", p("bbb"))))
}
it should "parse items that are separated by blank lines" in {
val input = """term 1
| aaa
|
|term 2
| bbb""".stripMargin
Parsing (input) should produce (root( defList + ("term 1", p("aaa")) + ("term 2", p("bbb"))))
}
it should "parse a term with a classifier" in {
val input = """term 1
| aaa
|
|term 2 : classifier
| bbb""".stripMargin
Parsing (input) should produce (root( defList + ("term 1", p("aaa")) + (List(txt("term 2 "), Classifier(List(txt("classifier")))), p("bbb"))))
}
it should "parse items containing multiple paragraphs in a single item" in {
val input = """term 1
| aaa
| aaa
|
| bbb
|
|term 2
| ccc""".stripMargin
Parsing (input) should produce (root( defList + ("term 1", p("aaa\\naaa"), p("bbb")) + ("term 2", p("ccc"))))
}
  it should "parse items containing multiple paragraphs with different indentation in a single item" in {
val input = """term 1
| aaa
| aaa
|
| bbb
|
|term 2
| ccc""".stripMargin
Parsing (input) should produce (root( defList + ("term 1", quote("aaa\\naaa"), p("bbb")) + ("term 2", p("ccc"))))
}
it should "support inline markup in the term" in {
val input = """term *em*
| aaa
|
|term 2
| bbb""".stripMargin
Parsing (input) should produce (root( defList + (List(txt("term "), em(txt("em"))), p("aaa")) + ("term 2", p("bbb"))))
}
it should "ignore subsequent tables" in {
val input = """term 1
| aaa
|
|term 2
| bbb
|
|=== ===
| a b
|=== ===""".stripMargin
Parsing (input) should produce (root( defList + ("term 1", p("aaa")) + ("term 2", p("bbb")),
table(strrow("a","b"))))
}
it should "ignore subsequent directives" in {
val input = """term 1
| aaa
|
|term 2
| bbb
|
|.. foo::
| :name: value""".stripMargin
Parsing (input) should produce (root( defList + ("term 1", p("aaa")) + ("term 2", p("bbb")),
InvalidBlock(SystemMessage(laika.tree.Elements.Error, "unknown directive: foo"), LiteralBlock(".. foo:: \\n:name: value"))))
}
it should "ignore subsequent bullet lists" in {
val input = """term 1
| aaa
|
|term 2
| bbb
|
|* list
| list""".stripMargin
Parsing (input) should produce (root( defList + ("term 1", p("aaa")) + ("term 2", p("bbb")),
bulletList() + (p("list\\nlist"))))
}
it should "ignore subsequent enum lists" in {
val input = """term 1
| aaa
|
|term 2
| bbb
|
|1. list
| list""".stripMargin
Parsing (input) should produce (root( defList + ("term 1", p("aaa")) + ("term 2", p("bbb")),
enumList(EnumFormat(Arabic)) + (p("list\\nlist"))))
}
it should "ignore subsequent headers with overline" in {
val input = """term 1
| aaa
|
|term 2
| bbb
|
|########
| Header
|########""".stripMargin
Parsing (input) should produce (root( defList + ("term 1", p("aaa")) + ("term 2", p("bbb")),
DecoratedHeader(OverlineAndUnderline('#'), List(Text("Header")), Id("header"))))
}
"The field list parser" should "parse a list with all bodies on the same line as the name" in {
val input = """:name1: value1
|:name2: value2
|:name3: value3""".stripMargin
Parsing (input) should produce (root( fl( field("name1", p("value1")), field("name2", p("value2")), field("name3", p("value3")))))
}
it should "parse a list with bodies spanning multiple lines" in {
val input = """:name1: line1a
| line1b
|:name2: line2a
| line2b""".stripMargin
Parsing (input) should produce (root( fl( field("name1", p("line1a\\nline1b")), field("name2", p("line2a\\nline2b")))))
}
it should "parse a list with bodies spanning multiple blocks" in {
val input = """:name1: line1a
| line1b
|
| line1c
| line1d
|:name2: line2a
| line2b""".stripMargin
Parsing (input) should produce (root( fl( field("name1", p("line1a\\nline1b"), p("line1c\\nline1d")), field("name2", p("line2a\\nline2b")))))
}
"The option list parser" should "parse a list with short posix options" in {
val input = """-a Option1
|-b Option2""".stripMargin
Parsing (input) should produce (root( optL( oli("-a", "Option1"), oli("-b", "Option2"))))
}
it should "parse a list with long posix options" in {
val input = """--aaaa Option1
|--bbbb Option2""".stripMargin
Parsing (input) should produce (root( optL( oli("--aaaa", "Option1"), oli("--bbbb", "Option2"))))
}
it should "parse a list with short GNU-style options" in {
val input = """+a Option1
|+b Option2""".stripMargin
Parsing (input) should produce (root( optL( oli("+a", "Option1"), oli("+b", "Option2"))))
}
it should "parse a list with short DOS-style options" in {
val input = """/a Option1
|/b Option2""".stripMargin
Parsing (input) should produce (root( optL( oli("/a", "Option1"), oli("/b", "Option2"))))
}
it should "parse an option argument separated by a space" in {
val input = """-a FILE Option1
|-b Option2""".stripMargin
Parsing (input) should produce (root( optL( oli("-a", " ", "FILE", "Option1"), oli("-b", "Option2"))))
}
it should "parse an option argument separated by '='" in {
val input = """-a=FILE Option1
|-b Option2""".stripMargin
Parsing (input) should produce (root( optL( oli("-a", "=", "FILE", "Option1"), oli("-b", "Option2"))))
}
it should "parse an option argument enclosed in angle brackets" in {
val input = """-a <in=out> Option1
|-b Option2""".stripMargin
Parsing (input) should produce (root( optL( oli("-a", " ", "<in=out>", "Option1"), oli("-b", "Option2"))))
}
it should "parse a description starting on the next line" in {
val input = """-a
| Option1
|-b Option2""".stripMargin
Parsing (input) should produce (root( optL( oli("-a", "Option1"), oli("-b", "Option2"))))
}
it should "parse a block of options with blank lines between them" in {
val input = """-a Option1
|
|-b Option2""".stripMargin
Parsing (input) should produce (root( optL( oli("-a", "Option1"), oli("-b", "Option2"))))
}
it should "parse a description containing multiple paragraphs" in {
val input = """-a Line1
| Line2
|
| Line3
|
|-b Option2""".stripMargin
Parsing (input) should produce (root( optL( oli("-a", p("Line1\\nLine2"), p("Line3")), oli("-b", "Option2"))))
}
it should "parse an option separated by more than 2 spaces" in {
val input = """-a Option""".stripMargin
Parsing (input) should produce (root( optL(
oli("-a", "Option")
)))
}
"The line block parser" should "parse a block with out continuation or indentation" in {
val input = """|| Line1
|| Line2
|| Line3""".stripMargin
Parsing (input) should produce (root( lb( line("Line1"), line("Line2"), line("Line3"))))
}
it should "parse a block with a continuation line" in {
val input = """|| Line1
| Line2
|| Line3
|| Line4""".stripMargin
Parsing (input) should produce (root( lb( line("Line1\\nLine2"), line("Line3"), line("Line4"))))
}
it should "parse a nested structure (pointing right)" in {
val input = """|| Line1
|| Line2
|| Line3
|| Line4
|| Line5""".stripMargin
Parsing (input) should produce (root( lb( line("Line1"), lb(line("Line2"), lb(line("Line3")), line("Line4")), line("Line5"))))
}
it should "parse a nested structure (pointing left)" in {
val input = """|| Line1
|| Line2
|| Line3
|| Line4
|| Line5""".stripMargin
Parsing (input) should produce (root( lb( lb( lb(line("Line1")), line("Line2")), line("Line3"), lb(line("Line4"), lb(line("Line5"))))))
}
}
| amuramatsu/Laika | core/src/test/scala/laika/parse/rst/ListParsersSpec.scala | Scala | apache-2.0 | 17,590 |
/**
* This file is part of the Uniscala Couch project.
* Copyright (C) 2012 Sustainable Software Pty Ltd.
* This is open source software, licensed under the Apache License
* version 2.0 license - please see the LICENSE file included in
* the distribution.
*
* Authors:
* Sam Stainsby ([email protected])
*/
package net.uniscala.couch
/**
* Represents a failure that was returned as a JSON object from a couch
 * server. Such failures have an 'error' field and, optionally, a
* 'reason' field, which are converted here into the 'error' and
* 'reasonOption' members.
*/
case class CouchFailure(error: String, reasonOption: Option[String])
extends Throwable(error + reasonOption.map(": " + _).getOrElse(""))
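// Illustrative sketch (added; not from the original source): a typical CouchDB error body such as
//
//   {"error": "not_found", "reason": "missing"}
//
// would be surfaced here as `CouchFailure("not_found", Some("missing"))`, whose `getMessage`
// yields "not_found: missing". The JSON field names follow the usual CouchDB conventions;
// the actual parsing of the response happens elsewhere in this library.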
| stainsby/uniscala-couch | src/main/scala/net/uniscala/couch/CouchFailure.scala | Scala | apache-2.0 | 736 |
package com.sksamuel.elastic4s.fields
import com.sksamuel.elastic4s.analysis.LanguageAnalyzers
import com.sksamuel.elastic4s.requests.mappings.{MappingBuilderFn, MappingDefinition}
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class FieldMappingJsonTest extends AnyFunSuite with Matchers {
test("adding ElasticFields to mapping def") {
val field1 = LongField(
name = "myfield1",
store = Some(true),
boost = Some(1.2),
ignoreMalformed = Some(true),
copyTo = List("q", "er")
)
val field2 = TextField(
name = "myfield2",
analyzer = Some(LanguageAnalyzers.bengali),
searchQuoteAnalyzer = Some(LanguageAnalyzers.english),
ignoreAbove = Some(4),
similarity = Some("Classic1"),
norms = Some(true)
)
val field3 = KeywordField(
name = "myfield3",
normalizer = Some("foo"),
copyTo = List("q", "er"),
ignoreAbove = Some(4),
indexOptions = Some("freqs"),
similarity = Some("Classic1"),
norms = Some(true)
)
val mapping = MappingDefinition(properties = List(field1, field2, field3))
MappingBuilderFn.build(mapping).string() shouldBe """{"properties":{"myfield1":{"type":"long","copy_to":["q","er"],"boost":1.2,"store":true,"ignore_malformed":true},"myfield2":{"type":"text","analyzer":"bengali","norms":true,"ignore_above":4,"search_quote_analyzer":"english","similarity":"Classic1"},"myfield3":{"type":"keyword","copy_to":["q","er"],"ignore_above":4,"index_options":"freqs","norms":true,"normalizer":"foo","similarity":"Classic1"}}}"""
}
test("MappingBuilderFn should throw an exception if multiple properties with the same field name") {
val field1 = LongField(
name = "myfield",
copyTo = List("q", "er")
)
val field2 = TextField(
name = "myfield",
norms = Some(true)
)
val mapping = MappingDefinition(properties = List(field1, field2))
intercept[RuntimeException] {
MappingBuilderFn.build(mapping).string()
}
}
}
| stringbean/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/fields/FieldMappingJsonTest.scala | Scala | apache-2.0 | 2,062 |
import com.rabbitmq.client.ConnectionFactory
import com.rabbitmq.client.Connection
import com.rabbitmq.client.Channel
import com.rabbitmq.client.MessageProperties
val factory = new ConnectionFactory()
factory.setHost("hive.kaicode.com")
factory.setPort(5672)
factory.setUsername("order")
factory.setPassword("EisKisP1")
val connection: Connection = factory.newConnection()
val channel: Channel = connection.createChannel()
val queueName = "order-queue"
val durable = true
val exclusive = false
val autoDelete = true
channel.queueDeclare(queueName, durable, exclusive, autoDelete, null)
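// Test payload: multi-byte (CJK) characters plus ":::"-separated fields, which exercises the
// explicit UTF-8 encoding passed to basicPublish below.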
val message = "一儿三四五六七八九十啊吧色:::0:::20:::365:::40"
//val message="hello"
channel.basicPublish("", queueName, MessageProperties.PERSISTENT_TEXT_PLAIN, message.getBytes("UTF-8"))
channel.close()
connection.close()
println("foo")
| sonwh98/helloPOS | src/sandbox/rabbitmq-tests.scala | Scala | gpl-2.0 | 840 |
/**
* SparklineData, Inc. -- http://www.sparklinedata.com/
*
* Scala based Audience Behavior APIs
*
* Copyright 2014-2015 SparklineData, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Created by Harish.
*/
package org.sparkline.etl
class MetricAnalysisException(
val message: String,
val line: Option[Int] = None,
val startPosition: Option[Int] = None)
extends Exception with Serializable {
def withPosition(line: Option[Int], startPosition: Option[Int]): MetricAnalysisException = {
val newException = new MetricAnalysisException(message, line, startPosition)
newException.setStackTrace(getStackTrace)
newException
}
override def getMessage: String = {
val lineAnnotation = line.map(l => s" line $l").getOrElse("")
val positionAnnotation = startPosition.map(p => s" pos $p").getOrElse("")
s"$message;$lineAnnotation$positionAnnotation"
}
} | cubefyre/audience-behavior-semantic-etl | etl/src/main/scala/org/sparkline/etl/MetricAnalysisException.scala | Scala | apache-2.0 | 1,500 |
/*
* Copyright (C) 2018 Joan Goyeau.
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.streams.scala.kstream
import java.time.Duration
import org.apache.kafka.streams.kstream.internals.MaterializedInternal
import org.apache.kafka.streams.scala.Serdes._
import org.apache.kafka.streams.scala._
import org.apache.kafka.streams.state.Stores
import org.junit.runner.RunWith
import org.scalatest.{FlatSpec, Matchers}
import org.scalatestplus.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class MaterializedTest extends FlatSpec with Matchers {
"Create a Materialized" should "create a Materialized with Serdes" in {
val materialized: Materialized[String, Long, ByteArrayKeyValueStore] =
Materialized.`with`[String, Long, ByteArrayKeyValueStore]
val internalMaterialized = new MaterializedInternal(materialized)
internalMaterialized.keySerde.getClass shouldBe Serdes.String.getClass
internalMaterialized.valueSerde.getClass shouldBe Serdes.Long.getClass
}
"Create a Materialize with a store name" should "create a Materialized with Serdes and a store name" in {
val storeName = "store"
val materialized: Materialized[String, Long, ByteArrayKeyValueStore] =
Materialized.as[String, Long, ByteArrayKeyValueStore](storeName)
val internalMaterialized = new MaterializedInternal(materialized)
internalMaterialized.keySerde.getClass shouldBe Serdes.String.getClass
internalMaterialized.valueSerde.getClass shouldBe Serdes.Long.getClass
internalMaterialized.storeName shouldBe storeName
}
"Create a Materialize with a window store supplier" should "create a Materialized with Serdes and a store supplier" in {
val storeSupplier = Stores.persistentWindowStore("store", Duration.ofMillis(1), Duration.ofMillis(1), true)
val materialized: Materialized[String, Long, ByteArrayWindowStore] =
Materialized.as[String, Long](storeSupplier)
val internalMaterialized = new MaterializedInternal(materialized)
internalMaterialized.keySerde.getClass shouldBe Serdes.String.getClass
internalMaterialized.valueSerde.getClass shouldBe Serdes.Long.getClass
internalMaterialized.storeSupplier shouldBe storeSupplier
}
"Create a Materialize with a key value store supplier" should "create a Materialized with Serdes and a store supplier" in {
val storeSupplier = Stores.persistentKeyValueStore("store")
val materialized: Materialized[String, Long, ByteArrayKeyValueStore] =
Materialized.as[String, Long](storeSupplier)
val internalMaterialized = new MaterializedInternal(materialized)
internalMaterialized.keySerde.getClass shouldBe Serdes.String.getClass
internalMaterialized.valueSerde.getClass shouldBe Serdes.Long.getClass
internalMaterialized.storeSupplier shouldBe storeSupplier
}
"Create a Materialize with a session store supplier" should "create a Materialized with Serdes and a store supplier" in {
val storeSupplier = Stores.persistentSessionStore("store", Duration.ofMillis(1))
val materialized: Materialized[String, Long, ByteArraySessionStore] =
Materialized.as[String, Long](storeSupplier)
val internalMaterialized = new MaterializedInternal(materialized)
internalMaterialized.keySerde.getClass shouldBe Serdes.String.getClass
internalMaterialized.valueSerde.getClass shouldBe Serdes.Long.getClass
internalMaterialized.storeSupplier shouldBe storeSupplier
}
}
| sslavic/kafka | streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/MaterializedTest.scala | Scala | apache-2.0 | 4,196 |
package dsmoq.maintenance.data.user
import org.joda.time.DateTime
/**
 * Case class representing a user search result
*/
case class SearchResultUser(
id: String,
name: String,
fullname: String,
mailAddress: String,
organization: String,
title: String,
description: String,
createdAt: DateTime,
updatedAt: DateTime,
disabled: Boolean
)
| nkawa/dsmoq | server/maintenance/src/main/scala/dsmoq/maintenance/data/user/SearchResultUser.scala | Scala | apache-2.0 | 365 |
package org.mdoc.common.model
import org.scalacheck.Prop._
import org.scalacheck.Properties
object FormatSpec extends Properties("Format") {
property("fromExtension . toExtension ~= id") = secure {
Format.values.forall { format =>
Format.fromExtension(format.toExtension).fold(false)(_ == format)
}
}
}
| m-doc/common-model | shared/src/test/scala/org/mdoc/common/model/FormatSpec.scala | Scala | apache-2.0 | 324 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.couchbase.planner
import slamdata.Predef._
import quasar.{Data => QData, NameGenerator}
import quasar.contrib.pathy.AFile
import quasar.physical.couchbase._,
common.ContextReader,
N1QL.{Eq, Id, _},
Select.{Filter, Value, _}
import quasar.qscript, qscript._
import matryoshka._
import matryoshka.implicits._
import scalaz._, Scalaz._
final class ShiftedReadFilePlanner[T[_[_]]: CorecursiveT, F[_]: Applicative: ContextReader: NameGenerator]
extends Planner[T, F, Const[ShiftedRead[AFile], ?]] {
def str(v: String) = Data[T[N1QL]](QData.Str(v))
def id(v: String) = Id[T[N1QL]](v)
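  // Builds a N1QL SELECT over the configured bucket: the result is shaped by the IdStatus
  // (document id only, [id, value] pair, or value only), and rows are filtered on the
  // doc-type key whenever the path maps to a non-empty collection name.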
val plan: AlgebraM[F, Const[ShiftedRead[AFile], ?], T[N1QL]] = {
case Const(ShiftedRead(absFile, idStatus)) =>
(genId[T[N1QL], F] ⊛ ContextReader[F].ask) { (gId, ctx) =>
val collection = common.docTypeValueFromPath(absFile)
val v =
IfMissing(
SelectField(gId.embed, str("value").embed).embed,
gId.embed).embed
val mId = SelectField(Meta(gId.embed).embed, str("id").embed)
val r = idStatus match {
case IdOnly => gId.embed
case IncludeId => Arr(List(gId.embed, v)).embed
case ExcludeId => v
}
Select(
Value(true),
ResultExpr(r, none).wrapNel,
Keyspace(id(ctx.bucket.v).embed, gId.some).some,
join = none,
unnest = none,
let = nil,
filter = collection.v.nonEmpty.option(
Eq(id(ctx.docTypeKey.v).embed, str(collection.v).embed).embed
) ∘ (Filter(_)),
groupBy = none,
orderBy = nil).embed
}
}
}
| drostron/quasar | couchbase/src/main/scala/quasar/physical/couchbase/planner/ShiftedReadFilePlanner.scala | Scala | apache-2.0 | 2,300 |
class A {
val x : { val self = this } = new { self => }
}
| AlexSikia/dotty | tests/untried/neg/t5361.scala | Scala | bsd-3-clause | 60 |
import org.specs2.mutable._
import org.specs2.runner._
import org.junit.runner._
import play.api.test._
import play.api.test.Helpers._
/**
* Add your spec here.
* You can mock out a whole application including requests, plugins etc.
* For more information, consult the wiki.
*/
@RunWith(classOf[JUnitRunner])
class ApplicationSpec extends Specification {
"Application" should {
"send 404 on a bad request" in new WithApplication{
route(FakeRequest(GET, "/boum")) must beSome.which (status(_) == NOT_FOUND)
}
"render the index page" in new WithApplication{
val home = route(FakeRequest(GET, "/")).get
status(home) must equalTo(OK)
      // contentType(home) must beSome.which(_ == "text/html")
      // contentAsString(home) must contain ("Your new application is ready.")
}
}
}
| Hofmaier/comstock | movierecommenderweb/test/ApplicationSpec.scala | Scala | mit | 824 |
package com.datastax.spark.connector.rdd.typeTests
import com.datastax.driver.core.{ProtocolVersion, Row}
import com.datastax.driver.core.ProtocolVersion._
import com.datastax.spark.connector._
import java.util.Date
class TimeTypeTest extends AbstractTypeTest[Long, java.lang.Long] {
override val minPV = ProtocolVersion.V4
override def getDriverColumn(row: Row, colName: String): Long = row.getTime(colName)
override protected val typeName: String = "time"
override protected val typeData: Seq[Long] = 1L to 5L
override protected val addData: Seq[Long] = 6L to 10L
"Time Types" should "be writable as dates" in skipIfProtocolVersionLT(V4) {
val dates = (100 to 500 by 100).map(new Date(_))
val times = dates.map(_.getTime)
sc.parallelize(
dates.map(x => (x, x, x, x))
).saveToCassandra(keyspaceName, typeNormalTable)
val results = sc.cassandraTable[(Long, Long, Long, Long)](keyspaceName, typeNormalTable).collect
checkNormalRowConsistency(times.map(_ * 1000000), results)
}
/*
   TODO: Determine a way to detect when a column is being read as a Long but the underlying Cassandra
   column is a Time type. That case needs a special conversion, since a plain Long is otherwise
   interpreted as milliseconds rather than nanoseconds.
*/
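  // Added note: Cassandra's `time` type stores nanoseconds since midnight, whereas
  // java.util.Date#getTime returns milliseconds, hence the `* 1000000` factor
  // (1 ms = 1,000,000 ns) applied in the write test above.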
ignore should "be readable as dates" in {
val dates = (100 to 500 by 100).map(new Date(_))
val times = dates.map(_.getTime)
val results = sc.cassandraTable[(Date, Date, Date, Date)](keyspaceName, "time_compound").collect
val resultsAsLong = results.map{ case (x, y, z, a) => (x.getTime, y.getTime, z.getTime, a.getTime)}
checkNormalRowConsistency(times, resultsAsLong)
}
}
| shashwat7/spark-cassandra-connector | spark-cassandra-connector/src/it/scala/com/datastax/spark/connector/rdd/typeTests/TimeTypeTest.scala | Scala | apache-2.0 | 1,655 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.annotation.Since
import org.apache.spark.ml.Transformer
import org.apache.spark.ml.attribute.AttributeGroup
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared.{HasInputCol, HasOutputCol}
import org.apache.spark.ml.util._
import org.apache.spark.mllib.feature
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.functions.{col, udf}
import org.apache.spark.sql.types.{ArrayType, StructType}
/**
* Maps a sequence of terms to their term frequencies using the hashing trick.
* Currently we use Austin Appleby's MurmurHash 3 algorithm (MurmurHash3_x86_32)
* to calculate the hash code value for the term object.
* Since a simple modulo is used to transform the hash function to a column index,
* it is advisable to use a power of two as the numFeatures parameter;
* otherwise the features will not be mapped evenly to the columns.
*/
@Since("1.2.0")
class HashingTF @Since("1.4.0") (@Since("1.4.0") override val uid: String)
extends Transformer with HasInputCol with HasOutputCol with DefaultParamsWritable {
@Since("1.2.0")
def this() = this(Identifiable.randomUID("hashingTF"))
/** @group setParam */
@Since("1.4.0")
def setInputCol(value: String): this.type = set(inputCol, value)
/** @group setParam */
@Since("1.4.0")
def setOutputCol(value: String): this.type = set(outputCol, value)
/**
* Number of features. Should be > 0.
* (default = 2^18^)
* @group param
*/
@Since("1.2.0")
val numFeatures = new IntParam(this, "numFeatures", "number of features (> 0)",
ParamValidators.gt(0))
/**
* Binary toggle to control term frequency counts.
* If true, all non-zero counts are set to 1. This is useful for discrete probabilistic
* models that model binary events rather than integer counts.
* (default = false)
* @group param
*/
@Since("2.0.0")
val binary = new BooleanParam(this, "binary", "If true, all non zero counts are set to 1. " +
"This is useful for discrete probabilistic models that model binary events rather " +
"than integer counts")
setDefault(numFeatures -> (1 << 18), binary -> false)
/** @group getParam */
@Since("1.2.0")
def getNumFeatures: Int = $(numFeatures)
/** @group setParam */
@Since("1.2.0")
def setNumFeatures(value: Int): this.type = set(numFeatures, value)
/** @group getParam */
@Since("2.0.0")
def getBinary: Boolean = $(binary)
/** @group setParam */
@Since("2.0.0")
def setBinary(value: Boolean): this.type = set(binary, value)
@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
val outputSchema = transformSchema(dataset.schema)
val hashingTF = new feature.HashingTF($(numFeatures)).setBinary($(binary))
    // TODO: Implement hashingTF.transform natively in the ML framework to avoid the extra conversion.
val t = udf { terms: Seq[_] => hashingTF.transform(terms).asML }
val metadata = outputSchema($(outputCol)).metadata
dataset.select(col("*"), t(col($(inputCol))).as($(outputCol), metadata))
}
@Since("1.4.0")
override def transformSchema(schema: StructType): StructType = {
val inputType = schema($(inputCol)).dataType
require(inputType.isInstanceOf[ArrayType],
s"The input column must be ArrayType, but got $inputType.")
val attrGroup = new AttributeGroup($(outputCol), $(numFeatures))
SchemaUtils.appendColumn(schema, attrGroup.toStructField())
}
@Since("1.4.1")
override def copy(extra: ParamMap): HashingTF = defaultCopy(extra)
}
@Since("1.6.0")
object HashingTF extends DefaultParamsReadable[HashingTF] {
@Since("1.6.0")
override def load(path: String): HashingTF = super.load(path)
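  // Hedged usage sketch (added; not part of the original file). Assumes an input DataFrame
  // `wordsDF` with an array-of-strings column named "words" — both names are illustrative:
  //
  //   val hashingTF = new HashingTF()
  //     .setInputCol("words")
  //     .setOutputCol("features")
  //     .setNumFeatures(1 << 10) // a power of two, as advised in the class docs
  //   val featurized = hashingTF.transform(wordsDF)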
}
| gioenn/xSpark | mllib/src/main/scala/org/apache/spark/ml/feature/HashingTF.scala | Scala | apache-2.0 | 4,540 |
package ru.maizy.influxdbclient.data
/**
* Copyright (c) Nikita Kovaliov, maizy.ru, 2016-2017
* See LICENSE.txt for details.
*/
case class SeriesError(error: String)
| maizy/ambient7 | influxdb-client/src/main/scala/ru/maizy/influxdbclient/data/SeriesError.scala | Scala | apache-2.0 | 170 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.streaming
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka010._
/**
* Consumes messages from one or more topics in Kafka and does wordcount.
* Usage: DirectKerberizedKafkaWordCount <brokers> <topics>
* <brokers> is a list of one or more Kafka brokers
* <groupId> is a consumer group name to consume from topics
* <topics> is a list of one or more kafka topics to consume from
*
* Example:
* Yarn client:
* $ bin/run-example --files ${jaas_path}/kafka_jaas.conf,${keytab_path}/kafka.service.keytab \\
* --driver-java-options "-Djava.security.auth.login.config=${path}/kafka_driver_jaas.conf" \\
* --conf \\
* "spark.executor.extraJavaOptions=-Djava.security.auth.login.config=./kafka_jaas.conf" \\
* --master yarn
* streaming.DirectKerberizedKafkaWordCount broker1-host:port,broker2-host:port \\
* consumer-group topic1,topic2
* Yarn cluster:
* $ bin/run-example --files \\
* ${jaas_path}/kafka_jaas.conf,${keytab_path}/kafka.service.keytab,${krb5_path}/krb5.conf \\
* --driver-java-options \\
* "-Djava.security.auth.login.config=./kafka_jaas.conf \\
* -Djava.security.krb5.conf=./krb5.conf" \\
* --conf \\
* "spark.executor.extraJavaOptions=-Djava.security.auth.login.config=./kafka_jaas.conf" \\
* --master yarn --deploy-mode cluster \\
* streaming.DirectKerberizedKafkaWordCount broker1-host:port,broker2-host:port \\
* consumer-group topic1,topic2
*
* kafka_jaas.conf can manually create, template as:
* KafkaClient {
* com.sun.security.auth.module.Krb5LoginModule required
* keyTab="./kafka.service.keytab"
* useKeyTab=true
* storeKey=true
* useTicketCache=false
* serviceName="kafka"
* principal="kafka/[email protected]";
* };
* kafka_driver_jaas.conf (used by yarn client) and kafka_jaas.conf are basically the same
* except for some differences at 'keyTab'. In kafka_driver_jaas.conf, 'keyTab' should be
* "${keytab_path}/kafka.service.keytab".
* In addition, for IBM JVMs, please use 'com.ibm.security.auth.module.Krb5LoginModule'
* instead of 'com.sun.security.auth.module.Krb5LoginModule'.
*
* Note that this example uses SASL_PLAINTEXT for simplicity; however,
* SASL_PLAINTEXT has no SSL encryption and likely be less secure. Please consider
* using SASL_SSL in production.
*/
object DirectKerberizedKafkaWordCount {
def main(args: Array[String]): Unit = {
if (args.length < 3) {
System.err.println(s"""
|Usage: DirectKerberizedKafkaWordCount <brokers> <groupId> <topics>
| <brokers> is a list of one or more Kafka brokers
| <groupId> is a consumer group name to consume from topics
| <topics> is a list of one or more kafka topics to consume from
|
""".stripMargin)
System.exit(1)
}
StreamingExamples.setStreamingLogLevels()
val Array(brokers, groupId, topics) = args
// Create context with 2 second batch interval
val sparkConf = new SparkConf().setAppName("DirectKerberizedKafkaWordCount")
val ssc = new StreamingContext(sparkConf, Seconds(2))
// Create direct kafka stream with brokers and topics
val topicsSet = topics.split(",").toSet
val kafkaParams = Map[String, Object](
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> brokers,
ConsumerConfig.GROUP_ID_CONFIG -> groupId,
ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
CommonClientConfigs.SECURITY_PROTOCOL_CONFIG -> SecurityProtocol.SASL_PLAINTEXT.name)
val messages = KafkaUtils.createDirectStream[String, String](
ssc,
LocationStrategies.PreferConsistent,
ConsumerStrategies.Subscribe[String, String](topicsSet, kafkaParams))
// Get the lines, split them into words, count the words and print
val lines = messages.map(_.value)
val words = lines.flatMap(_.split(" "))
val wordCounts = words.map(x => (x, 1L)).reduceByKey(_ + _)
wordCounts.print()
// Start the computation
ssc.start()
ssc.awaitTermination()
}
}
// scalastyle:on println
| lhfei/spark-in-action | spark-3.x/src/main/scala/org/apache/spark/examples/streaming/DirectKerberizedKafkaWordCount.scala | Scala | apache-2.0 | 5,326 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package main.display.animations
import com.anathema_roguelike.main.display.Color
import com.anathema_roguelike.main.display.VisualRepresentation
import com.anathema_roguelike.main.utilities.position.Point
import squidpony.squidgrid.gui.gdx.SColor
class Blink(representation: VisualRepresentation, position: Point, duration: Float) extends PersistentAnimation(position, duration) {
override protected def update(percent: Float): Unit = {
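    // Blend from black up to the representation's colour over the first half of the animation,
    // then back down to black over the second half, driven linearly by `percent`.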
val color = if(percent < .5) {
Color.factory.blend(Color.BLACK, representation.color, percent * 2)
} else {
Color.factory.blend(representation.color, Color.BLACK, (percent - .5) * 2)
}
renderChar(getPosition, representation.char, color)
}
} | carlminden/anathema-roguelike | src/com/anathema_roguelike/main/display/animations/Blink.scala | Scala | gpl-3.0 | 1,591 |
package com.github.gigurra.serviceutils.twitter.service
import com.twitter.util.Future
import com.github.gigurra.serviceutils.twitter.logging.Logging
import com.github.gigurra.serviceutils.twitter.service.ServiceErrors.{AutoLoggingOff, AutoLoggingOn}
trait ServiceErrors extends Logging {
def autoLog: Boolean
def serviceName: String = getClass.getSimpleName
def badRequest(message: String, exc: Throwable = null): ServiceException = {
val msg = s"Bad Request to service '$serviceName' (exception/cause = $exc): $message"
if (autoLog)
logger.error(msg)
ServiceException(Responses.badRequest(msg))
}
def BadRequest[T](message: String, exc: Throwable = null): Future[T] = {
Future.exception(badRequest(message, exc))
}
def unauthorized(message: String, exc: Throwable = null): ServiceException = {
val msg = s"Unauthorized request to service '$serviceName' (exception/cause = $exc): $message"
if (autoLog)
logger.warning(msg)
ServiceException(Responses.unauthorized(msg))
}
def Unauthorized[T](message: String, exc: Throwable = null): Future[T] = {
Future.exception(unauthorized(message, exc))
}
def internalServerError(message: String, exc: Throwable = null): ServiceException = {
val msg = s"Internal Server Error in service '$serviceName' (exception/cause = $exc): $message"
if (autoLog)
logger.error(new RuntimeException(msg, exc), msg)
ServiceException(Responses.internalServerError(msg))
}
def InternalServerError[T](message: String, exc: Throwable = null): Future[T] = {
Future.exception(internalServerError(message, exc))
}
def notFound(message: String, exc: Throwable = null): ServiceException = {
val msg = s"Not Found in service service '$serviceName' (exception/cause = $exc): $message"
if (autoLog)
logger.warning(msg)
ServiceException(Responses.notFound(msg))
}
def NotFound[T](message: String, exc: Throwable = null): Future[T] = {
Future.exception(notFound(message, exc))
}
def conflict(message: String, exc: Throwable = null): ServiceException = {
val msg = s"Conflict in service '$serviceName' (exception/cause = $exc): $message"
if (autoLog)
logger.warning(msg)
ServiceException(Responses.conflict(msg))
}
def Conflict[T](message: String, exc: Throwable = null): Future[T] = {
Future.exception(conflict(message, exc))
}
def timeout(message: String, exc: Throwable = null): ServiceException = {
val msg = s"Timeout in service '$serviceName' (exception/cause = $exc): $message"
if (autoLog)
logger.warning(msg)
ServiceException(Responses.timeout(msg))
}
def Timeout[T](message: String, exc: Throwable = null): Future[T] = {
Future.exception(timeout(message, exc))
}
def tooManyRequests(message: String, exc: Throwable = null): ServiceException = {
val msg = s"Too many requests in service '$serviceName' (exception/cause = $exc): $message"
if (autoLog)
logger.warning(msg)
ServiceException(Responses.tooManyRequests(msg))
}
def TooManyRequests[T](message: String, exc: Throwable = null): Future[T] = {
Future.exception(tooManyRequests(message, exc))
}
def unavailable(message: String, exc: Throwable = null): ServiceException = {
val msg = s"Service unavailable in service '$serviceName' (exception/cause = $exc): $message"
if (autoLog)
logger.warning(msg)
ServiceException(Responses.unavailable(msg))
}
def Unavailable[T](message: String, exc: Throwable = null): Future[T] = {
Future.exception(unavailable(message, exc))
}
}
trait ServiceErrorsWithoutAutoLogging extends ServiceErrors with AutoLoggingOff
trait ServiceErrorsWithAutoLogging extends ServiceErrors with AutoLoggingOn
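// Hedged usage sketch (added; `MyService` and its method are illustrative only). A service can mix
// in one of the traits above and fail a Future with the capitalised helpers, or build the
// ServiceException itself with the lower-case ones:
//
//   class MyService extends ServiceErrorsWithAutoLogging {
//     def lookup(id: String): Future[String] =
//       if (id.isEmpty) BadRequest("id must not be empty")
//       else Future.value(id)
//   }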
object ServiceErrors {
trait AutoLoggingOff { _ : ServiceErrors =>
def autoLog = false
}
trait AutoLoggingOn { _ : ServiceErrors =>
def autoLog = true
}
}
| GiGurra/service-utils | src/main/scala/com/github/gigurra/serviceutils/twitter/service/ServiceErrors.scala | Scala | mit | 3,937 |
package core.node
/**
 * This service allows a Node to be registered against a centralized lookup service for nodes across a network.
*
* @author Manuel Bernhardt <[email protected]>
*/
trait NodeRegistrationService {
/**
   * Registers a new node. The organization making the request automatically becomes the owner.
*
* @param node the node to register
   * @param userName the userName of the user who registered the node
*/
def registerNode(node: Node, userName: String)
/**
* Updates a node information
*
* @param node the node to update
*/
def updateNode(node: Node)
/**
* Removes a node
*
   * @param node the node to remove
*/
def removeNode(node: Node)
/**
* List all members of this node
*
* @param node the node to list users of
* @return a list of userNames
*/
def listMembers(node: Node): Seq[String]
/**
* Adds a member to a node
*
* @param node the node to update
* @param userName the member to add
*/
def addMember(node: Node, userName: String)
/**
* Removes a member from a node
*
* @param node the node to update
* @param userName the member to remove
*/
def removeMember(node: Node, userName: String)
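  // Hedged usage sketch (added; `registrationService`, `myNode` and the user names are
  // illustrative assumptions, not part of the original API docs):
  //
  //   registrationService.registerNode(myNode, "admin")
  //   registrationService.addMember(myNode, "jane.doe")
  //   val members: Seq[String] = registrationService.listMembers(myNode)
  //   registrationService.removeMember(myNode, "jane.doe")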
} | delving/culture-hub | web-core/app/core/node/NodeRegistrationService.scala | Scala | apache-2.0 | 1,230 |
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.control.controls
import java.{util ⇒ ju}
import org.orbeon.dom.Element
import org.orbeon.oxf.common.OXFException
import org.orbeon.oxf.util.CollectionUtils
import org.orbeon.oxf.util.StringUtils._
import org.orbeon.oxf.xforms.XFormsConstants._
import org.orbeon.oxf.xforms.action.actions.{XFormsDeleteAction, XFormsInsertAction}
import org.orbeon.oxf.xforms.analysis.ElementAnalysis
import org.orbeon.oxf.xforms.analysis.controls.{RepeatControl, RepeatIterationControl}
import org.orbeon.oxf.xforms.control._
import org.orbeon.oxf.xforms.control.controls.XFormsRepeatControl._
import org.orbeon.oxf.xforms.event.events.{XXFormsDndEvent, XXFormsIndexChangedEvent, XXFormsNodesetChangedEvent, XXFormsSetindexEvent}
import org.orbeon.oxf.xforms.event.{Dispatch, XFormsEvent}
import org.orbeon.oxf.xforms.state.ControlState
import org.orbeon.oxf.xforms.xbl.XBLContainer
import org.orbeon.oxf.xforms.{BindingContext, ControlTree, XFormsContainingDocument}
import org.orbeon.oxf.xml.SaxonUtils
import org.orbeon.saxon.om.{Item, NodeInfo}
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.collection.{mutable ⇒ m}
import org.orbeon.oxf.util.CoreUtils._
// Represents an xf:repeat container control.
class XFormsRepeatControl(
container : XBLContainer,
parent : XFormsControl,
element : Element,
effectiveId : String
) extends XFormsNoSingleNodeContainerControl(
container,
parent,
element,
effectiveId
) with NoLHHATrait {
override type Control <: RepeatControl
// TODO: Check whether this should be handled following the same pattern as usual refresh events.
// 2018-03-07: Since we no longer update, by default, repeats upon `xf:insert`/`xf:delete`, the time
// between setting `refreshInfoOpt` in the control and using it is much shorter. The value is set
// when control bindings are updated (which usually should not cause any events to be dispatched,
// although `xforms-disabled` can be I think), and used in the later `dispatchRefreshEvents()`. A
// better mechanism would, for sure, be desirable.
private var refreshInfoOpt: Option[RefreshInfo] = None
// Initial local state
setLocal(new XFormsRepeatControlLocal)
// The repeat's sequence binding
final override def binding = Option(bindingContext) filter (_.newBind) map (_.nodeset.asScala) getOrElse Nil
// Store initial repeat index information
private val startIndexString = element.attributeValue("startindex")
private val startIndex = Option(startIndexString) map (_.toInt) getOrElse 1
def getStartIndex = startIndex
override def supportsRefreshEvents = true
override def children = super.children.asInstanceOf[Seq[XFormsRepeatIterationControl]]
override def onCreate(restoreState: Boolean, state: Option[ControlState]): Unit = {
super.onCreate(restoreState, state)
// Ensure that the initial state is set, either from default value, or for state deserialization.
state match {
case Some(state) ⇒
setLocal(new XFormsRepeatControlLocal(state.keyValues("index").toInt))
case None if restoreState ⇒
// This can happen with xxf:dynamic, which does not guarantee the stability of ids, therefore state for
// a particular control might not be found.
setLocal(new XFormsRepeatControlLocal(ensureIndexBounds(getStartIndex)))
case None ⇒
setIndexInternal(getStartIndex)
}
// Reset refresh information
refreshInfoOpt = None
}
// Set the repeat index. The index is automatically adjusted to fall within bounds.
def setIndex(index: Int): Unit = {
val oldRepeatIndex = getIndex // 1-based
// Set index
setIndexInternal(index)
if (oldRepeatIndex != getIndex) {
// Dispatch custom event to notify that the repeat index has changed
Dispatch.dispatchEvent(new XXFormsIndexChangedEvent(this, oldRepeatIndex, getIndex))
}
// Handle rebuild flags for container affected by changes to this repeat
val resolutionScopeContainer = container.findScopeRoot(getPrefixedId)
resolutionScopeContainer.setDeferredFlagsForSetindex()
}
private def setIndexInternal(index: Int): Unit = {
val local = getLocalForUpdate.asInstanceOf[XFormsRepeatControl.XFormsRepeatControlLocal]
local.index = ensureIndexBounds(index)
}
private def ensureIndexBounds(index: Int) =
math.min(math.max(index, if (getSize > 0) 1 else 0), getSize)
// Return the size based on the nodeset size, so we can call this before all iterations have been added.
// Scenario:
// - call index() or xxf:index() from within a variable within the iteration:
// - not all iterations have been added, but the size must be known
override def getSize =
Option(bindingContext) map (_.nodeset.size) getOrElse 0
def getIndex =
if (isRelevant) {
val local = getCurrentLocal.asInstanceOf[XFormsRepeatControl.XFormsRepeatControlLocal]
if (local.index != -1)
local.index
else
throw new OXFException("Repeat index was not set for repeat id: " + effectiveId)
} else
0
def doDnD(dndEvent: XXFormsDndEvent): Unit = {
require(isDnD, s"attempt to process `xxforms-dnd` event on non-DnD-enabled control `$effectiveId`")
// Get all repeat iteration details
val dndStart = dndEvent.getDndStart.splitTo[List]("-")
val dndEnd = dndEvent.getDndEnd.splitTo[List]("-")
require(dndStart.size == 1 && dndEnd.size == 1, "DnD over repeat boundaries not supported yet")
// Find source information
val sourceItems = bindingContext.nodeset.asScala
val sourceItemsSize = sourceItems.size
val requestedSourceIndex = dndStart.last.toInt
val requestedDestinationIndex = dndEnd.last.toInt
require(
requestedSourceIndex >= 1 && requestedSourceIndex <= sourceItemsSize,
s"Out of range DnD start iteration: $requestedSourceIndex"
)
require(
requestedDestinationIndex >= 1 && requestedDestinationIndex <= sourceItemsSize,
s"Out of range DnD end iteration: $requestedDestinationIndex"
)
val destinationItemsCopy = new ju.ArrayList[Item](sourceItems.asJava)
require(requestedSourceIndex != requestedDestinationIndex, "`dnd-start` must be different from `dnd-end`")
val deletedNodeInfo = {
val deletionDescriptors =
XFormsDeleteAction.doDelete(
containingDocument = containingDocument,
collectionToUpdate = sourceItems,
deleteIndexOpt = Some(requestedSourceIndex),
doDispatch = false, // don't dispatch event because one call to updateRepeatNodeset() is enough
updateRepeats = false // meaningless if `doDispatch == false`
)
deletionDescriptors.head.nodeInfo // above deletes exactly one node
}
// This removes from our copy of the nodeset, not from the control's nodeset, which must not be
// touched until control bindings are updated.
destinationItemsCopy.remove(requestedSourceIndex - 1)
    // Below we still try to use `before` when we can, because that better handles the case of a repeat over
// a hierarchy of nodes where children come after containers.
val (actualDestinationIndex, destinationBeforeAfter) = {
if (requestedDestinationIndex < requestedSourceIndex)
(requestedDestinationIndex, "before") // insertion point is before or on (degenerate case) deleted node
else if (requestedDestinationIndex == sourceItemsSize) // must become last element of collection
(requestedDestinationIndex - 1, "after")
else
(requestedDestinationIndex, "before") // insertion point is after deleted node
}
// 3. Insert node into destination
XFormsInsertAction.doInsert(
/* containingDocument = */ containingDocument,
/* indentedLogger = */ containingDocument.getControls.indentedLogger,
/* positionAttribute = */ destinationBeforeAfter,
/* collectionToBeUpdated = */ destinationItemsCopy,
/* insertContextNodeInfo = */ null, // `insertContextNodeInfo` doesn't actually matter because `collectionToBeUpdated` is not empty
/* originItems = */ List(deletedNodeInfo: Item).asJava,
/* insertionIndex = */ actualDestinationIndex,
/* doClone = */ false, // do not clone the node as we know the node it is ready for insertion
/* doDispatch = */ true,
/* requireDefaultValues = */ false,
/* updateRepeats = */ true,
/* searchForInstance = */ true
)
// TODO: should dispatch xxforms-move instead of xforms-insert?
}
def isDnD =
element.attributeValueOpt(XXFORMS_DND_QNAME) exists (_ != "none")
// Push binding but ignore non-relevant iterations
override protected def computeBinding(parentContext: BindingContext) = {
val contextStack = container.getContextStack
contextStack.setBinding(parentContext)
contextStack.pushBinding(element, effectiveId, staticControl.scope)
// Keep only the relevant items
import XFormsSingleNodeControl.isRelevantItem
val items = contextStack.getCurrentBindingContext.nodeset
val allRelevant = items.asScala forall isRelevantItem
if (allRelevant)
contextStack.getCurrentBindingContext
else
contextStack.getCurrentBindingContext.copy(nodeset = items.asScala filter isRelevantItem asJava)
}
def updateSequenceForInsertDelete(insertedNodeInfos: Option[Seq[NodeInfo]]): Unit = {
// NOTE: This can be called even if we are not relevant!
// Don't do any work if our parent is non-relevant because that means we are necessarily not relevant either
if (! parent.isRelevant)
return
// Get old nodeset
val oldRepeatNodeset = bindingContext.nodeset.asScala
// Set new binding context on the repeat control
locally {
// NOTE: Here we just reevaluate against the parent; maybe we should reevaluate all the way down. See also:
// https://github.com/orbeon/orbeon-forms/issues/2156
val contextStack = container.getContextStack
if (bindingContext.parent eq null)
// This might happen at the top-level if there is no model and no variables in scope?
contextStack.resetBindingContext
else {
contextStack.setBinding(bindingContext)
// If there are some preceding variables in scope, the top of the stack is now the last scoped variable
contextStack.popBinding
}
// Do this before evaluating the binding because after that controls are temporarily in an inconsistent
// state
containingDocument.getControls.cloneInitialStateIfNeeded()
evaluateBindingAndValues(
parentContext = contextStack.getCurrentBindingContext,
update = true,
restoreState = false,
state = None
)
}
// Move things around and create new iterations if needed
if (! SaxonUtils.compareItemSeqs(oldRepeatNodeset, bindingContext.nodeset.asScala)) {
      // Update iterations
val focusedBeforeOpt = containingDocument.getControls.getFocusedControl
val (newIterations, partialFocusRepeatOption) =
updateIterations(oldRepeatNodeset, insertedNodeInfos, isInsertDelete = true)
// Evaluate all controls and then dispatches creation events
val currentControlTree = containingDocument.getControls.getCurrentControlTree
for (newIteration ← newIterations)
currentControlTree.initializeSubTree(newIteration, includeCurrent = true)
// This will dispatch xforms-enabled/xforms-disabled/xxforms-nodeset-changed/xxforms-index-changed events
// if needed on the repeat control itself (subtrees are handled above).
containingDocument.getControls.getCurrentControlTree.dispatchRefreshEvents(List(getEffectiveId))
// Handle focus changes
Focus.updateFocusWithEvents(focusedBeforeOpt, partialFocusRepeatOption)(containingDocument)
}
}
/**
* Update this repeat's iterations given the old and new sequences, and a list of inserted nodes if any (used for
* index updates). This returns a list of entirely new repeat iterations added, if any. The repeat's index is
* adjusted.
*
* This dispatches destruction events for removed iterations, but does not dispatch creation events.
*
* NOTE: The new binding context must have been set on this control before calling.
*/
def updateIterations(
oldRepeatItems : Seq[Item], // old items
insertedItems : Option[Seq[NodeInfo]], // items just inserted by `xf:insert` if any
isInsertDelete : Boolean
): (Seq[XFormsRepeatIterationControl], Option[XFormsRepeatControl]) = {
// NOTE: The following assumes the nodesets have changed
val controls = containingDocument.getControls
// Get current (new) nodeset
val newRepeatItems = bindingContext.nodeset.asScala
val currentControlTree = controls.getCurrentControlTree
val oldRepeatIndex = getIndex // 1-based
var updated = false
val (newIterations, movedIterationsOldPositions, movedIterationsNewPositions, partialFocusRepeatOption) =
if (newRepeatItems.nonEmpty) {
// This may be set to this repeat or to a nested repeat if focus was within a removed iteration
var partialFocusRepeatOption: Option[XFormsRepeatControl] = None
// For each new item, what its old index was, -1 if it was not there
val oldIndexes = findItemIndexes(newRepeatItems, oldRepeatItems)
// For each old item, what its new index is, -1 if it is no longer there
val newIndexes = findItemIndexes(oldRepeatItems, newRepeatItems)
// Remove control information for iterations that move or just disappear
val oldChildren = children
for (i ← newIndexes.indices) {
val currentNewIndex = newIndexes(i)
if (currentNewIndex != i) {
// Node has moved or is removed
val isRemoved = currentNewIndex == -1
val movedOrRemovedIteration = oldChildren(i)
if (isRemoved) {
withDebug("removing iteration", Seq("id" → getEffectiveId, "index" → (i + 1).toString)) {
// If focused control is in removed iteration, remember this repeat and partially remove
// focus before deindexing the iteration. The idea here is that we don't want to dispatch
// events to controls that have been removed from the index. So we dispatch all the
// possible focus out events here.
if (partialFocusRepeatOption.isEmpty && Focus.isFocusWithinContainer(movedOrRemovedIteration)) {
partialFocusRepeatOption = Some(XFormsRepeatControl.this)
Focus.removeFocusPartially(containingDocument, boundary = partialFocusRepeatOption)
}
// Dispatch destruction events
currentControlTree.dispatchDestructionEventsForRemovedRepeatIteration(movedOrRemovedIteration, includeCurrent = true)
// Indicate to iteration that it is being removed
// As of 2012-03-07, only used by XFormsComponentControl to destroy the XBL container
// This also removes the nested models from XPath dependencies
movedOrRemovedIteration.iterationRemoved()
}
}
// Deindex old iteration
currentControlTree.deindexSubtree(movedOrRemovedIteration, includeCurrent = true)
updated = true
}
}
// Set new repeat index (do this before creating new iterations so that index is available then)
val didSetIndex =
insertedItems match {
case Some(insertedItems) ⇒
// Insert logic
// We want to point to a new node (case of insert)
// First, try to point to the last inserted node if found
findItemIndexes(insertedItems, newRepeatItems).reverse find (_ != -1) exists { index ⇒
val newRepeatIndex = index + 1
debug("setting index to new node", Seq(
"id" → getEffectiveId,
"new index" → newRepeatIndex.toString
))
setIndexInternal(newRepeatIndex)
true
}
case None ⇒ false
}
if (! didSetIndex) {
// Non-insert logic (covers delete and other arbitrary changes to the repeat sequence)
val indexOfLastNewIteration = oldIndexes lastIndexOf -1
if (indexOfLastNewIteration != -1) {
// Items were inserted so pick as new index the last of the new items, unless they are all new,
// in which case we pick the first one.
// We could pick another logic, see also: https://github.com/orbeon/orbeon-forms/issues/3503
val newRepeatIndex =
if (oldRepeatItems.isEmpty)
1
else
indexOfLastNewIteration + 1
if (newRepeatIndex != oldRepeatIndex) {
debug("adjusting index for new item", Seq(
"id" → getEffectiveId,
"old index" → oldRepeatIndex.toString,
"new index" → newRepeatIndex.toString
))
setIndexInternal(newRepeatIndex)
}
} else if (oldRepeatIndex > 0 && oldRepeatIndex <= newIndexes.length && newIndexes(oldRepeatIndex - 1) != -1) {
// The index was pointing to an item which is still there, so just move the index
val newRepeatIndex = newIndexes(oldRepeatIndex - 1) + 1
if (newRepeatIndex != oldRepeatIndex) {
debug("adjusting index for existing item", Seq(
"id" → getEffectiveId,
"old index" → oldRepeatIndex.toString,
"new index" → newRepeatIndex.toString
))
setIndexInternal(newRepeatIndex)
}
} else if (oldRepeatIndex > 0 && oldRepeatIndex <= newIndexes.length) {
// The index was pointing to an item which has been removed
if (oldRepeatIndex > newRepeatItems.size) {
// "if the repeat index was pointing to one of the deleted repeat items, and if the new size
// of the collection is smaller than the index, the index is changed to the new size of the
// collection."
debug("setting index to the size of the new sequence", Seq(
"id" → getEffectiveId,
"new index" → newRepeatItems.size.toString
))
setIndexInternal(newRepeatItems.size)
} else {
// "if the new size of the collection is equal to or greater than the index, the index is
// not changed"
// NOP
}
} else {
// Old index was out of bounds?
setIndexInternal(getStartIndex)
debug("resetting index", Seq("id" → getEffectiveId, "new index" → getIndex.toString))
}
}
// Iterate over new sequence to move or add iterations
val newSize = newRepeatItems.size
val newChildren = new ArrayBuffer[XFormsControl](newSize)
val newIterations = ListBuffer[XFormsRepeatIterationControl]()
val movedIterationsOldPositions = ListBuffer[Int]()
val movedIterationsNewPositions = ListBuffer[Int]()
for (repeatIndex ← 1 to newSize) {
val currentOldIndex = oldIndexes(repeatIndex - 1)
if (currentOldIndex == -1) {
// This new item was not in the old sequence so create a new one
// Add new iteration
newChildren +=
withDebug("creating new iteration", Seq(
"id" → getEffectiveId,
"index" → repeatIndex.toString
)) {
// Create repeat iteration
val newIteration = controls.createRepeatIterationTree(this, repeatIndex)
updated = true
newIterations += newIteration
newIteration
}
} else {
// This new item was in the old nodeset so keep it
val existingIteration = oldChildren(currentOldIndex)
val newIterationOldIndex = existingIteration.iterationIndex
def updateBindingsIfNeeded(): Unit = {
// NOTE: We used to only update the binding on the iteration itself
if (isInsertDelete) {
val updater = Controls.updateBindings(existingIteration)
if (partialFocusRepeatOption.isEmpty && updater.partialFocusRepeat.isDefined)
partialFocusRepeatOption = updater.partialFocusRepeat
}
}
if (newIterationOldIndex != repeatIndex) {
// Iteration index changed
debug("moving iteration", Seq(
"id" → getEffectiveId,
"old index" → newIterationOldIndex.toString,
"new index" → repeatIndex.toString
))
// Set new index
existingIteration.setIterationIndex(repeatIndex)
// Update iteration bindings
updateBindingsIfNeeded()
// Index iteration
currentControlTree.indexSubtree(existingIteration, includeCurrent = true)
updated = true
// Add information for moved iterations
movedIterationsOldPositions += newIterationOldIndex
movedIterationsNewPositions += repeatIndex
} else {
// Iteration index stayed the same
// Update iteration bindings
updateBindingsIfNeeded()
}
// Add existing iteration
newChildren += existingIteration
}
}
// Set the new children iterations
setChildren(newChildren)
(
newIterations,
movedIterationsOldPositions.toList,
movedIterationsNewPositions.toList,
partialFocusRepeatOption
)
} else {
// New repeat nodeset is now empty
// If focused control is in removed iterations, remove focus first
if (Focus.isFocusWithinContainer(XFormsRepeatControl.this))
Focus.removeFocus(containingDocument)
// Remove control information for iterations that disappear
for (removedIteration ← children) {
withDebug("removing iteration", Seq(
"id" → getEffectiveId,
"index" → removedIteration.iterationIndex.toString
)) {
// Dispatch destruction events and deindex old iteration
currentControlTree.dispatchDestructionEventsForRemovedRepeatIteration(removedIteration, includeCurrent = true)
currentControlTree.deindexSubtree(removedIteration, includeCurrent = true)
}
updated = true
}
if (getIndex != 0)
debug("setting index to 0", Seq("id" → getEffectiveId))
clearChildren()
setIndexInternal(0)
(Nil, Nil, Nil, None)
}
// Keep information available until refresh events are dispatched, which must happen soon after this method was
// called
this.refreshInfoOpt =
(updated || oldRepeatIndex != getIndex) option
RefreshInfo(
updated,
if (updated) newIterations else Nil,
if (updated) movedIterationsOldPositions else Nil,
if (updated) movedIterationsNewPositions else Nil,
oldRepeatIndex
)
(newIterations, partialFocusRepeatOption)
}
override def dispatchChangeEvents(): Unit =
refreshInfoOpt foreach { localRefreshInfo ⇒
this.refreshInfoOpt = None
// Dispatch custom event to `xf:repeat` to notify that the nodeset has changed
if (localRefreshInfo.isNodesetChanged)
Dispatch.dispatchEvent(new XXFormsNodesetChangedEvent(this, localRefreshInfo.newIterations,
localRefreshInfo.movedIterationsOldPositions, localRefreshInfo.movedIterationsNewPositions))
// Dispatch custom event to notify that the repeat index has changed
if (localRefreshInfo.oldRepeatIndex != getIndex)
Dispatch.dispatchEvent(new XXFormsIndexChangedEvent(this, localRefreshInfo.oldRepeatIndex, getIndex))
}
private def findItemIndexes(items1: Seq[Item], items2: Seq[Item]) = {
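    // For each item of items1, compute its index within items2 (using Saxon item comparison),
    // or -1 when the item is absent.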
def indexOfItem(otherItem: Item) =
items2 indexWhere (SaxonUtils.compareItems(_, otherItem))
items1 map indexOfItem toArray
}
// Serialize index
override def serializeLocal: ju.Map[String, String] =
ju.Collections.singletonMap("index", Integer.toString(getIndex))
// "4.3.7 The xforms-focus Event [...] Setting focus to a repeat container form control sets the focus to the
// repeat object associated with the repeat index"
override def focusableControls =
if (isRelevant && getIndex > 0)
children(getIndex - 1).focusableControls
else
Iterator.empty
// NOTE: pushBindingImpl ensures that any item we are bound to is relevant
override def computeRelevant = super.computeRelevant && getSize > 0
override def performDefaultAction(event: XFormsEvent) = event match {
case e: XXFormsSetindexEvent ⇒ setIndex(e.index)
case e: XXFormsDndEvent ⇒ doDnD(e)
case _ ⇒ super.performDefaultAction(event)
}
override def buildChildren(
buildTree : (XBLContainer, BindingContext, ElementAnalysis, Seq[Int]) ⇒ Option[XFormsControl],
idSuffix : Seq[Int]
): Unit = {
// Build all children that are not repeat iterations
Controls.buildChildren(
this,
staticControl.children filterNot (_.isInstanceOf[RepeatIterationControl]),
buildTree,
idSuffix
)
// Build one sub-tree per repeat iteration (iteration itself handles its own binding with pushBinding,
// depending on its index/suffix)
val iterationAnalysis = staticControl.iteration.get
for (iterationIndex ← 1 to bindingContext.nodeset.size)
buildTree(container, bindingContext, iterationAnalysis, idSuffix :+ iterationIndex)
// TODO LATER: handle isOptimizeRelevance()
}
}
object XFormsRepeatControl {
class XFormsRepeatControlLocal(var index: Int = -1)
extends ControlLocalSupport.XFormsControlLocal
case class RefreshInfo(
isNodesetChanged: Boolean,
newIterations: Seq[XFormsRepeatIterationControl],
movedIterationsOldPositions: Seq[Int],
movedIterationsNewPositions: Seq[Int],
oldRepeatIndex: Int
)
// Find the initial repeat indexes for the given doc
def initialIndexes(doc: XFormsContainingDocument) =
findIndexes(
doc.getControls.getCurrentControlTree,
doc.getStaticOps.repeats,
_.initialLocal.asInstanceOf[XFormsRepeatControlLocal].index
)
// Find the current repeat indexes for the given doc
def currentIndexes(doc: XFormsContainingDocument) =
findIndexes(doc.getControls.getCurrentControlTree, doc.getStaticOps.repeats, _.getIndex)
// Find the current repeat indexes for the given doc, as a string
def currentNamespacedIndexesString(doc: XFormsContainingDocument) = {
val ns = doc.getContainerNamespace
val repeats =
for ((repeatId, index) ← currentIndexes(doc))
yield ns + repeatId + ' ' + index
repeats mkString ","
}
// For the given control, return the matching control that follows repeat indexes
// This might be the same as the given control if it is within the repeat indexes chain, or another control if not
def findControlFollowIndexes(control: XFormsControl) = {
val doc = control.containingDocument
val tree = doc.getControls.getCurrentControlTree
val ancestorRepeatsFromRoot = control.staticControl.ancestorRepeatsAcrossParts.reverse
// Find just the indexes we need
val indexes = findIndexes(tree, ancestorRepeatsFromRoot, _.getIndex)
// Build a suffix based on the ancestor repeats' current indexes
val suffix = suffixForRepeats(indexes, ancestorRepeatsFromRoot)
tree.findControl(addSuffix(control.prefixedId, suffix))
}
// Return all the controls with the same prefixed id as the control specified
def findAllRepeatedControls(control: XFormsControl): Iterator[XFormsControl] = {
val doc = control.containingDocument
val tree = doc.getControls.getCurrentControlTree
val controlPrefixedId = control.prefixedId
def search(ancestorRepeats: List[RepeatControl], suffix: String): Iterator[String] =
ancestorRepeats match {
case Nil ⇒
Iterator(addSuffix(controlPrefixedId, suffix))
case head :: tail ⇒
val repeatEffectiveId = addSuffix(head.prefixedId, suffix)
val repeatControl =
tree.findRepeatControl(repeatEffectiveId) getOrElse
(throw new IllegalStateException)
for {
index ← Iterator.from(1).take(repeatControl.getSize)
i ← search(tail, suffix + (if (suffix.isEmpty) "" else REPEAT_INDEX_SEPARATOR) + index)
} yield
i
}
search(control.staticControl.ancestorRepeatsAcrossParts.reverse, "") flatMap tree.findControl
}
// Find indexes for the given repeats in the current document
private def findIndexes(tree: ControlTree, repeats: Seq[RepeatControl], index: XFormsRepeatControl ⇒ Int) =
repeats.foldLeft(m.LinkedHashMap[String, Int]()) {
(indexes, repeat) ⇒
// Build the suffix based on all the ancestor repeats' indexes
val suffix = suffixForRepeats(indexes, repeat.ancestorRepeatsAcrossParts.reverse)
// Build the effective id
val effectiveId = addSuffix(repeat.prefixedId, suffix)
// Add the index to the map (0 if the control is not found)
indexes += (repeat.prefixedId → {
tree.findRepeatControl(effectiveId) match {
case Some(control) ⇒ index(control)
case _ ⇒ 0
}
})
}
private def suffixForRepeats(indexes: collection.Map[String, Int], repeats: Seq[RepeatControl]) =
repeats map (repeat ⇒ indexes(repeat.prefixedId)) mkString REPEAT_INDEX_SEPARATOR_STRING
private def addSuffix(prefixedId: String, suffix: String) =
prefixedId + (if (suffix.length > 0) REPEAT_SEPARATOR + suffix else "")
} | brunobuzzi/orbeon-forms | xforms/jvm/src/main/scala/org/orbeon/oxf/xforms/control/controls/XFormsRepeatControl.scala | Scala | lgpl-2.1 | 31,377 |
/* Copyright 2012 Christian Douven
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package almhirt.problem
import scala.language.implicitConversions
import almhirt.common._
import scalaz.Show
import scalaz.Monoid
trait ProblemInstances {
implicit def ToAggregateProblemSemiGroup: Monoid[AggregatedProblem] =
new Monoid[AggregatedProblem] {
override val zero = AggregatedProblem.empty
override def append(a: AggregatedProblem, b: ⇒ AggregatedProblem): AggregatedProblem = {
AggregatedProblem(a.problems ++ b.problems)
}
}
}
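// Usage sketch added by the editor (an assumption, not part of the original file):
// with the Monoid instance above in scope, scalaz's semigroup syntax can fold any
// number of AggregatedProblem values into one.
object ProblemInstancesUsageSketch extends ProblemInstances {
  import scalaz.syntax.semigroup._
  def combineAll(aggs: List[AggregatedProblem]): AggregatedProblem =
    aggs.foldLeft(AggregatedProblem.empty)(_ |+| _) // result carries the problems of every input
}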
| chridou/almhirt | almhirt-common/src/main/scala/almhirt/problem/ProblemInstances.scala | Scala | apache-2.0 | 1,102 |
package lib
import helpers.ServiceHelpers
import io.apibuilder.api.v0.models.{DiffBreaking, DiffNonBreaking}
import io.apibuilder.spec.v0.models.{Attribute, Deprecation, Interface}
import org.scalatestplus.play.PlaySpec
import org.scalatestplus.play.guice.GuiceOneAppPerSuite
import play.api.libs.json.Json
class ServiceInterfaceDiffSpec extends PlaySpec
with GuiceOneAppPerSuite
with ServiceHelpers {
"no change" in {
val svc = makeService(
interfaces = Seq(makeInterface())
)
ServiceDiff(svc, svc).differences must be(Nil)
}
"add/remove interface" in {
val svc1 = makeService()
val svc2 = svc1.copy(
interfaces = Seq(makeInterface(name = "person"))
)
ServiceDiff(svc1, svc2).differences must be(
Seq(
DiffNonBreaking("interface added: person")
)
)
ServiceDiff(svc2, svc1).differences must be(
Seq(
DiffBreaking("interface removed: person")
)
)
}
"change interface" in {
def test(f: Interface => Interface) = {
val interface = makeInterface(
name = "person",
plural = "people",
)
val svc = makeService(
interfaces = Seq(interface),
)
val update = svc.copy(
interfaces = Seq(f(interface)),
)
ServiceDiff(svc, update).differences
}
test(_.copy(plural = "persons")) must be(
Seq(
DiffNonBreaking("interface person plural changed from people to persons")
)
)
test(_.copy(description = Some("test"))) must be(
Seq(
DiffNonBreaking("interface person description added: test")
)
)
test(_.copy(deprecation = Some(Deprecation()))) must be(
Seq(
DiffNonBreaking("interface person deprecated")
)
)
}
"fields" in {
def test(f: Interface => Interface) = {
val interface = makeInterface(
name = "person",
fields = Seq(makeField(name = "id")),
)
val svc = makeService(
interfaces = Seq(interface),
)
val update = svc.copy(
interfaces = Seq(f(interface)),
)
ServiceDiff(svc, update).differences
}
test(_.copy(fields = Nil)) must be(
Seq(
DiffBreaking("interface person field removed: id")
)
)
test { i => i.copy(fields = i.fields ++ Seq(makeField(name = "name", required = false))) } must be(
Seq(
DiffNonBreaking("interface person optional field added: name")
)
)
test { i => i.copy(fields = i.fields ++ Seq(makeField(name = "name", required = false, default = Some("test")))) } must be(
Seq(
DiffNonBreaking("interface person optional field added: name, defaults to test")
)
)
test { i => i.copy(fields = i.fields ++ Seq(makeField(name = "name", required = true))) } must be(
Seq(
DiffBreaking("interface person required field added: name")
)
)
test { i => i.copy(fields = i.fields ++ Seq(makeField(name = "name", required = true, default = Some("test")))) } must be(
Seq(
DiffNonBreaking("interface person required field added: name, defaults to test")
)
)
}
"attributes" in {
def test(f: Seq[Attribute] => Seq[Attribute]) = {
val interface = makeInterface(
name = "person",
attributes = Seq(
makeAttribute(name = "test", value = Json.obj("a" -> "b"))
)
)
val svc = makeService(
interfaces = Seq(interface),
)
val update = svc.copy(
interfaces = Seq(interface.copy(attributes = f(interface.attributes))),
)
ServiceDiff(svc, update).differences
}
test { _ => Nil } must be(
Seq(
DiffNonBreaking("interface person attribute removed: test")
)
)
test { a => a ++ Seq(makeAttribute(name = "foo")) } must be(
Seq(
DiffNonBreaking("interface person attribute added: foo")
)
)
test { a => a.map(_.copy(value = Json.obj())) } must be(
Seq(
DiffNonBreaking("""interface person attribute 'test' value changed from {"a":"b"} to {}""")
)
)
}
}
| gheine/apidoc | api/test/lib/ServiceInterfaceDiffSpec.scala | Scala | mit | 4,128 |
package io.questions.model.questionnaire
import cats.data.NonEmptyList
import cats.syntax.eq._
import io.questions.model.questionnaire.Element.{ Derived, Enumerations, Info, Parent, Primitive }
import io.questions.model.questionnaire.nodekey.{ NodeKey, NodeKeyExtension }
object QuestionnaireNodeChildAppender {
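  /**
   * Editor's summary (derived from the code below): locates the repeating parent
   * identified by `repeatingParentKey`, takes its first child, reduces it to a blank
   * template, re-keys every node in that template with `extension`, and appends the
   * resulting child back onto the repeating parent.
   */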
def apply(rootNode: QuestionnaireNode, repeatingParentKey: NodeKey, extension: NodeKeyExtension): Either[String, QuestionnaireNode] =
for {
parent ← rootNode.find(repeatingParentKey)
children ← parent.getRepeatingChildNodes
} yield {
val childTemplate = reduceToTemplate(children.head)
val keyMap = childTemplate.collectKeys.map(k ⇒ (k, k.copy(extension = extension))).toMap
val newChild = replaceKeys(childTemplate, keyMap)
addChild(rootNode, repeatingParentKey, newChild)
}
private def reduceToTemplate(n: QuestionnaireNode): QuestionnaireNode =
n.copy(
element = n.element match {
case p: Parent if p.repeating ⇒ Element.RepeatingParent(reduceToTemplate(p.children.head))
case p: Parent ⇒ p.copy(children = p.children.map(reduceToTemplate))
case p: Primitive ⇒ p
case d: Derived ⇒ d
case d: Info ⇒ d
case d: Enumerations ⇒ d
}
)
.blank
def addChild(current: QuestionnaireNode, repeatingParentKey: NodeKey, child: QuestionnaireNode): QuestionnaireNode =
current.element match {
case p: Parent if current.key === repeatingParentKey ⇒
current.copy(element = p.copy(children = p.children ::: NonEmptyList.of(child)))
case p: Parent ⇒
current.copy(element = p.copy(children = p.children.map(addChild(_, repeatingParentKey, child))))
case _: Primitive ⇒ current
case _: Derived ⇒ current
case _: Info ⇒ current
case _: Enumerations ⇒ current
}
private def replaceKeys(node: QuestionnaireNode, keyMap: Map[NodeKey, NodeKey]): QuestionnaireNode =
node.copy(
key = keyMap(node.key),
element = node.element match {
case p: Parent ⇒ p.copy(children = p.children.map(replaceKeys(_, keyMap)))
case p: Primitive ⇒ p
case d: Derived ⇒ d
case a: Info ⇒ a
case a: Enumerations ⇒ a
}
)
}
| channingwalton/qanda | questionnaire/src/main/scala/io/questions/model/questionnaire/QuestionnaireNodeChildAppender.scala | Scala | mit | 2,425 |
package com.twitter.finagle.service
import com.twitter.finagle.Service
import com.twitter.util.TimeConversions._
import com.twitter.util.{Await, Future, Time}
import org.mockito.Matchers
import org.specs.SpecificationWithJUnit
import org.specs.mock.Mockito
class RateLimitingFilterSpec extends SpecificationWithJUnit with Mockito {
"RateLimitingFilter" should {
def categorize(i: Int) = (i%5).toString
val strategy = new LocalRateLimitingStrategy[Int](categorize, 1.second, 5)
val filter = new RateLimitingFilter[Int, Int](strategy)
val service = mock[Service[Int, Int]]
service.close(any) returns Future.Done
service(Matchers.anyInt) returns Future.value(1)
val rateLimitedService = filter andThen service
"Execute requests below rate limit" in {
var t = Time.now
Time.withTimeFunction(t) { _ =>
(1 to 5) foreach { _ =>
Await.result(rateLimitedService(1)) mustBe 1
t += 100.milliseconds
}
}
}
"Refuse request if rate is above limit" in {
var t = Time.now
Time.withTimeFunction(t) { _ =>
(1 to 5) foreach { _ =>
Await.result(rateLimitedService(1)) mustBe 1
t += 100.milliseconds
}
Await.result(rateLimitedService(1)) must throwA[Exception]
}
}
"Execute different categories of requests and keep a window per category" in {
var t = Time.now
Time.withTimeFunction(t) { _ =>
(1 to 5) foreach { _ =>
(1 to 5) foreach { i => Await.result(rateLimitedService(i)) mustBe 1 }
t += 100.milliseconds
}
}
}
}
}
| firebase/finagle | finagle-core/src/test/scala/com/twitter/finagle/service/RateLimitingFilterSpec.scala | Scala | apache-2.0 | 1,633 |
package unfiltered.request
import java.io.{File => JFile}
trait MultiPartMatcher[T] {
def unapply(req: T): Option[T]
}
/** Multipart file upload utilities should extract data
* using this common format */
case class MultipartData[W](
params: String => Seq[String], files: String => W)
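/** Editor's usage sketch (an assumption, not part of the original source): reading
  * values out of the common format above, independent of the concrete wrapper type W. */
object MultipartDataUsageSketch {
  def firstFile[W](data: MultipartData[Seq[W]], field: String): Option[W] =
    data.files(field).headOption
  def firstParam(data: MultipartData[Seq[_]], name: String): Option[String] =
    data.params(name).headOption
}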
/** Describes an uploaded file, its content type, and
* a means of copying its content to another file */
trait FileWrapper {
val name: String
val contentType: String
def write(out: JFile): Option[JFile]
}
/** Describes some abstract file which exists on
* disk or in memory */
trait AbstractDiskFile extends FileWrapper {
def inMemory: Boolean
def bytes: Array[Byte]
def size: Long
val name: String
val contentType: String
}
/** Describes a file whose content may be written to a stream */
trait AbstractStreamedFile extends FileWrapper {
def stream[T]: (java.io.InputStream => T) => T
}
/** Base trait for disk-based multi part form data extraction */
trait AbstractDiskExtractor[R] {
/** @return the number of bytes to load a file into memory
* before writing to disk */
def memLimit: Int
/** @return the directory to write temp files to */
def tempDir: JFile
/**
* Given a req, extract the multipart form params into a
* (Map[String, Seq[String]], Map[String, Seq[FileItem]], request).
* The Map is assigned a default value of Nil, so param("p") would
* return Nil if there is no such parameter, or (as normal for
* servlets) a single empty string if the parameter was
* supplied without a value. */
def apply(req: R): MultipartData[Seq[AbstractDiskFile]]
}
trait DiskExtractor {
val memLimit = Int.MaxValue
val tempDir = new JFile(".")
}
/** Stream-based multi-part form data extractor */
trait StreamedExtractor[R] {
import java.io.{InputStream => JInputStream}
/**
* Provides extraction similar to MultiPartParams.Disk, except
* the second map will contain Map[String, Seq[StreamedFileWrapper]] rather
* than Map[String, Seq[DiskFileWrapper]].
* @note the seq returned by keys will only return the `first`
* named value. This is based on a limitation on apache commons file upload
* streaming interface. To read from the stream iterator,
* you must read before #next is called or the stream read will fail. */
def apply(req: R): MultipartData[Seq[AbstractStreamedFile]]
def withStreamedFile[T](istm: JInputStream)(f: java.io.InputStream => T): T = {
try { f(istm) } finally { istm.close }
}
}
trait TupleGenerator {
/** generates a tuple of (Map[String, List[A]], Map[String, List[B]]) */
protected def genTuple[A, B, C](iter: Iterator[C])(f: ((Map[String, List[A]], Map[String, List[B]]), C) => (Map[String, List[A]], Map[String, List[B]])) = {
val a: Map[String, List[A]] = Map.empty[String, List[A]].withDefaultValue(Nil)
val b: Map[String, List[B]] = Map.empty[String, List[B]].withDefaultValue(Nil)
iter.foldLeft((a, b))(f)
}
}
| omarkilani/unfiltered | uploads/src/main/scala/request/uploads.scala | Scala | mit | 2,975 |
import scala.reflect.macros.blackbox.Context
object Impls {
def foo(c: Context) = {
import c.universe._
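    // Editor's note: the identifier below is deliberately unresolvable — this fixture
    // exercises the compiler's handling of a macro returning a non-typeable tree
    // (see the "macro-invalidret-nontypeable" test name in the file path).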
val body = Ident(TermName("IDoNotExist"))
c.Expr[Int](body)
}
}
object Macros {
def foo = macro Impls.foo
} | yusuke2255/dotty | tests/disabled/macro/run/macro-invalidret-nontypeable/Impls_Macros_1.scala | Scala | bsd-3-clause | 231 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.component.spray.routes
import akka.testkit.TestActorRef
import com.webtrends.harness.component.spray.route.RouteManager
import org.specs2.mutable.SpecificationWithJUnit
import spray.routing.{HttpService, Directives}
import spray.testkit.Specs2RouteTest
/**
* @author Michael Cuthbert on 12/19/14.
*/
class BaseSprayRoutesSpecs extends SpecificationWithJUnit with Directives with Specs2RouteTest with HttpService {
def actorRefFactory = system
val testCommandRef = TestActorRef[BaseTestCommand]
val testActor = testCommandRef.underlyingActor
val testCustomCommandRef = TestActorRef[CustomTestCommand]
val testCustomActor = testCustomCommandRef.underlyingActor
"Test Command" should {
"should handle Get requests using SprayGet" in {
Get("/foo/key1/bar/key2") ~> RouteManager.getRoute("BaseTest_get").get ~> check {
handled must beTrue
}
}
"should not handle Post requests using SprayGet" in {
Post("/foo/key1/bar/key2") ~> RouteManager.getRoute("BaseTest_get").get ~> check {
handled must beFalse
}
}
"should handle Get requests with different keys using SprayGet" in {
Get("/foo/1234/bar/5678") ~> RouteManager.getRoute("BaseTest_get").get ~> check {
handled must beTrue
}
}
"should handle custom requests from Command using SprayCustom" in {
Get("/foo/bar") ~> RouteManager.getRoute("CustomTest_custom").get ~> check {
handled must beTrue
}
}
"should handle Head requests using SprayHead" in {
Head("/foo/key1/bar/kye2") ~> RouteManager.getRoute("BaseTest_head").get ~> check {
handled must beTrue
}
}
"should handle Options requests using SprayOption" in {
Options("/foo/key1/bar/key2") ~> RouteManager.getRoute("BaseTest_options").get ~> check {
handled must beTrue
}
}
"should handle Patch requests using SprayPatch" in {
Patch("/foo/key1/bar/key2") ~> RouteManager.getRoute("BaseTest_patch").get ~> check {
handled must beTrue
}
}
}
}
| mjwallin1/wookiee-spray | src/test/scala/com/webtrends/harness/component/spray/routes/BaseSprayRoutesSpecs.scala | Scala | apache-2.0 | 2,832 |
/*
* Copyright (c) 2015 Contributor. All rights reserved.
*/
package org.scalaide.debug.internal.expression
package context
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import org.scalaide.debug.internal.expression.Names.Scala
import org.scalaide.debug.internal.expression.proxies.JdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.BooleanJdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.PrimitiveJdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.NullJdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.UnitJdiProxy
import com.sun.jdi.ArrayType
import com.sun.jdi.ClassType
import com.sun.jdi.InterfaceType
import TypeNames._
/**
* Implements `isInstanceOfCheck` method used to mock `isInstanceOf`.
*/
private[context] trait InstanceOf {
self: Proxyfier =>
/**
* Checks if value under proxy conforms to given type.
*
* WARNING - this method is used in reflective compilation.
   * If you change its name, package, or behavior, make sure to update it there as well.
*
* @param proxy proxy to check
* @param typeName name of type to check against
   * @return a BooleanJdiProxy wrapping the result of the check
*/
final def isInstanceOfCheck(proxy: JdiProxy, typeName: String): BooleanJdiProxy =
valueProxy(this.mirrorOf(isInstanceOf(proxy, fixScalaObjectType(typeName)))).asInstanceOf[BooleanJdiProxy]
/**
* Checks if proxy matches given type.
* Handles null, Unit, primitives and delegates everything else to `handleObject`.
*/
private def isInstanceOf(proxy: JdiProxy, typeName: String): Boolean = proxy match {
case _: NullJdiProxy =>
false
case _: UnitJdiProxy =>
typeName == fixScalaPrimitives(Scala.unitType)
case _ if proxy.__type.name == Scala.boxedUnitType =>
typeName == fixScalaPrimitives(Scala.unitType)
case boxedProxy: PrimitiveJdiProxy[_, _, _] =>
val scalaPrimitiveName = fixScalaPrimitives(javaNameToScalaName(boxedProxy.primitiveName))
scalaPrimitiveName == typeName
case other => handleObject(other, typeName)
}
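  // Illustrative behaviour (editor's note, examples assumed): a null proxy never matches;
  // a proxy holding a boxed primitive such as java.lang.Integer matches the Scala
  // primitive name ("Int"); anything else is delegated to handleObject below.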
/**
* Checks if proxy matches given type.
* Handles Classes, Interfaces and Arrays (no variance support for now).
*/
private def handleObject(proxy: JdiProxy, typeName: String): Boolean = proxy.__type match {
case array: ArrayType =>
val scalaComponentType = fixScalaPrimitives(javaNameToScalaName(array.componentTypeName))
// TODO add support for variance - this needs some integration with `MethodInvoker.conformsTo`
typeName == Scala.Array(scalaComponentType)
case interface: InterfaceType =>
val parents: Set[String] = (interface +: interface.subinterfaces.asScala)
.map(_.name)(collection.breakOut)
parents.contains(typeName)
case clazz: ClassType =>
val parents: Set[String] = ((clazz +: clazz.allInterfaces.asScala) ++ superclasses(clazz))
.map(_.name)(collection.breakOut)
parents.contains(typeName)
}
private def superclasses(clazz: ClassType): Seq[ClassType] = {
@tailrec def loop(clazz: ClassType, result: Seq[ClassType]): Seq[ClassType] = {
val superclass = clazz.superclass
if (superclass == null) result
else loop(superclass, result :+ superclass)
}
loop(clazz, Seq.empty)
}
}
| scala-ide/scala-ide | org.scala-ide.sdt.debug.expression/src/org/scalaide/debug/internal/expression/context/InstanceOf.scala | Scala | bsd-3-clause | 3,325 |