code (stringlengths 5 to 1M) | repo_name (stringlengths 5 to 109) | path (stringlengths 6 to 208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5 to 1M) |
---|---|---|---|---|---|
package breeze.linalg.operators
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.generic.{MMRegistry2, Multiproc2}
/**
* This is the capability trait for operations of the form a += b, a -= b, etc.
* These traits are usually implemented in (a supertype of) the companion object of
* one of the operands.
* @tparam A
* @tparam B
* @tparam Op
*/
trait BinaryUpdateOp[A, B, Op<:OpType] {
def apply(a: A, b: B)
}
object BinaryUpdateOp {
/** Just a magic type lambda to make registries happy. */
type Bind[Op <:OpType] = { type Sig[A, B] = BinaryUpdateOp[A, B, Op]}
}
/**
* This is a special kind of BinaryUpdateOp that supports registration
* of specialized implementations for a given operation.
* @author dlwh
*/
// This trait could reuse code from Multimethod2, but not doing so allows us to reduce code size a lot
// because we don't need BinaryOps to inherit from Function2, which has a lot of @specialized cruft.
trait BinaryUpdateRegistry[A<:AnyRef, B, Op<:OpType] extends BinaryUpdateOp[A, B, Op] with MMRegistry2[BinaryUpdateOp[_ <: A, _ <: B, Op]] {
protected def bindingMissing(a: A, b: B):Unit = throw new UnsupportedOperationException("Types not found! " + a + " " + b + " " + ops)
protected def multipleOptions(a: A, b: B, m: Map[(Class[_],Class[_]),BinaryUpdateOp[_ <: A, _ <: B, Op]]):Unit = {
throw new RuntimeException("Multiple bindings for method: " + m)
}
def apply(a: A, b: B) {
val ac = a.asInstanceOf[AnyRef].getClass
val bc = b.asInstanceOf[AnyRef].getClass
val cached = cache.get(ac -> bc)
if(cached != null) {
cached match {
case None => bindingMissing(a, b)
case Some(m) =>
m.asInstanceOf[BinaryUpdateOp[A, B, Op]].apply(a, b)
}
} else {
val options = resolve(ac, bc.asInstanceOf[Class[_<:B]])
options.size match {
case 0 =>
cache.put(ac -> bc, None)
bindingMissing(a, b)
case 1 =>
val method = options.values.head
cache.put(ac -> bc, Some(method))
method.asInstanceOf[BinaryUpdateOp[A, B, Op]].apply(a, b)
case _ =>
val selected = selectBestOption(options)
if(selected.size != 1)
multipleOptions(a, b, options)
else {
val method = selected.values.head
cache.put(ac -> bc, Some(method))
method.asInstanceOf[BinaryUpdateOp[A, B, Op]].apply(a, b)
}
}
}
}
def register[AA<:A, BB<:B](op: BinaryUpdateOp[AA, BB, Op])(implicit manA: Manifest[AA], manB: Manifest[BB]) {
super.register(manA.runtimeClass, manB.runtimeClass, op)
}
}
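// Hedged usage sketch, not part of the original file: it assumes the `OpAdd <: OpType` marker
// type from this package, and shows how an in-place a += b capability for plain arrays could be
// declared with the trait above and later registered with a BinaryUpdateRegistry by runtime class.
object BinaryUpdateOpExample {
  implicit object arrayAddInto extends BinaryUpdateOp[Array[Double], Array[Double], OpAdd] {
    def apply(a: Array[Double], b: Array[Double]): Unit = {
      require(a.length == b.length, "arrays must have the same length")
      var i = 0
      while (i < a.length) { a(i) += b(i); i += 1 } // mutate the left operand in place
    }
  }
  // A BinaryUpdateRegistry keyed on runtime classes would pick this up via:
  //   registry.register(arrayAddInto)
}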
| ktakagaki/breeze | src/main/scala/breeze/linalg/operators/BinaryUpdateOp.scala | Scala | apache-2.0 | 3,160 |
package com.github.mrpowers.spark.daria.sql.types
import org.apache.spark.sql.types.StructField
object StructFieldHelpers {
def customEquals(s1: StructField, s2: StructField, ignoreNullable: Boolean = false): Boolean = {
if (ignoreNullable) {
s1.name == s2.name &&
s1.dataType == s2.dataType
} else {
s1.name == s2.name &&
s1.dataType == s2.dataType &&
s1.nullable == s2.nullable
}
}
def prettyFormat(sf: StructField): String = {
s"""StructField("${sf.name}", ${sf.dataType}, ${sf.nullable})"""
}
}
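// Hedged usage sketch, not part of the original file: compares two StructFields with and
// without nullability taken into account, and prints the copy/paste-friendly representation.
object StructFieldHelpersExample {
  import org.apache.spark.sql.types.IntegerType

  def main(args: Array[String]): Unit = {
    val s1 = StructField("age", IntegerType, nullable = true)
    val s2 = StructField("age", IntegerType, nullable = false)
    println(StructFieldHelpers.customEquals(s1, s2))                        // false: nullability differs
    println(StructFieldHelpers.customEquals(s1, s2, ignoreNullable = true)) // true: name and type match
    println(StructFieldHelpers.prettyFormat(s1))                            // StructField("age", IntegerType, true)
  }
}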
| MrPowers/spark-daria | src/main/scala/com/github/mrpowers/spark/daria/sql/types/StructFieldHelpers.scala | Scala | mit | 558 |
package forms
import com.google.inject.Inject
import models.LoginInfo
import play.api.data.Forms._
import play.api.data._
import services.hash.PasswordHasher
import services.user.UserDAO
class LoginForm @Inject()(userDAO: UserDAO,
passwordHasher: PasswordHasher) {
val form = Form(
mapping(
"username" -> text,
"password" -> text
)(LoginInfo.apply)(LoginInfo.unapply) verifying("Wrong username or password", fields => fields match {
case loginInfo => verifyLoginInfo(loginInfo)
})
)
def verifyLoginInfo(loginInfo: LoginInfo): Boolean = {
userDAO.retrieveByName(loginInfo.username) match {
case None => false
case Some(user) => passwordHasher.check(user.password, loginInfo.password)
}
}
}
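// Hedged usage sketch, not part of the original file: `loginForm` is assumed to be provided by
// dependency injection, and the class name is illustrative. Binding a credentials map runs the
// `verifying` check above, so folding the bound form yields a simple pass/fail.
class LoginFormUsageExample(loginForm: LoginForm) {
  def credentialsValid(username: String, password: String): Boolean =
    loginForm.form
      .bind(Map("username" -> username, "password" -> password))
      .fold(_ => false, _ => true)
}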
| GMadorell/play-jwt | app/forms/LoginForm.scala | Scala | mit | 777 |
/*
* Copyright 2015 - 2016 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rbmhtechnology.eventuate
import scala.collection.immutable.SortedMap
import com.rbmhtechnology.eventuate.EventsourcedView.Handler
import scala.util._
private[eventuate] object PersistOnEvent {
/**
* Records a `persistOnEvent` invocation.
*/
case class PersistOnEventInvocation(event: Any, customDestinationAggregateIds: Set[String])
/**
* A request sent by [[PersistOnEvent]] instances to `self` in order to persist events recorded by `invocations`.
* @param persistOnEventSequenceNr the sequence number of the event that caused this request.
* @param persistOnEventId [[EventId]] of the event that caused this request. This is optional for backwards
* compatibility, as old snapshots might contain `PersistOnEventRequest`s
* without this field being defined.
*/
case class PersistOnEventRequest(persistOnEventSequenceNr: Long, persistOnEventId: Option[EventId], invocations: Vector[PersistOnEventInvocation], instanceId: Int)
/**
* Default `persist` handler to use when processing [[PersistOnEventRequest]]s in [[EventsourcedActor]].
*/
val DefaultHandler: Handler[Any] = {
case Success(_) =>
case Failure(e) => throw new PersistOnEventException(e)
}
}
/**
* Thrown to indicate that an asynchronous `persistOnEvent` operation failed.
*/
class PersistOnEventException(cause: Throwable) extends RuntimeException(cause)
/**
* Can be mixed into [[EventsourcedActor]] for writing new events within the `onEvent` handler. New events are
* written with the asynchronous [[persistOnEvent]] method. In contrast to [[EventsourcedActor.persist persist]],
* one can '''not''' prevent command processing from running concurrently to [[persistOnEvent]] by setting
* [[EventsourcedActor.stateSync stateSync]] to `true`.
*
* A `persistOnEvent` operation is reliable and idempotent. Once the event has been successfully written, a repeated
* `persistOnEvent` call for that event during event replay has no effect. A failed `persistOnEvent` operation will
* restart the actor by throwing a [[PersistOnEventException]]. After restart, failed `persistOnEvent` operations
* are automatically re-tried.
*/
trait PersistOnEvent extends EventsourcedActor {
import PersistOnEvent._
private var invocations: Vector[PersistOnEventInvocation] = Vector.empty
/**
* [[PersistOnEventRequest]] by sequence number of the event that caused the persist on event request.
*
* This map keeps the requests in the order they were submitted.
*/
private var requestsBySequenceNr: SortedMap[Long, PersistOnEventRequest] = SortedMap.empty
/**
* [[PersistOnEventRequest]] by [[EventId]] of the event that caused the persist on event request.
*
* This map ensures that requests can be confirmed properly even if the event that caused the
* request changed its local sequence number due to a disaster recovery.
*
* @see https://github.com/RBMHTechnology/eventuate/issues/385
*/
private var requestsByEventId: Map[EventId, PersistOnEventRequest] = Map.empty
/**
* Asynchronously persists the given `event`. Applications that want to handle the persisted event should define
* the event handler at that event. By default, the event is routed to event-sourced destinations with an undefined
* `aggregateId`. If this actor's `aggregateId` is defined it is additionally routed to all actors with the same
* `aggregateId`. Further routing destinations can be defined with the `customDestinationAggregateIds` parameter.
*/
final def persistOnEvent[A](event: A, customDestinationAggregateIds: Set[String] = Set()): Unit =
invocations = invocations :+ PersistOnEventInvocation(event, customDestinationAggregateIds)
/**
* Internal API.
*/
override private[eventuate] def receiveEvent(event: DurableEvent): Unit = {
super.receiveEvent(event)
if (event.emitterId == id) findPersistOnEventRequest(event).foreach(confirmRequest)
if (invocations.nonEmpty) {
deliverRequest(PersistOnEventRequest(lastSequenceNr, Some(lastHandledEvent.id), invocations, instanceId))
invocations = Vector.empty
}
}
/**
* Internal API.
*/
override private[eventuate] def snapshotCaptured(snapshot: Snapshot): Snapshot = {
requestsBySequenceNr.values.foldLeft(super.snapshotCaptured(snapshot)) {
case (s, pr) => s.addPersistOnEventRequest(pr)
}
}
/**
* Internal API.
*/
override private[eventuate] def snapshotLoaded(snapshot: Snapshot): Unit = {
super.snapshotLoaded(snapshot)
snapshot.persistOnEventRequests.foreach { pr =>
val requestWithUpdatedInstanceId = pr.copy(instanceId = instanceId)
requestsBySequenceNr += (pr.persistOnEventSequenceNr -> requestWithUpdatedInstanceId)
pr.persistOnEventId.foreach(requestsByEventId += _ -> requestWithUpdatedInstanceId)
}
}
/**
* Internal API.
*/
private[eventuate] override def recovered(): Unit = {
super.recovered()
redeliverUnconfirmedRequests()
}
/**
* Internal API.
*/
private[eventuate] def unconfirmedRequests: Set[Long] =
requestsBySequenceNr.keySet
private def deliverRequest(request: PersistOnEventRequest): Unit = {
requestsBySequenceNr += request.persistOnEventSequenceNr -> request
request.persistOnEventId.foreach(requestsByEventId += _ -> request)
if (!recovering) self ! request
}
private def confirmRequest(request: PersistOnEventRequest): Unit = {
request.persistOnEventId.foreach(requestsByEventId -= _)
requestsBySequenceNr -= request.persistOnEventSequenceNr
}
private def findPersistOnEventRequest(event: DurableEvent) =
event
.persistOnEventId.flatMap(requestsByEventId.get)
// Fallback for old events that have no persistOnEventId
.orElse(event.persistOnEventSequenceNr.flatMap(requestsBySequenceNr.get))
private def redeliverUnconfirmedRequests(): Unit = requestsBySequenceNr.foreach {
case (_, request) => self ! request
}
}
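// Hedged usage sketch, not part of the original file: a minimal event-sourced actor that, while
// handling a replicated OrderCreated event, reliably persists a derived follow-up event via
// `persistOnEvent`. The member names (id, eventLog, onCommand, onEvent) follow the
// EventsourcedActor contract; the event classes are made up for illustration.
object PersistOnEventExample {
  case class OrderCreated(orderId: String)
  case class OrderLogged(orderId: String)

  class OrderLogger(val id: String, val eventLog: akka.actor.ActorRef)
    extends EventsourcedActor with PersistOnEvent {

    override def onCommand: Receive = {
      case _ => // commands are handled as usual; persistOnEvent is only called from onEvent
    }

    override def onEvent: Receive = {
      case OrderCreated(orderId) =>
        // reliable and idempotent: replaying OrderCreated after the write succeeded has no effect
        persistOnEvent(OrderLogged(orderId))
    }
  }
}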
| RBMHTechnology/eventuate | eventuate-core/src/main/scala/com/rbmhtechnology/eventuate/PersistOnEvent.scala | Scala | apache-2.0 | 6,724 |
package actorthread.v2
import akka.actor.ActorLogging
import akka.actor.Actor
import akka.actor.ActorRef
object Forwarder {
case class Ping(id: Int, origin: ActorRef)
}
class Forwarder(returner: ActorRef) extends Actor with ActorLogging {
import Forwarder._
def receive: Receive = {
case p: Ping =>
log.info(s"Received $p")
returner ! Returner.Pang(p.id, p.origin)
}
}
| sebastian-dasse/uni-scala | ScalaKurs(Knabe)_S_sbt/src/main/scala/actorthread/v2/Forwarder.scala | Scala | mit | 403 |
package breeze.linalg.functions
import breeze.linalg.{DenseVector, SparseVector, argsort, argtopk}
import org.scalacheck.Prop
import org.scalatest.FunSuite
import org.scalatest.prop.Checkers
/**
* Created by dlwh on 9/18/15.
*/
class argsortTest extends FunSuite with Checkers {
test("argsort dv") {
check(Prop.forAll{ (array: Array[Double]) =>
val ax = argsort(new DenseVector(array))
ax.toIndexedSeq.map(array) == array.sorted.toIndexedSeq
})
}
}
class argtopkTest extends FunSuite {
test("argtopk vector") {
val dv = DenseVector(2, 0, 3, 2, -1)
assert(argtopk(dv, 0) === Seq.empty)
assert(argtopk(dv, 1) === Seq(2))
assert(argtopk(dv, 3).toSet === Set(0, 2, 3))
assert(argtopk(dv, 5).toSet === Set(0, 1, 2, 3, 4))
val sv = SparseVector(5)(0 -> 2, 2-> 3, 3-> 2)
assert(argtopk(sv, 0) === Seq.empty)
assert(argtopk(sv, 1) === Seq(2))
assert(argtopk(sv, 3).toSet === Set(0, 2, 3))
assert(argtopk(sv, 5).toSet === Set(0, 1, 2, 3, 4))
}
}
| crealytics/breeze | math/src/test/scala/breeze/linalg/functions/argsortTest.scala | Scala | apache-2.0 | 1,013 |
package edu.gemini.model.p1.targetio
import api._
import uk.ac.starlink.table.{TableBuilder, StarTableWriter}
import uk.ac.starlink.fits.{FitsTableBuilder, FitsTableWriter}
import uk.ac.starlink.table.formats._
import uk.ac.starlink.votable.{VOTableBuilder, VOTableWriter}
import edu.gemini.model.p1.targetio.api.FileType._
package object table {
def stilWriter(ftype: FileType): StarTableWriter =
ftype match {
case Csv => new CsvTableWriter()
case Fits => new FitsTableWriter()
case Tst => new TstTableWriter()
case Vo => new VOTableWriter()
}
def stilBuilder(ftype: FileType): TableBuilder =
ftype match {
case Csv => new CsvTableBuilder()
case Fits => new FitsTableBuilder()
case Tst => new TstTableBuilder()
case Vo => new VOTableBuilder()
}
}
| arturog8m/ocs | bundle/edu.gemini.model.p1.targetio/src/main/scala/edu/gemini/model/p1/targetio/table/package.scala | Scala | bsd-3-clause | 836 |
/* Copyright 2017-19, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.io.events
import org.platanios.tensorflow.proto.Event
import org.junit.{Rule, Test}
import org.junit.rules.TemporaryFolder
import org.scalatestplus.junit.JUnitSuite
import java.nio.file.{Files, Path, StandardOpenOption}
/**
* @author Emmanouil Antonios Platanios
*/
class EventFileReaderSuite extends JUnitSuite {
private[this] val record: Array[Byte] = Array(
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xa3, 0x7f, 0x4b, 0x22, 0x09, 0x00, 0x00, 0xc0,
0x25, 0xdd, 0x75, 0xd5, 0x41, 0x1a, 0x0d, 0x62,
0x72, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x76, 0x65,
0x6e, 0x74, 0x3a, 0x31, 0xec, 0xf3, 0x32, 0x8d).map(_.toByte)
private[this] val _tempFolder: TemporaryFolder = new TemporaryFolder
@Rule def tempFolder: TemporaryFolder = _tempFolder
private[this] def writeToFile(filePath: Path, data: Array[Byte]): Unit = {
Files.write(filePath, data, StandardOpenOption.APPEND)
}
@Test def testEmptyEventFile(): Unit = {
val filePath = tempFolder.newFile().toPath
writeToFile(filePath, Array.empty[Byte])
val reader = EventFileReader(filePath)
assert(reader.load().toSeq === Seq.empty[Event])
}
@Test def testSingleWrite(): Unit = {
val filePath = tempFolder.newFile().toPath
writeToFile(filePath, record)
val reader = EventFileReader(filePath)
val events = reader.load().toSeq
assert(events.size === 1)
assert(events.head.getWallTime === 1440183447.0)
}
@Test def testMultipleWrites(): Unit = {
val filePath = tempFolder.newFile().toPath
writeToFile(filePath, record)
val reader = EventFileReader(filePath)
assert(reader.load().toSeq.size === 1)
writeToFile(filePath, record)
assert(reader.load().toSeq.size === 1)
}
@Test def testMultipleLoads(): Unit = {
val filePath = tempFolder.newFile().toPath
writeToFile(filePath, record)
val reader = EventFileReader(filePath)
reader.load()
reader.load()
assert(reader.load().toSeq.size === 1)
}
@Test def testMultipleWritesAtOnce(): Unit = {
val filePath = tempFolder.newFile().toPath
writeToFile(filePath, record)
writeToFile(filePath, record)
val reader = EventFileReader(filePath)
assert(reader.load().toSeq.size === 2)
}
@Test def testMultipleWritesWithBadWrite(): Unit = {
val filePath = tempFolder.newFile().toPath
writeToFile(filePath, record)
writeToFile(filePath, record)
// Test that we ignore partial record writes at the end of the file.
writeToFile(filePath, Array(1, 2, 3).map(_.toByte))
val reader = EventFileReader(filePath)
assert(reader.load().toSeq.size === 2)
}
}
| eaplatanios/tensorflow_scala | modules/api/src/test/scala/org/platanios/tensorflow/api/io/events/EventFileReaderSuite.scala | Scala | apache-2.0 | 3,300 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.integration
import org.apache.kafka.common.config.ConfigException
import org.junit.{After, Before, Ignore, Test}
import scala.util.Random
import org.apache.log4j.{Level, Logger}
import java.util.Properties
import java.util.concurrent.ExecutionException
import kafka.consumer.{Consumer, ConsumerConfig}
import kafka.serializer.StringDecoder
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.utils.CoreUtils
import kafka.utils.TestUtils._
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.common.errors.TimeoutException
import org.junit.Assert._
class UncleanLeaderElectionTest extends ZooKeeperTestHarness {
val brokerId1 = 0
val brokerId2 = 1
// controlled shutdown is needed for these tests, but we can trim the retry count and backoff interval to
// reduce test execution time
val enableControlledShutdown = true
var configProps1: Properties = null
var configProps2: Properties = null
var configs: Seq[KafkaConfig] = Seq.empty[KafkaConfig]
var servers: Seq[KafkaServer] = Seq.empty[KafkaServer]
val random = new Random()
val topic = "topic" + random.nextLong
val partitionId = 0
val kafkaApisLogger = Logger.getLogger(classOf[kafka.server.KafkaApis])
val networkProcessorLogger = Logger.getLogger(classOf[kafka.network.Processor])
val syncProducerLogger = Logger.getLogger(classOf[kafka.producer.SyncProducer])
val eventHandlerLogger = Logger.getLogger(classOf[kafka.producer.async.DefaultEventHandler[Object, Object]])
@Before
override def setUp() {
super.setUp()
configProps1 = createBrokerConfig(brokerId1, zkConnect)
configProps2 = createBrokerConfig(brokerId2, zkConnect)
for (configProps <- List(configProps1, configProps2)) {
configProps.put("controlled.shutdown.enable", enableControlledShutdown.toString)
configProps.put("controlled.shutdown.max.retries", "1")
configProps.put("controlled.shutdown.retry.backoff.ms", "1000")
}
// temporarily set loggers to a higher level so that tests run quietly
kafkaApisLogger.setLevel(Level.FATAL)
networkProcessorLogger.setLevel(Level.FATAL)
syncProducerLogger.setLevel(Level.FATAL)
eventHandlerLogger.setLevel(Level.FATAL)
}
@After
override def tearDown() {
servers.foreach(server => shutdownServer(server))
servers.foreach(server => CoreUtils.delete(server.config.logDirs))
// restore log levels
kafkaApisLogger.setLevel(Level.ERROR)
networkProcessorLogger.setLevel(Level.ERROR)
syncProducerLogger.setLevel(Level.ERROR)
eventHandlerLogger.setLevel(Level.ERROR)
super.tearDown()
}
private def startBrokers(cluster: Seq[Properties]) {
for (props <- cluster) {
val config = KafkaConfig.fromProps(props)
val server = createServer(config)
configs ++= List(config)
servers ++= List(server)
}
}
@Test
def testUncleanLeaderElectionEnabled(): Unit = {
// enable unclean leader election
configProps1.put("unclean.leader.election.enable", "true")
configProps2.put("unclean.leader.election.enable", "true")
startBrokers(Seq(configProps1, configProps2))
// create topic with 1 partition, 2 replicas, one on each broker
adminZkClient.createOrUpdateTopicPartitionAssignmentPathInZK(topic, Map(partitionId -> Seq(brokerId1, brokerId2)))
verifyUncleanLeaderElectionEnabled
}
@Test
@Ignore // Should be re-enabled after KAFKA-3096 is fixed
def testUncleanLeaderElectionDisabled(): Unit = {
// unclean leader election is disabled by default
startBrokers(Seq(configProps1, configProps2))
// create topic with 1 partition, 2 replicas, one on each broker
adminZkClient.createOrUpdateTopicPartitionAssignmentPathInZK(topic, Map(partitionId -> Seq(brokerId1, brokerId2)))
verifyUncleanLeaderElectionDisabled
}
@Test
def testUncleanLeaderElectionEnabledByTopicOverride(): Unit = {
// disable unclean leader election globally, but enable for our specific test topic
configProps1.put("unclean.leader.election.enable", "false")
configProps2.put("unclean.leader.election.enable", "false")
startBrokers(Seq(configProps1, configProps2))
// create topic with 1 partition, 2 replicas, one on each broker, and unclean leader election enabled
val topicProps = new Properties()
topicProps.put("unclean.leader.election.enable", "true")
adminZkClient.createOrUpdateTopicPartitionAssignmentPathInZK(topic, Map(partitionId -> Seq(brokerId1, brokerId2)),
topicProps)
verifyUncleanLeaderElectionEnabled
}
@Test
@Ignore // Should be re-enabled after KAFKA-3096 is fixed
def testCleanLeaderElectionDisabledByTopicOverride(): Unit = {
// enable unclean leader election globally, but disable for our specific test topic
configProps1.put("unclean.leader.election.enable", "true")
configProps2.put("unclean.leader.election.enable", "true")
startBrokers(Seq(configProps1, configProps2))
// create topic with 1 partition, 2 replicas, one on each broker, and unclean leader election disabled
val topicProps = new Properties()
topicProps.put("unclean.leader.election.enable", "false")
adminZkClient.createOrUpdateTopicPartitionAssignmentPathInZK(topic, Map(partitionId -> Seq(brokerId1, brokerId2)),
topicProps)
verifyUncleanLeaderElectionDisabled
}
@Test
def testUncleanLeaderElectionInvalidTopicOverride(): Unit = {
startBrokers(Seq(configProps1))
// create topic with an invalid value for unclean leader election
val topicProps = new Properties()
topicProps.put("unclean.leader.election.enable", "invalid")
intercept[ConfigException] {
adminZkClient.createOrUpdateTopicPartitionAssignmentPathInZK(topic, Map(partitionId -> Seq(brokerId1)), topicProps)
}
}
def verifyUncleanLeaderElectionEnabled(): Unit = {
// wait until leader is elected
val leaderId = waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId)
debug("Leader for " + topic + " is elected to be: %s".format(leaderId))
assertTrue("Leader id is set to expected value for topic: " + topic, leaderId == brokerId1 || leaderId == brokerId2)
// the non-leader broker is the follower
val followerId = if (leaderId == brokerId1) brokerId2 else brokerId1
debug("Follower for " + topic + " is: %s".format(followerId))
produceMessage(servers, topic, "first")
waitUntilMetadataIsPropagated(servers, topic, partitionId)
assertEquals(List("first"), consumeAllMessages(topic))
// shutdown follower server
servers.filter(server => server.config.brokerId == followerId).map(server => shutdownServer(server))
produceMessage(servers, topic, "second")
assertEquals(List("first", "second"), consumeAllMessages(topic))
//remove any previous unclean election metric
servers.map(server => server.kafkaController.controllerContext.stats.removeMetric("UncleanLeaderElectionsPerSec"))
// shutdown leader and then restart follower
servers.filter(server => server.config.brokerId == leaderId).map(server => shutdownServer(server))
val followerServer = servers.find(_.config.brokerId == followerId).get
followerServer.startup()
// wait until new leader is (uncleanly) elected
waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(followerId))
assertEquals(1, followerServer.kafkaController.controllerContext.stats.uncleanLeaderElectionRate.count())
produceMessage(servers, topic, "third")
// second message was lost due to unclean election
assertEquals(List("first", "third"), consumeAllMessages(topic))
}
def verifyUncleanLeaderElectionDisabled(): Unit = {
// wait until leader is elected
val leaderId = waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId)
debug("Leader for " + topic + " is elected to be: %s".format(leaderId))
assertTrue("Leader id is set to expected value for topic: " + topic, leaderId == brokerId1 || leaderId == brokerId2)
// the non-leader broker is the follower
val followerId = if (leaderId == brokerId1) brokerId2 else brokerId1
debug("Follower for " + topic + " is: %s".format(followerId))
produceMessage(servers, topic, "first")
waitUntilMetadataIsPropagated(servers, topic, partitionId)
assertEquals(List("first"), consumeAllMessages(topic))
// shutdown follower server
servers.filter(server => server.config.brokerId == followerId).map(server => shutdownServer(server))
produceMessage(servers, topic, "second")
assertEquals(List("first", "second"), consumeAllMessages(topic))
//remove any previous unclean election metric
servers.map(server => server.kafkaController.controllerContext.stats.removeMetric("UncleanLeaderElectionsPerSec"))
// shutdown leader and then restart follower
servers.filter(server => server.config.brokerId == leaderId).map(server => shutdownServer(server))
val followerServer = servers.find(_.config.brokerId == followerId).get
followerServer.startup()
// verify that unclean election to non-ISR follower does not occur
waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(-1))
assertEquals(0, followerServer.kafkaController.controllerContext.stats.uncleanLeaderElectionRate.count())
// message production and consumption should both fail while leader is down
try {
produceMessage(servers, topic, "third")
fail("Message produced while leader is down should fail, but it succeeded")
} catch {
case e: ExecutionException if e.getCause.isInstanceOf[TimeoutException] => // expected
}
assertEquals(List.empty[String], consumeAllMessages(topic))
// restart leader temporarily to send a successfully replicated message
servers.filter(server => server.config.brokerId == leaderId).map(server => server.startup())
waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(leaderId))
produceMessage(servers, topic, "third")
waitUntilMetadataIsPropagated(servers, topic, partitionId)
servers.filter(server => server.config.brokerId == leaderId).map(server => shutdownServer(server))
// verify clean leader transition to ISR follower
waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(followerId))
// verify messages can be consumed from ISR follower that was just promoted to leader
assertEquals(List("first", "second", "third"), consumeAllMessages(topic))
}
private def shutdownServer(server: KafkaServer) = {
server.shutdown()
server.awaitShutdown()
}
private def consumeAllMessages(topic: String) : List[String] = {
// use a fresh consumer group every time so that we don't need to mess with disabling auto-commit or
// resetting the ZK offset
val consumerProps = createConsumerProperties(zkConnect, "group" + random.nextLong, "id", 1000)
val consumerConnector = Consumer.create(new ConsumerConfig(consumerProps))
val messageStream = consumerConnector.createMessageStreams(Map(topic -> 1), new StringDecoder(), new StringDecoder())
val messages = getMessages(messageStream)
consumerConnector.shutdown
messages
}
}
| sebadiaz/kafka | core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala | Scala | apache-2.0 | 12,036 |
package scalarank.datapoint
import org.nd4j.linalg.api.ndarray.INDArray
/**
* A data point, this is typically a feature vector containing query-document features
*/
abstract class Datapoint {
/**
* The features as a dense vector
*/
val features: INDArray
}
/**
* For labeling data points with relevance
*/
trait Relevance {
/**
* The relevance of the data point. Typically higher means more relevant.
*/
val relevance: Double
}
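// Hedged sketch, not part of the original file: a minimal concrete, relevance-labelled datapoint,
// e.g. new LabelledDatapoint(Nd4j.create(Array(0.3, 1.0, 0.0)), relevance = 2.0) for a
// query-document pair with three features (the Nd4j factory call is an assumption of this sketch).
class LabelledDatapoint(override val features: INDArray,
                        override val relevance: Double) extends Datapoint with Relevance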
| rjagerman/scalarank | src/main/scala/scalarank/datapoint/Datapoint.scala | Scala | mit | 468 |
package org.bitcoins.explorer.model
import org.bitcoins.crypto.SchnorrPublicKey
case class Oracle(pubkey: SchnorrPublicKey, oracleName: String)
| bitcoin-s/bitcoin-s | oracle-explorer-client/src/main/scala/org/bitcoins/explorer/model/Oracle.scala | Scala | mit | 146 |
/*
* Copyright 2014 DataGenerator Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.finra.datagenerator.common.SocialNetwork_Example.scala
import org.finra.datagenerator.common.Graph.Node
import org.finra.datagenerator.common.Helpers.RandomHelper
import org.finra.datagenerator.common.NodeData._
import scala.beans.BeanProperty
import scala.collection.mutable.ListBuffer
/**
* Description: Defines all user state and transition probability information.
* Each type is defined and mapped to a set of predicates determining the allowable parent and child types and whether or not to create them,
* as well as the actual methods and business logic to create the parent/child states for each allowable state transition (edge/link).
*/
class UserTypes extends NodeDataTypes[User, UserStub, UserType.UserType, UserTypes] {
def allInitialDataTypes: collection.immutable.HashSet[UserType.UserType] = {
collection.immutable.HashSet[UserType.UserType](UserType.Admin)
}
def allDataTypes: collection.immutable.HashSet[UserType.UserType] = {
collection.immutable.HashSet[UserType.UserType](UserType.Admin, UserType.SocialNetworkEmployee, UserType.PublicUser)
}
def dataTransitions: UserTransitions.type = UserTransitions
}
import NodeDataType.NodeDataType
object UserType {
abstract class UserType extends NodeDataType[User, UserStub, UserTypes, UserType] {
@BeanProperty def nodeDataTypes: UserTypes = new UserTypes()
def asStub: UserStub = new UserStub(this)
// We don't have any engines that use these two methods yet, but they might be useful at some point.
override def probabilisticallyLinkToExistingParentDataNode(dataNode: Node[User]): Unit = {}
override def probabilisticallyLinkToExistingParentStubNode(stubNode: Node[UserStub]): Unit = {}
}
// Admin can friend request Admin, SocialNetworkEmployee, and PublicUser
// SocialNetworkEmployee can friend request SocialNetworkEmployee and PublicUser
// PublicUser can friend request PublicUser
case object Admin extends UserType {
override def dataType: NodeDataType[User, UserStub, UserTypes, UserType] = UserType.Admin
override val name = "Admin"
override def getAllowableChildTypes(nodeOfThisType: Node[UserStub]): Seq[UserType.UserType] = {
nodeDataTypes.allDataTypes.toSeq
}
override def getAllowableParentTypes(nodeOfThisType: Node[UserStub]): Seq[UserType.UserType] = {
Seq[UserType.UserType](UserType.Admin)
}
override def childStateTransitionPredicates[T_DisplayableData <: DisplayableData](
node: Node[T_DisplayableData], maxToGenerate: Int, probabilityMultiplier: Int)
: ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))] = {
ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))](
(UserType.Admin, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.07)),
(UserType.SocialNetworkEmployee, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.1)),
(UserType.PublicUser, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.15))
)
}
override def parentStateTransitionPredicates[T_DisplayableData <: DisplayableData](
node: Node[T_DisplayableData], maxToGenerate: Int, probabilityMultiplier: Int)
: ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))] = {
ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))](
(UserType.Admin, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier * 0.07))
)
}
}
case object SocialNetworkEmployee extends UserType {
override def dataType: NodeDataType[User, UserStub, UserTypes, UserType] = UserType.Admin
override val name = "SocialNetworkEmployee"
override def getAllowableChildTypes(nodeOfThisType: Node[UserStub]): Seq[UserType.UserType] = {
Seq[UserType.UserType](UserType.SocialNetworkEmployee, UserType.PublicUser)
}
override def getAllowableParentTypes(nodeOfThisType: Node[UserStub]): Seq[UserType.UserType] = {
Seq[UserType.UserType](UserType.Admin, UserType.SocialNetworkEmployee)
}
override def childStateTransitionPredicates[T_DisplayableData <: DisplayableData](
node: Node[T_DisplayableData], maxToGenerate: Int, probabilityMultiplier: Int)
: ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))] = {
ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))](
(UserType.SocialNetworkEmployee, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.25)),
(UserType.PublicUser, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.30))
)
}
override def parentStateTransitionPredicates[T_DisplayableData <: DisplayableData](
node: Node[T_DisplayableData], maxToGenerate: Int, probabilityMultiplier: Int)
: ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))] = {
ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))](
(UserType.Admin, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.03)),
(UserType.SocialNetworkEmployee, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.25))
)
}
}
case object PublicUser extends UserType {
override def dataType: NodeDataType[User, UserStub, UserTypes, UserType] = UserType.Admin
override val name = "PublicUser"
override def getAllowableChildTypes(nodeOfThisType: Node[UserStub]): Seq[UserType.UserType] = {
Seq[UserType.UserType](UserType.PublicUser)
}
override def getAllowableParentTypes(nodeOfThisType: Node[UserStub]): Seq[UserType.UserType] = {
nodeDataTypes.allDataTypes.toSeq
}
override def childStateTransitionPredicates[T_DisplayableData <: DisplayableData](
node: Node[T_DisplayableData], maxToGenerate: Int, probabilityMultiplier: Int)
: ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))] = {
ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))](
(UserType.PublicUser, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.35))
)
}
override def parentStateTransitionPredicates[T_DisplayableData <: DisplayableData](
node: Node[T_DisplayableData], maxToGenerate: Int, probabilityMultiplier: Int)
: ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))]= {
ListBuffer[(UserType.UserType, (Node[T_DisplayableData] => Boolean))](
(UserType.Admin, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.01)),
(UserType.SocialNetworkEmployee, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.02)),
(UserType.PublicUser, (sourceEventNode: Node[T_DisplayableData]) => RandomHelper.evaluateProbability(probabilityMultiplier*0.35))
)
}
}
}
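// Hedged sketch, not part of the original file: shows how the type universe defined above can be
// queried. Graph-building engines would use the transition predicates, which need Node instances
// and are therefore omitted here; the object name is illustrative.
object UserTypesExample {
  def main(args: Array[String]): Unit = {
    val userTypes = new UserTypes()
    println(userTypes.allInitialDataTypes) // only Admin can start a graph
    println(userTypes.allDataTypes)        // Admin, SocialNetworkEmployee, PublicUser
    println(UserType.PublicUser.asStub)    // a stub payload for a PublicUser node
  }
}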
| Brijeshrpatel9/SingleThreaderProcessingDG | dg-common/src/main/code/org/finra/datagenerator/common/SocialNetwork_Example/scala/UserType.scala | Scala | apache-2.0 | 7,966 |
package breeze.stats.distributions;
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import org.scalatest._;
import org.scalatest.junit._;
import org.scalatest.prop._;
import org.scalacheck._;
import org.junit.runner.RunWith
import breeze.stats.DescriptiveStats._;
import breeze.linalg.{DenseVector, DenseMatrix, norm}
@RunWith(classOf[JUnitRunner])
class BetaTest extends FunSuite with Checkers with MomentsTestBase[Double] /*with ExpFamTest[Beta,Double]*/ {
val expFam = Beta
import Arbitrary.arbitrary;
def arbParameter = Arbitrary{
for( mean <- arbitrary[Double].map{x => math.abs(x) % 100.0 + 1E-4};
std <- arbitrary[Double].map{x => math.abs(x) % 100 + 1E-4}
) yield (mean,std)
}
def paramsClose(p: (Double,Double), b: (Double,Double)) = {
val y1 = (p._1 - b._1).abs / (p._1.abs / 2 + b._1.abs / 2+ 1) < 1E-1
val y2 = (p._2 - b._2).abs / (p._2.abs / 2 + b._2.abs / 2+ 1) < 1E-1
y1 && y2
}
import Arbitrary.arbitrary;
def asDouble(x: Double) = x
def fromDouble(x: Double) = x
implicit def arbDistr = Arbitrary {
for(a <- arbitrary[Double].map{x => math.abs(x) % 10000.0 + 1.1};
b <- arbitrary[Double].map {x => math.abs(x) % 8.0 + 1.1}) yield new Beta(a,b);
}
test("#15 test 1: Small a and b") {
val a = 0.0014364182264741652
val b = 0.0024709345620239687
val n = 1000000
val samples = new Beta(a,b).sample(n)
val mean = samples.sum / n
val true_mean = a / (a+b)
assert(math.abs(mean - true_mean) < 1e-2, (mean, true_mean))
}
test("#15 test 2: Smaller a and b") {
val a = 7.672385302336129E-4
val b = 0.5028709732819038
val n = 100000
val samples = new Beta(a,b).sample(n)
val mean = samples.sum / n
val true_mean = a / (a+b)
assert(math.abs(mean - true_mean) < 1e-2, (mean, true_mean))
}
test("endpoints work") {
val dist = new Beta(1,2)
assert(dist.pdf(0) == 2)
assert(dist.pdf(1) == 0)
}
test("Beta.pdf works as a ufunc") {
val M = 100
val x = DenseVector.zeros[Double](M)
val expectedResult = DenseVector.zeros[Double](M)
var i=0
while (i < M) {
x(i) = (1.0/M)*i
expectedResult(i) = 2.0 - 2.0*x(i)
i += 1
}
val d = new Beta(1,2)
assert(norm(d.pdf(x) - expectedResult) < 1e-8)
}
}
| wavelets/breeze | src/test/scala/breeze/stats/distributions/Beta.scala | Scala | apache-2.0 | 2,831 |
package com.airbnb.common.ml.xgboost.data
import org.apache.spark.sql.Row
trait ScoringModelData extends Serializable {
def parseRowToXgboostLabeledPointAndData(row: Row): ScoringLabeledPoint
}
| airbnb/aerosolve | airlearner/airlearner-xgboost/src/main/scala/com/airbnb/common/ml/xgboost/data/ScoringModelData.scala | Scala | apache-2.0 | 199 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.rdd
import java.util
import java.util.List
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.{Partition, SparkContext}
import org.apache.spark.sql.execution.command.CarbonMergerMapping
import org.apache.carbondata.core.datastore.block.{Distributable, TableBlockInfo}
import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTableIdentifier}
import org.apache.carbondata.hadoop.{CarbonInputSplit, CarbonMultiBlockSplit}
import org.apache.carbondata.hadoop.api.CarbonTableInputFormat
import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil
import org.apache.carbondata.processing.merger.CarbonDataMergerUtil
import org.apache.carbondata.processing.model.CarbonLoadModel
import org.apache.carbondata.spark.MergeResult
/**
* IUD carbon merger RDD
* */
class CarbonIUDMergerRDD[K, V](
sc: SparkContext,
result: MergeResult[K, V],
carbonLoadModel: CarbonLoadModel,
carbonMergerMapping: CarbonMergerMapping,
confExecutorsTemp: String)
extends CarbonMergerRDD[K, V](sc,
result,
carbonLoadModel,
carbonMergerMapping,
confExecutorsTemp) {
override def getPartitions: Array[Partition] = {
val startTime = System.currentTimeMillis()
val absoluteTableIdentifier: AbsoluteTableIdentifier = new AbsoluteTableIdentifier(
hdfsStoreLocation, new CarbonTableIdentifier(databaseName, factTableName, tableId)
)
val jobConf: JobConf = new JobConf(new Configuration)
val job: Job = new Job(jobConf)
val format = CarbonInputFormatUtil.createCarbonInputFormat(absoluteTableIdentifier, job)
var defaultParallelism = sparkContext.defaultParallelism
val result = new util.ArrayList[Partition](defaultParallelism)
// mapping of the node and block list.
var nodeMapping: util.Map[String, util.List[Distributable]] = new
util.HashMap[String, util.List[Distributable]]
var noOfBlocks = 0
val taskInfoList = new util.ArrayList[Distributable]
var blocksOfLastSegment: List[TableBlockInfo] = null
CarbonTableInputFormat.setSegmentsToAccess(
job.getConfiguration, carbonMergerMapping.validSegments.toList.asJava)
// get splits
val splits = format.getSplits(job)
val carbonInputSplits = splits.asScala.map(_.asInstanceOf[CarbonInputSplit])
// group blocks by segment.
val splitsGroupedMySegment = carbonInputSplits.groupBy(_.getSegmentId)
var i = -1
// No need to get a new SegmentUpdateStatusManager as the object is passed
// in CarbonLoadModel.
// val manager = new SegmentUpdateStatusManager(absoluteTableIdentifier)
val updateStatusManager = carbonLoadModel.getSegmentUpdateStatusManager
// make one spark partition for one segment
val resultSplits = splitsGroupedMySegment.map(entry => {
val (segName, splits) = (entry._1, entry._2)
val invalidBlocks = updateStatusManager.getInvalidBlockList(segName)
val validSplits = splits.filter( inputSplit =>
CarbonDataMergerUtil
.checkUpdateDeltaMatchBlock(segName, inputSplit.getBlockPath, updateStatusManager)
)
if (!validSplits.isEmpty) {
val locations = validSplits(0).getLocations
i += 1
new CarbonSparkPartition(id, i,
new CarbonMultiBlockSplit(absoluteTableIdentifier, validSplits.asJava, locations))
}
else {
null
}
}
).filter( _ != null)
// max segment cardinality is calculated in executor for each segment
carbonMergerMapping.maxSegmentColCardinality = null
carbonMergerMapping.maxSegmentColumnSchemaList = null
// Log the distribution
val noOfTasks = resultSplits.size
logInfo(s"Identified no.of.Blocks: $noOfBlocks,"
+ s"parallelism: $defaultParallelism , no.of.nodes: unknown, no.of.tasks: $noOfTasks"
)
logInfo("Time taken to identify Blocks to scan : " + (System
.currentTimeMillis() - startTime)
)
resultSplits.foreach { partition =>
val cp = partition.asInstanceOf[CarbonSparkPartition]
logInfo(s"Node : " + cp.multiBlockSplit.getLocations.toSeq.mkString(",")
+ ", No.Of Blocks : " + cp.multiBlockSplit.getLength
)
}
resultSplits.toArray
}
}
| aniketadnaik/carbondataStreamIngest | integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonIUDMergerRDD.scala | Scala | apache-2.0 | 5,227 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.controller.inject.open
import scala.concurrent.duration._
import io.gatling.commons.util.PushbackIterator
private object UserStreamBatchResult {
val Empty = UserStreamBatchResult(0, continue = false)
}
private final case class UserStreamBatchResult(count: Long, continue: Boolean)
private object UserStream {
def apply(steps: Iterable[OpenInjectionStep]): UserStream = {
val users = steps.foldRight(Iterator.empty: Iterator[FiniteDuration]) { (step, iterator) =>
step.chain(iterator)
}
new UserStream(users)
}
}
private class UserStream(users: Iterator[FiniteDuration]) {
private val stream: PushbackIterator[FiniteDuration] = new PushbackIterator(users)
def withStream(batchWindow: FiniteDuration, injectTime: Long, startTime: Long)(f: FiniteDuration => Unit): UserStreamBatchResult =
if (stream.hasNext) {
val batchTimeOffset = (injectTime - startTime).millis
val nextBatchTimeOffset = batchTimeOffset + batchWindow
var continue = true
var streamNonEmpty = true
var count = 0L
while (streamNonEmpty && continue) {
val startingTime = stream.next()
streamNonEmpty = stream.hasNext
val delay = startingTime - batchTimeOffset
continue = startingTime < nextBatchTimeOffset
if (continue) {
count += 1
f(delay)
} else {
streamNonEmpty = true
stream.pushback(startingTime)
}
}
UserStreamBatchResult(count, streamNonEmpty)
} else {
UserStreamBatchResult.Empty
}
}
| gatling/gatling | gatling-core/src/main/scala/io/gatling/core/controller/inject/open/UserStream.scala | Scala | apache-2.0 | 2,202 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network
import java.nio.ByteBuffer
import org.apache.spark.SparkConf
private[spark] object ReceiverTest {
def main(args: Array[String]) {
val manager = new ConnectionManager(9999, new SparkConf)
println("Started connection manager with id = " + manager.id)
manager.onReceiveMessage((msg: Message, id: ConnectionManagerId) => {
/*println("Received [" + msg + "] from [" + id + "] at " + System.currentTimeMillis)*/
val buffer = ByteBuffer.wrap("response".getBytes)
Some(Message.createBufferMessage(buffer, msg.id))
})
Thread.currentThread.join()
}
}
| sryza/spark | core/src/main/scala/org/apache/spark/network/ReceiverTest.scala | Scala | apache-2.0 | 1,423 |
/**
* Copyright (C) 2015 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.solr
import java.net.URL
import java.nio.charset.StandardCharsets
import scalaj.http.BaseHttp
import scala.util.{ Failure, Success, Try }
case class SolrProviderImpl(solrUrl: URL, userAgent: String) extends SolrProvider {
object Http extends BaseHttp(userAgent = userAgent)
override def update(doc: String): Try[Unit] = Try {
val result = Http(solrUrl.toString)
.header("Content-Type", "application/xml; charset=utf-8")
.param("commit", "true")
.postData(doc.getBytes(StandardCharsets.UTF_8))
.timeout(5000, 60000)
.asString
if (result.isError)
Failure(new RuntimeException(s"${ result.statusLine }, details: ${ result.body }"))
else Success(())
}.flatten
}
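// Hedged usage sketch, not part of the original file: the Solr URL, core name and user-agent
// string below are made up; `update` posts the XML document and asks Solr to commit it immediately.
object SolrProviderImplExample extends App {
  private val provider = SolrProviderImpl(
    new URL("http://localhost:8983/solr/example-core/update"),
    userAgent = "easy-update-solr-index-example")

  provider.update("""<add><doc><field name="id">example:1</field></doc></add>""") match {
    case Success(_) => println("document indexed and committed")
    case Failure(e) => println(s"indexing failed: ${e.getMessage}")
  }
}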
| DANS-KNAW/easy-update-solr-index | lib/src/main/scala/nl.knaw.dans.easy.solr/SolrProviderImpl.scala | Scala | apache-2.0 | 1,394 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.dc.stream
import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.{HttpEncodings, `Accept-Encoding`}
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.stream.{ActorAttributes, KillSwitch, KillSwitches, Materializer}
import akka.stream.Supervision.Decider
import akka.stream.contrib.{Retry, SourceGen}
import akka.stream.scaladsl.{Flow, Framing, Keep, Sink, Source}
import akka.util.ByteString
import cmwell.dc.{LazyLogging, Settings}
import cmwell.dc.Settings._
import cmwell.dc.stream.MessagesTypesAndExceptions._
import cmwell.dc.stream.akkautils.DebugStage
import cmwell.util.akka.http.HttpZipDecoder
import com.typesafe.config.ConfigFactory
import scala.collection.parallel.immutable
import scala.util.{Failure, Success, Try}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
/**
* Created by eli on 19/07/16.
*/
object TsvRetriever extends LazyLogging {
val gzipAcceptEncoding = `Accept-Encoding`(HttpEncodings.gzip)
val maxTsvLineLength = {
val slashHttpsDotPossiblePrefix = "/https.".length
val maxUrlLength = 2083
val dateLength = 25
val uuidLength = 32
val indexTimeLength = 13
val tabs = 3
slashHttpsDotPossiblePrefix + maxUrlLength + dateLength + uuidLength + indexTimeLength + indexTimeLength + 1
}
def parseTSVAndCreateInfotonDataFromIt(tsv: ByteString) = {
val tabAfterPath = tsv.indexOfSlice(tab)
val tabAfterLast = tsv.indexOfSlice(tab, tabAfterPath + 1)
val tabAfterUuid = tsv.indexOfSlice(tab, tabAfterLast + 1)
val path = tsv.take(tabAfterPath).utf8String
val uuid = tsv.slice(tabAfterLast + 1, tabAfterUuid)
val idxt = tsv.drop(tabAfterUuid).utf8String.trim.toLong
logger.trace(
s"parseTSVAndCreateInfotonDataFromIt: [path='$path',uuid='${uuid.utf8String}',idxt='$idxt']"
)
InfotonData(InfotonMeta(path, uuid, idxt), empty)
}
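  // Hedged sketch, not part of the original file: demonstrates the TSV layout expected by the
  // parser above (path <tab> last-modified <tab> uuid <tab> indexTime); the concrete values and
  // the naming of the second field are assumptions. The result's meta.indexTime is 1468929600000L.
  def parseTsvExample(): InfotonData =
    parseTSVAndCreateInfotonDataFromIt(
      ByteString("/example.org/doc1\t2016-07-19T12:00:00.000Z\t0123456789abcdef0123456789abcdef\t1468929600000")
    )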
sealed trait ConsumeType
object BulkConsume extends ConsumeType {
override def toString = "BulkConsume"
}
object Consume extends ConsumeType {
override def toString = "Consume"
}
case class TsvFlowOutput(tsvs: List[InfotonData],
nextPositionKey: String,
isNoContent: Boolean = false)
case class ConsumeState(op: ConsumeType, startTime: Long)
case class TsvFlowState(tsvRetrieveInput: TsvRetrieveInput,
retriesLeft: Int,
lastException: Option[Throwable],
consumeState: ConsumeState)
type TsvRetrieveInput = String
type TsvRetrieveOutput = TsvFlowOutput
type TsvRetrieveState = TsvFlowState
def apply(dcInfo: DcInfo, decider: Decider)(
implicit mat: Materializer,
system: ActorSystem
): Source[List[InfotonData], (KillSwitch, Future[Seq[Option[String]]])] = {
SourceGen.unfoldFlowWith(
Future.successful(dcInfo.positionKey.get),
retrieveTsvsWithRetryAndLastPositionKey(dcInfo, decider)
) {
case Success(TsvFlowOutput(tsvs, nextPositionKey, isNoContent))
if !isNoContent => {
Some(Future.successful(nextPositionKey), tsvs)
}
case Success(TsvFlowOutput(tsvs, nextPositionKey, isNoContent)) => {
logger.info(
s"Data Center ID ${dcInfo.id} from ${dcInfo.location}. Got 204 no content. Will close the stream. It will be opened on the next scheduled check."
)
/* - cancelled due to errors in akka stream. the stream will be closed and opened again for each 204
val f = akka.pattern.after(Settings.delayInSecondsBetweenNoContentRetries.seconds, system.scheduler)(Future.successful(nextPositionKey))
logger.info(s"The type of 204 Future is $f")
f.onComplete(pos => logger.info(s"204 next position key future finished with $f"))
//val f = cmwell.util.concurrent.delayedTask(Settings.delayInSecondsBetweenNoContentRetries.seconds)(nextPositionKey)(system.dispatcher)
Some(f, List())
*/
None
}
case Failure(ex) => {
logger.error(s"Data Center ID ${dcInfo.id}: Retrieve of TSVs from ${dcInfo.location} failed. " +
s"Completing the stream (current got TSVs should be ok unless another exception is caught later). The exception is:",ex)
None
}
}
}
def retrieveTsvsWithRetryAndLastPositionKey(
dcInfo: DcInfo,
decider: Decider
)(implicit mat: Materializer, system: ActorSystem): Flow[Future[String], Try[
TsvRetrieveOutput
], (KillSwitch, Future[Seq[Option[String]]])] = {
// an additional sink that keeps the last position key received
// sliding(2) is used because the last element can be None (a stream can finish with an error), in which case the element before it should be taken
val positionKeySink = Flow
.fromFunction[Try[TsvRetrieveOutput], Option[String]] {
case Success(TsvFlowOutput(tsvs, nextPositionKey, isNoContent)) =>
Some(nextPositionKey)
case _ => None
}
.sliding(2)
.toMat(Sink.last)(Keep.right)
retrieveTsvsWithRetry(dcInfo, decider).alsoToMat(positionKeySink)(Keep.both)
}
def retrieveTsvsWithRetry(dcInfo: DcInfo, decider: Decider)(
implicit mat: Materializer,
system: ActorSystem
): Flow[Future[String], Try[TsvRetrieveOutput], KillSwitch] =
Flow[Future[String]]
.mapAsync(1)(identity)
.viaMat(KillSwitches.single)(Keep.right)
.map(
positionKey =>
Future.successful(positionKey) -> TsvFlowState(
positionKey,
Settings.initialTsvRetryCount,
None,
ConsumeState(BulkConsume, System.currentTimeMillis)
)
)
.via(
Retry(retrieveTsvFlow(dcInfo, decider))(
retryDecider(dcInfo.id, dcInfo.location)
)
)
.map(_._1)
private def stayInThisState(stateStartTime: Long): Boolean =
System.currentTimeMillis - stateStartTime < Settings.consumeFallbackDuration
private def extractPrefixes(state: ConsumeState) = state match {
case ConsumeState(BulkConsume, _) => "bulk-"
case ConsumeState(Consume, _) => ""
}
private def getNewState(elementState: TsvRetrieveState,
lastUsedState: ConsumeState) =
// If there was an error before, take the state as it came from the retry decider;
// otherwise change from a lower consume type to a better one only after the time interval.
if (elementState.lastException.isDefined) elementState.consumeState
else
lastUsedState match {
case state @ ConsumeState(BulkConsume, _) => state
case state @ ConsumeState(Consume, start) =>
if (stayInThisState(start)) state
else ConsumeState(BulkConsume, System.currentTimeMillis)
}
def retrieveTsvFlow(dcInfo: DcInfo, decider: Decider)(
implicit mat: Materializer,
system: ActorSystem
): Flow[(Future[TsvRetrieveInput], TsvRetrieveState),
(Try[TsvRetrieveOutput], TsvRetrieveState),
NotUsed] = {
val startTime = System.currentTimeMillis
val hostPort = dcInfo.location.split(":")
val (host, port) = hostPort.head -> hostPort.tail.headOption
.getOrElse("80")
.toInt
val tsvPoolConfig = ConfigFactory
.parseString("akka.http.host-connection-pool.max-connections=1")
.withFallback(config)
val tsvConnPool = Http()
.newHostConnectionPool[TsvRetrieveState](
host,
port,
ConnectionPoolSettings(tsvPoolConfig)
)
Flow[(Future[TsvRetrieveInput], TsvRetrieveState)]
.mapAsync(1) { case (input, state) => input.map(_ -> state) }
.statefulMapConcat { () =>
var currentState = ConsumeState(BulkConsume, System.currentTimeMillis);
{
case (positionKey, state) =>
currentState = getNewState(state, currentState)
val bulkPrefix = extractPrefixes(currentState)
val request = HttpRequest(
uri =
s"http://${dcInfo.location}/?op=${bulkPrefix}consume&format=tsv&position=$positionKey",
headers = scala.collection.immutable.Seq(gzipAcceptEncoding)
)
logger.info(
s"Data Center ID ${dcInfo.id}: Sending ${currentState.op} request to ${dcInfo.location} using position key $positionKey."
)
scala.collection.immutable.Seq(
request -> state.copy(consumeState = currentState)
)
}
}
.via(tsvConnPool)
.map {
case (tryResponse, state) =>
tryResponse.map(HttpZipDecoder.decodeResponse) -> state
}
.flatMapConcat {
case (Success(res @ HttpResponse(s, h, entity, _)), state)
if s.isSuccess() && h.exists(_.name == "X-CM-WELL-POSITION") => {
val nextPositionKey = res.getHeader("X-CM-WELL-POSITION").get.value()
entity.dataBytes
.via(
Framing
.delimiter(endln, maximumFrameLength = maxTsvLineLength * 2)
)
.fold(List[InfotonData]())(
(total, bs) => parseTSVAndCreateInfotonDataFromIt(bs) :: total
)
.map { data =>
val sortedData = data.sortBy(_.meta.indexTime)
if (state.retriesLeft < Settings.initialTsvRetryCount) {
val consumeCount = Settings.initialTsvRetryCount - state.retriesLeft + 1
yellowlog.info(
s"TSV (bulk)consume succeeded only after $consumeCount (bulk)consumes. token: ${state.tsvRetrieveInput}."
)
}
Success(
TsvFlowOutput(sortedData, nextPositionKey, s.intValue == 204)
) -> state.copy(lastException = None)
}
.withAttributes(ActorAttributes.supervisionStrategy(decider))
.recover {
case e =>
val ex = RetrieveTsvException(
s"Retrieve TSVs using ${state.consumeState.op} failed. Data center ID ${dcInfo.id}, using remote location ${dcInfo.location}",
e
)
logger.warn("Retrieve TSVs failed.", ex)
Failure[TsvRetrieveOutput](ex) -> state.copy(
lastException = Some(ex)
)
}
}
case (res @ Success(HttpResponse(s, h, entity, _)), state) => {
val errorID = res.##
val e = new Exception(
s"Error ![$errorID]. Cm-Well returned bad response: status: ${s.intValue} headers: ${Util
.headersString(h)} reason: ${s.reason}"
)
val bodyFut =
entity.dataBytes.runFold(empty)(_ ++ _).map(_.utf8String)
val ex = RetrieveTsvBadResponseException(
s"Retrieve TSVs using ${state.consumeState.op} failed. Data center ID ${dcInfo.id}, using remote location ${dcInfo.location}.",
bodyFut,
e
)
logger.warn(s"${ex.getMessage} ${ex.getCause.getMessage}")
Util.warnPrintFuturedBodyException(ex)
Source.single(
Failure[TsvRetrieveOutput](ex) -> state
.copy(lastException = Some(ex))
)
}
case (Failure(e), state) => {
val ex = RetrieveTsvException(
s"Retrieve TSVs using ${state.consumeState.op} failed. Data center ID ${dcInfo.id}, using remote location ${dcInfo.location}",
e
)
logger.warn("Retrieve TSVs failed.", ex)
Source.single(
Failure[TsvRetrieveOutput](ex) -> state
.copy(lastException = Some(ex))
)
}
}
.statefulMapConcat { () =>
var infotonsGot: Long = 0;
{
case output @ (
Success(TsvFlowOutput(tsvs, nextPositionKey, _)),
state
) =>
infotonsGot += tsvs.size
val rate = infotonsGot / ((System.currentTimeMillis - startTime) / 1000D)
val d = dcInfo.id
val s = tsvs.size
val o = state.consumeState.op
val r = rate.formatted("%.2f")
logger.info(s"Data Center ID $d: Got TSVs stream source. The next position key to consume is $nextPositionKey. " +
s"Got $s TSVs using $o. Total TSVs got $infotonsGot. Read rate: $r TSVs/second")
scala.collection.immutable.Seq(output)
case output => scala.collection.immutable.Seq(output)
}
}
}
private def retryDecider(
dataCenterId: String,
location: String
)(implicit mat: Materializer, system: ActorSystem) =
(state: TsvRetrieveState) =>
state match {
case TsvFlowState(_, 0, _, _) =>
// scalastyle:off
logger.error(s"Data Center ID $dataCenterId: Retrieve of TSVs from $location failed. No more reties will be done. The sync will be closed now (no more new TSV will be got) and restarted again automatically.")
// scalastyle:on
None
case TsvFlowState(positionKey, retriesLeft, ex, consumeState) =>
val waitSeconds = ex match {
case Some(_: RetrieveTsvBadResponseException) => 1
          // Due to what seems to be a bug in akka-http, if there was an error while retrieving the body
          // (e.g. connection reset by peer) and another request is sent,
          // akka-http gets stuck. To overcome this issue, a wait of 40 seconds is added to allow the connection pool
          // to be properly closed before sending another request.
case Some(_) => 40
case None =>
??? // Shouldn't get here. The retry decider is called only when there is an exception and the ex should be in the state
}
val newConsumeOp = consumeState.op match {
case BulkConsume
if Settings.initialTsvRetryCount - retriesLeft < Settings.bulkTsvRetryCount =>
BulkConsume
case _ => Consume
}
logger.warn(
s"Data Center ID $dataCenterId: Retrieve of TSVs from $location failed. Retries left $retriesLeft. Will try again in $waitSeconds seconds."
)
Some(
akka.pattern.after(waitSeconds.seconds, system.scheduler)(
Future.successful(positionKey)
) -> TsvFlowState(
positionKey,
retriesLeft - 1,
ex,
ConsumeState(newConsumeOp, System.currentTimeMillis)
)
)
}
}
| bryaakov/CM-Well | server/cmwell-dc/src/main/scala/cmwell/dc/stream/TsvRetriever.scala | Scala | apache-2.0 | 15,385 |
package model
import skinny.DBSettings
import skinny.test._
import org.scalatest.fixture.FlatSpec
import org.scalatest._
import scalikejdbc._
import scalikejdbc.scalatest._
import org.joda.time._
class GoogleUsersSpec extends FlatSpec with Matchers with DBSettings with AutoRollback {
}
| yoshitakes/skinny-task-example | src/test/scala/model/GoogleUsersSpec.scala | Scala | mit | 289 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.oap.io
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.orc.mapred.OrcStruct
import org.apache.parquet.filter2.predicate.FilterPredicate
import org.apache.spark.TaskContext
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Ascending, JoinedRow}
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.execution.datasources.PartitionedFile
import org.apache.spark.sql.execution.datasources.oap._
import org.apache.spark.sql.execution.datasources.oap.index._
import org.apache.spark.sql.execution.datasources.oap.utils.FilterHelper
import org.apache.spark.sql.execution.datasources.orc.OrcDeserializer
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types._
private[oap] class OapDataReaderV1(
pathStr: String,
meta: DataSourceMeta,
partitionSchema: StructType,
requiredSchema: StructType,
filterScanners: Option[IndexScanners],
requiredIds: Array[Int],
pushed: Option[FilterPredicate],
metrics: OapMetricsManager,
conf: Configuration,
enableVectorizedReader: Boolean = false,
options: Map[String, String] = Map.empty,
filters: Seq[Filter] = Seq.empty,
context: Option[DataFileContext] = None,
file: PartitionedFile = null) extends OapDataReader with Logging {
import org.apache.spark.sql.execution.datasources.oap.INDEX_STAT._
private var _rowsReadWhenHitIndex: Option[Long] = None
private var _indexStat = MISS_INDEX
override def rowsReadByIndex: Option[Long] = _rowsReadWhenHitIndex
override def indexStat: INDEX_STAT = _indexStat
def totalRows(): Long = _totalRows
private var _totalRows: Long = 0
private val path = new Path(pathStr)
private val dataFileClassName = meta.dataReaderClassName
def initialize(): OapCompletionIterator[Any] = {
logDebug("Initializing OapDataReader...")
// TODO how to save the additional FS operation to get the Split size
val fileScanner = DataFile(pathStr, meta.schema, dataFileClassName, conf)
if (meta.dataReaderClassName.equals(OapFileFormat.PARQUET_DATA_FILE_CLASSNAME)) {
fileScanner.asInstanceOf[ParquetDataFile].setParquetVectorizedContext(
context.asInstanceOf[Option[ParquetVectorizedContext]])
fileScanner.asInstanceOf[ParquetDataFile].setPartitionedFile(file)
} else if (meta.dataReaderClassName.equals(OapFileFormat.ORC_DATA_FILE_CLASSNAME)) {
// For orc, the context will be used by both vectorization and non vectorization.
fileScanner.asInstanceOf[OrcDataFile].setOrcDataFileContext(
context.get.asInstanceOf[OrcDataFileContext])
}
def fullScan: OapCompletionIterator[Any] = {
val start = if (log.isDebugEnabled) System.currentTimeMillis else 0
val iter = fileScanner.iterator(requiredIds, filters)
val end = if (log.isDebugEnabled) System.currentTimeMillis else 0
_totalRows = fileScanner.totalRows()
logDebug("Construct File Iterator: " + (end - start) + " ms")
iter
}
filterScanners match {
case Some(indexScanners) if indexScanners.isIndexFileBeneficial(path, conf) =>
def getRowIds(options: Map[String, String]): Array[Int] = {
indexScanners.initialize(path, conf)
_totalRows = indexScanners.totalRows()
          // The total row count can be obtained from the index scanner
val limit = options.getOrElse(OapFileFormat.OAP_QUERY_LIMIT_OPTION_KEY, "0").toInt
val rowIds = if (limit > 0) {
// Order limit scan options
val isAscending = options.getOrElse(
OapFileFormat.OAP_QUERY_ORDER_OPTION_KEY, "true").toBoolean
val sameOrder = !((indexScanners.order == Ascending) ^ isAscending)
if (sameOrder) {
indexScanners.take(limit).toArray
} else {
indexScanners.toArray.reverse.take(limit)
}
} else {
indexScanners.toArray
}
          // The Parquet reader does not support backward scans, so rowIds must be sorted.
          // The Orc reader does support backward scans, so sorting is not strictly required there.
          // However, with sorted row ids the adjacent rows are scanned in the same batch,
          // which reduces IO cost.
if (meta.dataReaderClassName.equals(OapFileFormat.PARQUET_DATA_FILE_CLASSNAME) ||
meta.dataReaderClassName.equals(OapFileFormat.ORC_DATA_FILE_CLASSNAME)) {
rowIds.sorted
} else {
rowIds
}
}
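        // Rough example of the limit handling above (option values are illustrative):
        //   with OapFileFormat.OAP_QUERY_LIMIT_OPTION_KEY -> "10" and
        //   OapFileFormat.OAP_QUERY_ORDER_OPTION_KEY -> "false" against an ascending index,
        //   sameOrder is false, so the scanner is fully drained, reversed and the first 10
        //   row ids kept; with "true" it would simply take(10) from the scanner.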
val start = if (log.isDebugEnabled) System.currentTimeMillis else 0
val rows = getRowIds(options)
val iter = fileScanner.iteratorWithRowIds(requiredIds, rows, filters)
val end = if (log.isDebugEnabled) System.currentTimeMillis else 0
_indexStat = HIT_INDEX
_rowsReadWhenHitIndex = Some(rows.length)
logDebug("Construct File Iterator: " + (end - start) + "ms")
iter
case Some(_) =>
_indexStat = IGNORE_INDEX
fullScan
case _ =>
fullScan
}
}
override def read(file: PartitionedFile): Iterator[InternalRow] = {
FilterHelper.setFilterIfExist(conf, pushed)
val iter = initialize()
Option(TaskContext.get()).foreach(_.addTaskCompletionListener[Unit](_ => iter.close()))
val tot = totalRows()
metrics.updateTotalRows(tot)
metrics.updateIndexAndRowRead(this, tot)
    // If enableVectorizedReader == true, return iter directly because partitionValues
    // are already filled by the vectorized reader; otherwise use the original Parquet or Orc branch.
if (enableVectorizedReader) {
iter.asInstanceOf[Iterator[InternalRow]]
} else {
// Parquet and Oap are the same if the vectorization is off.
val fullSchema = requiredSchema.toAttributes ++ partitionSchema.toAttributes
val joinedRow = new JoinedRow()
val appendPartitionColumns =
GenerateUnsafeProjection.generate(fullSchema, fullSchema)
meta.dataReaderClassName match {
case dataReader if dataReader.equals(OapFileFormat.PARQUET_DATA_FILE_CLASSNAME) =>
iter.asInstanceOf[Iterator[InternalRow]].map(d => {
appendPartitionColumns(joinedRow(d, file.partitionValues))
})
case dataReader if dataReader.equals(OapFileFormat.ORC_DATA_FILE_CLASSNAME) =>
val orcDataFileContext = context.get.asInstanceOf[OrcDataFileContext]
val deserializer = new OrcDeserializer(orcDataFileContext.dataSchema, requiredSchema,
orcDataFileContext.requestedColIds)
iter.asInstanceOf[Iterator[OrcStruct]].map(value => {
appendPartitionColumns(joinedRow(deserializer.deserialize(value),
file.partitionValues))})
}
}
}
}
| Intel-bigdata/OAP | oap-cache/oap/src/main/scala/org/apache/spark/sql/execution/datasources/oap/io/OapDataReaderWriter.scala | Scala | apache-2.0 | 7,752 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.tensor
object DenseTensorApply {
/**
   * Iterate through tensor1 and apply func to the elements,
   * writing the function result into tensor2
*
* @param tensor1 the tensor1
* @param tensor2 the result tensor
* @param func (tensor1Data, tensor1Offset, tensor2Data,
* tensor2Offset)
*/
def apply1[A, B](tensor1: Tensor[A], tensor2: Tensor[B],
func: TensorDiffTypeFunc4[A, B]): Unit = {
if (tensor1.isEmpty) {
return
}
// shortcut for scalar
if (tensor1.isScalar && tensor2.isScalar) {
val data1 = tensor1.storage().array()
val index1 = tensor1.storageOffset() - 1
val data2 = tensor2.storage().array()
val index2 = tensor2.storageOffset() - 1
func(data1, index1, data2, index2)
return
}
val stride1 = getStride(tensor1)
val stride2 = getStride(tensor2)
val (largestDim1, largestSize1) = getLargestContiguousSize(tensor1)
val (largestDim2, largestSize2) = getLargestContiguousSize(tensor2)
val counter1 = getCounter(largestDim1)
val counter2 = getCounter(largestDim2)
val data1 = tensor1.storage().array()
val data2 = tensor2.storage().array()
var offset1 = tensor1.storageOffset() - 1
var offset2 = tensor2.storageOffset() - 1
var hasFinished1 = false
var hasFinished2 = false
var i1 = 0
var i2 = 0
while (!hasFinished1 && !hasFinished2) {
while (i1 < largestSize1 && i2 < largestSize2) {
val index1 = offset1 + i1 * stride1
val index2 = offset2 + i2 * stride2
func(data1, index1, data2, index2)
i1 += 1
i2 += 1
}
val r1 = updateCounter(tensor1, counter1, offset1, largestDim1)
val r2 = updateCounter(tensor2, counter2, offset2, largestDim2)
hasFinished1 = r1._1
hasFinished2 = r2._1
offset1 = r1._2
offset2 = r2._2
i1 = 0
i2 = 0
}
}
/**
   * Iterate through the tensor and apply func to each element
   *
   * @param tensor the tensor
   * @param func (tensorData, tensorOffset)
*/
def apply1[@specialized(Float, Double) T](
tensor: Tensor[T], func: TensorFunc2[T]): Unit = {
if (tensor.isEmpty) {
return
}
// shortcut for scalar
if (tensor.isScalar) {
val data = tensor.storage().array()
val index = tensor.storageOffset() - 1
func(data, index)
return
}
val stride = getStride(tensor)
val (largestDim, largestSize) = getLargestContiguousSize(tensor)
val counter = getCounter(largestDim)
val data = tensor.storage().array()
var offset = tensor.storageOffset() - 1
var hasFinished = false
var i = 0
while (!hasFinished) {
while (i < largestSize) {
val index = offset + i * stride
func(data, index)
i += 1
}
val r = updateCounter(tensor, counter, offset, largestDim)
hasFinished = r._1
offset = r._2
i = 0
}
}
/**
* Iterate through tensor1, tensor2, and apply func to the elements
*
* @param tensor1 the tensor
* @param tensor2 the tensor
* @param func (tensor1Data, tensor1Offset, tensor2Data, tensor2Offset)
*/
def apply2[T](tensor1: Tensor[T], tensor2: Tensor[T],
func: TensorFunc4[T]): Unit = {
require(tensor1.nElement() == tensor2.nElement(),
s"inconsistent tensor size: ${tensor1.nElement()} == ${tensor2.nElement()}")
if (tensor1.isEmpty) {
return
}
// shortcut for scalar
if (tensor1.isScalar && tensor2.isScalar) {
val tensor1Data = tensor1.storage().array()
val tensor2Data = tensor2.storage().array()
val tensor1Index = tensor1.storageOffset() - 1
val tensor2Index = tensor2.storageOffset() - 1
func(tensor1Data, tensor1Index, tensor2Data, tensor2Index)
return
}
val tensor1Data = tensor1.storage().array()
var tensor1Offset = tensor1.storageOffset() - 1
val tensor2Data = tensor2.storage().array()
var tensor2Offset = tensor2.storageOffset() - 1
var adjacent = false
if (tensor1.nDimension == 1 && tensor2.nDimension == 1 && tensor1.stride(1) == 1 &&
tensor2.stride(1) == 1) {
adjacent = true
}
if (tensor1.nDimension == 2 && tensor2.nDimension == 2) {
if (tensor1.stride(2) == 1 && tensor2.stride(2) == 1 && tensor1.stride(1) == tensor1.size(2)
&& tensor2.stride(1) == tensor2.size(2)) {
adjacent = true
}
if (tensor1.stride(1) == 1 && tensor2.stride(1) == 1 && tensor1.stride(2) == tensor1.size(1)
&& tensor2.stride(2) == tensor2.size(1)) {
adjacent = true
}
}
if (adjacent) {
var i = 0
while (i < tensor1.nElement()) {
func(tensor1Data, tensor1Offset + i, tensor2Data, tensor2Offset + i)
i += 1
}
return
}
val tensor1Stride = getStride(tensor1)
val (largestDim1, largestSize1) = getLargestContiguousSize(tensor1)
val counter1 = getCounter(largestDim1)
val tensor2Stride = getStride(tensor2)
val (largestDim2, largestSize2) = getLargestContiguousSize(tensor2)
val counter2 = getCounter(largestDim2)
var hasFinished = false
var i1 = 0
var i2 = 0
while (!hasFinished) {
while (i1 < largestSize1 && i2 < largestSize2) {
func(tensor1Data, tensor1Offset + i1 * tensor1Stride, tensor2Data,
tensor2Offset + i2 * tensor2Stride)
i1 = i1 + 1
i2 = i2 + 1
}
if (i1 == largestSize1) {
val r = updateCounter(tensor1, counter1, tensor1Offset, largestDim1)
hasFinished = r._1
tensor1Offset = r._2
i1 = 0
}
if (i2 == largestSize2) {
val r = updateCounter(tensor2, counter2, tensor2Offset, largestDim2)
hasFinished = r._1
tensor2Offset = r._2
i2 = 0
}
}
}
/**
* Iterate through tensor1, tensor2, tensor3, and apply func to the elements
*
* @param tensor1 the tensor
* @param tensor2 the tensor
* @param tensor3 the tensor
* @param func (tensor1Data, tensor1Offset, tensor2Data, tensor2Offset, tensor3Data,
* tensor3Offset)
*/
private[bigdl] def apply3[@specialized(Float, Double) T](tensor1: Tensor[T],
tensor2: Tensor[T], tensor3: Tensor[T],
func: TensorFunc6[T]): Unit = {
require(tensor1.nElement() == tensor2.nElement() && tensor2.nElement() == tensor3.nElement(),
"inconsistent tensor size")
if (tensor1.isEmpty) {
return
}
// shortcut for scalar
if (tensor1.isScalar && tensor2.isScalar && tensor3.isScalar) {
val tensor1Data = tensor1.storage().array()
val tensor2Data = tensor2.storage().array()
val tensor3Data = tensor3.storage().array()
val tensor1Index = tensor1.storageOffset() - 1
val tensor2Index = tensor2.storageOffset() - 1
val tensor3Index = tensor3.storageOffset() - 1
func(tensor1Data, tensor1Index, tensor2Data, tensor2Index, tensor3Data, tensor3Index)
return
}
val tensor1Data = tensor1.storage().array()
var tensor1Offset = tensor1.storageOffset() - 1
val tensor1Stride = getStride(tensor1)
val (tensor1Dim, tensor1Size) = getLargestContiguousSize(tensor1)
val tensor1Counter = getCounter(tensor1Dim)
val tensor2Data = tensor2.storage().array()
var tensor2Offset = tensor2.storageOffset() - 1
val tensor2Stride = getStride(tensor2)
val (tensor2Dim, tensor2Size) = getLargestContiguousSize(tensor2)
val tensor2Counter = getCounter(tensor2Dim)
val tensor3Data = tensor3.storage().array()
var tensor3Offset = tensor3.storageOffset() - 1
val tensor3Stride = getStride(tensor3)
val (tensor3Dim, tensor3Size) = getLargestContiguousSize(tensor3)
val tensor3Counter = getCounter(tensor3Dim)
var hasFinished = false
var i1 = 0
var i2 = 0
var i3 = 0
while (!hasFinished) {
while (i1 < tensor1Size && i2 < tensor2Size && i3 < tensor3Size) {
func(tensor1Data, tensor1Offset + i1 * tensor1Stride, tensor2Data,
tensor2Offset + i2 * tensor2Stride,
tensor3Data, tensor3Offset + i3 * tensor3Stride)
i1 += 1
i2 += 1
i3 += 1
}
if (i1 == tensor1Size) {
val r = updateCounter(tensor1, tensor1Counter, tensor1Offset, tensor1Dim)
hasFinished = r._1
tensor1Offset = r._2
i1 = 0
}
if (i2 == tensor2Size) {
val r = updateCounter(tensor2, tensor2Counter, tensor2Offset, tensor2Dim)
hasFinished = r._1
tensor2Offset = r._2
i2 = 0
}
if (i3 == tensor3Size) {
val r = updateCounter(tensor3, tensor3Counter, tensor3Offset, tensor3Dim)
hasFinished = r._1
tensor3Offset = r._2
i3 = 0
}
}
}
/**
   * Get the stride, discarding dimensions with size 1
*
* @param tensor tensor
* @return
*/
def getStride[T](tensor: Tensor[T]): Int = {
var d = tensor.nDimension()
while (d > 0) {
if (tensor.size(d) != 1) {
return tensor.stride(d)
}
d -= 1
}
0
}
def getLargestContiguousSize[T](tensor: Tensor[T]): (Int, Int) = {
var largestSize = 1
var largestDim = tensor.nDimension()
while (largestDim > 0) {
if (tensor.size(largestDim) != 1) {
if (tensor.stride(largestDim) == largestSize) {
largestSize = largestSize * tensor.size(largestDim)
} else {
return (largestDim, largestSize)
}
}
largestDim -= 1
}
(largestDim, largestSize)
}
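  // A small worked example (hypothetical tensor, for illustration only): a contiguous 2x3 tensor has
  // size = (2, 3) and stride = (3, 1); scanning from the last dimension gives largestSize = 3
  // (stride(2) == 1), then 3 * 2 = 6 (stride(1) == 3), so the result is (0, 6) - one contiguous block.
  // A transposed 2x3 view with stride = (1, 2) fails the check at dimension 2 and yields (2, 1).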
def getCounter(largestDim: Int): Array[Int] = {
val counter = new Array[Int](largestDim)
var d = 0
while (d < largestDim) {
counter(d) = 0
d += 1
}
counter
}
def updateCounter[T](tensor: Tensor[T], counter: Array[Int], offset: Int,
dim: Int): (Boolean, Int) = {
if (dim == 0) {
return (true, offset)
}
var _offset = offset
var i = dim
while (i > 0) {
counter(i - 1) += 1
_offset += tensor.stride(i)
if (counter(i - 1) == tensor.size(i)) {
if (i == 1) {
return (true, _offset)
} else {
_offset -= counter(i - 1) * tensor.stride(i)
counter(i - 1) = 0
}
} else {
return (false, _offset)
}
i -= 1
}
(false, _offset)
}
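  // Continuing the illustration above: when getLargestContiguousSize returns (0, 6), dim == 0 and
  // updateCounter immediately returns (true, offset), so a single contiguous pass covers the tensor.
  // For the (2, 1) case, each call bumps counter(1) and adds stride(2) to the offset; once
  // counter(1) reaches size(2) it is reset, the overshoot is subtracted and dimension 1 advances,
  // until the outermost dimension wraps and (true, offset) marks the end of the iteration.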
/**
* Iterate through tensor1, tensor2, and apply func to the elements,
* set function result to tensor 3
*
* @param tensor1 the tensor1
* @param tensor2 the tensor2
* @param tensor3 the result tensor
* @param func (tensor1Data, tensor1Offset, tensor2Data,
* tensor2Offset, tensor3Data, tensor3Offset)
*/
def apply2[A, B, C](tensor1: Tensor[A], tensor2: Tensor[B], tensor3: Tensor[C],
func: TensorDiffTypeFunc6[A, B, C])
: Unit = {
require(tensor1.nElement() == tensor2.nElement(),
s"inconsistent tensor size: ${tensor1.nElement()} == ${tensor2.nElement()}")
if (tensor1.isEmpty) {
return
}
// shortcut for scalar
if (tensor1.isScalar && tensor2.isScalar) {
val tensor1Data = tensor1.storage().array()
val tensor2Data = tensor2.storage().array()
val tensor3Data = tensor3.storage().array()
val tensor1Index = tensor1.storageOffset() - 1
val tensor2Index = tensor2.storageOffset() - 1
val tensor3Index = tensor3.storageOffset() - 1
func(tensor1Data, tensor1Index, tensor2Data, tensor2Index, tensor3Data, tensor3Index)
return
}
val tensor1Data = tensor1.storage().array()
var tensor1Offset = tensor1.storageOffset() - 1
val tensor2Data = tensor2.storage().array()
var tensor2Offset = tensor2.storageOffset() - 1
val tensor3Data = tensor3.storage().array()
    var tensor3Offset = tensor3.storageOffset() - 1
var adjacent = false
if (tensor1.nDimension == 1 && tensor2.nDimension == 1 && tensor1.stride(1) == 1 &&
tensor2.stride(1) == 1) {
adjacent = true
}
if (tensor1.nDimension == 2 && tensor2.nDimension == 2) {
if (tensor1.stride(2) == 1 && tensor2.stride(2) == 1 && tensor1.stride(1) == tensor1.size(2)
&& tensor2.stride(1) == tensor2.size(2)) {
adjacent = true
}
if (tensor1.stride(1) == 1 && tensor2.stride(1) == 1 && tensor1.stride(2) == tensor1.size(1)
&& tensor2.stride(2) == tensor2.size(1)) {
adjacent = true
}
}
if (adjacent) {
var i = 0
while (i < tensor1.nElement()) {
func(
tensor1Data, tensor1Offset + i,
tensor2Data, tensor2Offset + i,
tensor3Data, tensor3Offset + i)
i += 1
}
return
}
val tensor1Stride = getStride(tensor1)
val (largestDim1, largestSize1) = getLargestContiguousSize(tensor1)
val counter1 = getCounter(largestDim1)
val tensor2Stride = getStride(tensor2)
val (largestDim2, largestSize2) = getLargestContiguousSize(tensor2)
val counter2 = getCounter(largestDim2)
val tensor3Stride = getStride(tensor3)
val (largestDim3, largestSize3) = getLargestContiguousSize(tensor3)
val counter3 = getCounter(largestDim3)
var hasFinished = false
var i1 = 0
var i2 = 0
var i3 = 0
while (!hasFinished) {
while (i1 < largestSize1 && i2 < largestSize2) {
func(
tensor1Data, tensor1Offset + i1 * tensor1Stride,
tensor2Data, tensor2Offset + i2 * tensor2Stride,
tensor3Data, tensor3Offset + i3 * tensor3Stride
)
i1 = i1 + 1
i2 = i2 + 1
i3 = i3 + 1
}
if (i1 == largestSize1) {
val r = updateCounter(tensor1, counter1, tensor1Offset, largestDim1)
hasFinished = r._1
tensor1Offset = r._2
i1 = 0
}
if (i2 == largestSize2) {
val r = updateCounter(tensor2, counter2, tensor2Offset, largestDim2)
hasFinished = r._1
tensor2Offset = r._2
i2 = 0
}
if (i3 == largestSize3) {
val r = updateCounter(tensor3, counter3, tensor3Offset, largestDim3)
hasFinished = r._1
        tensor3Offset = r._2
i3 = 0
}
}
}
}
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/tensor/DenseTensorApply.scala | Scala | apache-2.0 | 14,797 |
package spire
package std
import spire.algebra.{Eq, EuclideanRing, IsIntegral, NRoot, Order, Signed, TruncatedDivisionCRing}
import spire.math.BitString
import spire.util.Opt
import java.lang.Math
trait LongIsEuclideanRing extends EuclideanRing[Long] {
override def minus(a:Long, b:Long): Long = a - b
def negate(a:Long): Long = -a
def one: Long = 1L
def plus(a:Long, b:Long): Long = a + b
override def pow(a: Long, b:Int): Long = spire.math.pow(a, b)
override def times(a:Long, b:Long): Long = a * b
def zero: Long = 0L
override def fromInt(n: Int): Long = n
def euclideanFunction(a:Long): BigInt = BigInt(a).abs
override def equotmod(a: Long, b: Long): (Long, Long) = spire.math.equotmod(a, b)
def equot(a: Long, b: Long): Long = spire.math.equot(a, b)
def emod(a: Long, b: Long): Long = spire.math.emod(a, b)
def gcd(a:Long, b:Long)(implicit ev: Eq[Long]): Long = spire.math.gcd(a, b)
def lcm(a:Long, b:Long)(implicit ev: Eq[Long]): Long = spire.math.lcm(a, b)
}
// Not included in Instances trait!
trait LongIsNRoot extends NRoot[Long] {
def nroot(x: Long, n: Int): Long = {
def findnroot(prev: Long, add: Long): Long = {
val next = prev | add
val e = spire.math.pow(next, n)
if (e == x || add == 0) {
next
} else if (e <= 0 || e > x) {
findnroot(prev, add >> 1)
} else {
findnroot(next, add >> 1)
}
}
if (n < 1) throw new IllegalArgumentException(s"nroot($n)")
else if (n == 1) x
else findnroot(0, 1L << ((65 - n) / n))
}
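  // Rough examples of the bit-by-bit search above: nroot(125L, 3) == 5 (exact), nroot(2L, 2) == 1
  // (integer root rounded towards zero), nroot(x, 1) == x, and n < 1 throws IllegalArgumentException.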
def log(a:Long): Long = Math.log(a.toDouble).toLong
def fpow(a:Long, b:Long): Long = spire.math.pow(a, b) // xyz
}
trait LongOrder extends Order[Long] {
override def eqv(x:Long, y:Long): Boolean = x == y
override def neqv(x:Long, y:Long): Boolean = x != y
override def gt(x: Long, y: Long): Boolean = x > y
override def gteqv(x: Long, y: Long): Boolean = x >= y
override def lt(x: Long, y: Long): Boolean = x < y
override def lteqv(x: Long, y: Long): Boolean = x <= y
def compare(x: Long, y: Long): Int = if (x < y) -1 else if (x == y) 0 else 1
}
trait LongSigned extends Signed[Long] with LongOrder {
override def signum(a: Long): Int = java.lang.Long.signum(a)
override def abs(a: Long): Long = if (a < 0L) -a else a
}
trait LongTruncatedDivision extends TruncatedDivisionCRing[Long] with LongSigned {
def toBigIntOpt(x: Long): Opt[BigInt] = Opt(BigInt(x))
def tquot(x: Long, y: Long): Long = x / y
def tmod(x: Long, y: Long): Long = x % y
}
trait LongIsReal extends IsIntegral[Long] with LongTruncatedDivision {
def toDouble(n: Long): Double = n.toDouble
def toBigInt(n: Long): BigInt = BigInt(n)
}
@SerialVersionUID(0L)
class LongIsBitString extends BitString[Long] with Serializable {
def one: Long = -1L
def zero: Long = 0L
def and(a: Long, b: Long): Long = a & b
def or(a: Long, b: Long): Long = a | b
def complement(a: Long): Long = ~a
override def xor(a: Long, b: Long): Long = a ^ b
def signed: Boolean = true
def width: Int = 64
def toHexString(n: Long): String = java.lang.Long.toHexString(n)
def bitCount(n: Long): Int = java.lang.Long.bitCount(n)
def highestOneBit(n: Long): Long = java.lang.Long.highestOneBit(n)
def lowestOneBit(n: Long): Long = java.lang.Long.lowestOneBit(n)
def numberOfLeadingZeros(n: Long): Int = java.lang.Long.numberOfLeadingZeros(n)
def numberOfTrailingZeros(n: Long): Int = java.lang.Long.numberOfTrailingZeros(n)
def leftShift(n: Long, i: Int): Long = n << i
def rightShift(n: Long, i: Int): Long = n >> i
def signedRightShift(n: Long, i: Int): Long = n >>> i
def rotateLeft(n: Long, i: Int): Long = java.lang.Long.rotateLeft(n, i)
def rotateRight(n: Long, i: Int): Long = java.lang.Long.rotateRight(n, i)
}
@SerialVersionUID(0L)
class LongAlgebra extends LongIsEuclideanRing with LongIsNRoot with LongIsReal with Serializable
trait LongInstances {
implicit final val LongBitString = new LongIsBitString
implicit final val LongAlgebra = new LongAlgebra
import spire.math.NumberTag._
implicit final val LongTag = new BuiltinIntTag[Long](0L, Long.MinValue, Long.MaxValue)
}
| adampingel/spire | core/src/main/scala/spire/std/long.scala | Scala | mit | 4,149 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.rx.html
import scala.language.experimental.macros
import scala.reflect.macros.{blackbox => sm}
/**
  * Macros that capture the given Rx elements together with their source code text, e.g. for rendering code examples alongside the elements they produce.
  */
private[html] object RxHtmlMacros {
def code(c: sm.Context)(rxElements: c.Tree*): c.Tree = {
import c.universe._
val codes = for (rxElement <- rxElements) yield {
val pos = rxElement.pos
val src = pos.source
val lineBlocks = src.content
.slice(pos.start, pos.`end`)
.mkString
.replaceAll("^\\\\{\\\\n", "")
.replaceAll("\\\\}$", "")
val lines = lineBlocks.split("\\n")
val columnOffsetInLine = lines.headOption
.map { firstLine =>
firstLine.size - firstLine.stripLeading().size
}
.getOrElse(0)
val trimmedSource = lines
.map { line =>
line.replaceFirst(s"^\\\\s{0,${columnOffsetInLine}}", "")
}
.mkString("\\n")
(rxElement, trimmedSource)
}
val elems = codes.map(_._1)
val source = codes.map(_._2).mkString("\\n")
q"wvlet.airframe.rx.html.RxCode(Seq(..${elems}), ${source})"
}
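  // Rough usage sketch (hypothetical call site, via whatever user-facing macro delegates here):
  //   code(div("hello"))
  // would expand to roughly RxCode(Seq(<the div element>), "div(\"hello\")"),
  // i.e. the elements paired with the de-indented source text of the call.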
}
| wvlet/airframe | airframe-rx-html/src/main/scala/wvlet/airframe/rx/html/RxHtmlMacros.scala | Scala | apache-2.0 | 1,633 |
package org.orbeon.oxf.xml
import javax.xml.transform.{Result, Source}
import org.orbeon.saxon.event._
import org.orbeon.saxon.trans.XPathException
import org.orbeon.saxon.{Configuration, Controller}
import org.xml.sax.SAXParseException
// Custom version of Saxon's IdentityTransformer which hooks up a `ComplexContentOutputter`
class IdentityTransformerWithFixup(config: Configuration) extends Controller(config) {
override def transform(source: Source, result: Result): Unit =
try {
val pipelineConfig = makePipelineConfiguration
val receiver =
getConfiguration.getSerializerFactory.getReceiver(result, pipelineConfig, getOutputProperties)
// To remove duplicate namespace declarations
val reducer = new NamespaceReducer
reducer.setUnderlyingReceiver(receiver)
reducer.setPipelineConfiguration(pipelineConfig)
// To fixup namespaces
val cco = new ComplexContentOutputter
cco.setHostLanguage(pipelineConfig.getHostLanguage)
cco.setPipelineConfiguration(pipelineConfig)
cco.setReceiver(reducer)
new Sender(pipelineConfig).send(source, cco, true)
} catch {
case xpe: XPathException ⇒
xpe.getException match {
case spe: SAXParseException if ! spe.getException.isInstanceOf[RuntimeException] ⇒ // NOP
case _ ⇒ reportFatalError(xpe)
}
throw xpe
}
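  // Rough usage sketch (hypothetical caller, using standard javax.xml.transform stream wrappers):
  //   val transformer = new IdentityTransformerWithFixup(config)
  //   transformer.transform(new javax.xml.transform.stream.StreamSource(in),
  //                         new javax.xml.transform.stream.StreamResult(out))
  // Namespaces are fixed up by the ComplexContentOutputter and duplicates removed by the NamespaceReducer.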
}
//
// The contents of this file are subject to the Mozilla Public License Version 1.0 (the "License");
// you may not use this file except in compliance with the License. You may obtain a copy of the
// License at http://www.mozilla.org/MPL/
//
// Software distributed under the License is distributed on an "AS IS" basis,
// WITHOUT WARRANTY OF ANY KIND, either express or implied.
// See the License for the specific language governing rights and limitations under the License.
//
// The Original Code is: all this file.
//
// The Initial Developer of the Original Code is Michael H. Kay
//
// Portions created by (your name) are Copyright (C) (your legal entity). All Rights Reserved.
//
// Contributor(s): None
//
| brunobuzzi/orbeon-forms | src/main/scala/org/orbeon/oxf/xml/IdentityTransformerWithFixup.scala | Scala | lgpl-2.1 | 2,122 |
package mavigator.uav
package mock
import scala.util.Random
import org.mavlink.Mavlink
import org.mavlink.enums._
import org.mavlink.messages._
class RandomFlightPlan {
private var time: Double = 0 //current time in seconds
private def millis = (time * 1000).toInt
private def micros = (time * 1E6).toInt
  // an oscillating function
private def osc[A](min: A, max: A, period: Double, offset: Double = 0)(implicit n: Numeric[A]): A = {
val amplitude = (n.toDouble(max) - n.toDouble(min)) / 2
val base = (n.toDouble(max) + n.toDouble(min)) / 2
val factor = math.sin(2 * math.Pi * (time + offset) / period)
n.fromInt((base + amplitude * factor).toInt)
}
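  // Rough usage sketch (values are illustrative): osc(1000, 2000, 6) swings between roughly 1000 and 2000
  // with a 6-second period, starting at the midpoint 1500 at time = 0; the offset parameter shifts the
  // phase, which is how the four motor outputs below are kept out of sync with each other.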
private var x: Double = 0.0
private var y: Double = 0.0
private var vX: Double = 0.0
private var vY: Double = 0.0
def tick(delta: Double) {
val aX = Random.nextDouble() * 5
val aY = Random.nextDouble() * 5
x += vX * delta
y += vY * delta
vX += aX * delta
vY += aY * delta
time += delta
}
private final val EarthRadius = 6000000 //m
private final val StartLat = 46.518513 //deg N
private final val StartLon = 6.566923 //deg E
def position = GlobalPositionInt(
millis,
(StartLat + x / EarthRadius).toInt,
(StartLon + y / EarthRadius).toInt,
0,
0,
(vX * 100).toShort,
(vY * 100).toShort,
0,
0
)
def attitude = Attitude(
millis,
(math.sin(2 * math.Pi * time / 12) * math.Pi / 8).toFloat,
(math.sin(2 * math.Pi * time / 18) * math.Pi / 9).toFloat,
(2 * math.Pi * time / 6).toFloat,
0,
0,
0
)
def heartbeat = Heartbeat(
MavType.MavTypeGeneric.toByte,
MavAutopilot.MavAutopilotGeneric.toByte,
(MavModeFlag.MavModeFlagSafetyArmed | MavModeFlag.MavModeFlagManualInputEnabled).toByte,
0, //no custom mode
MavState.MavStateActive.toByte,
Mavlink.MavlinkVersion
)
private final val DistanceMin: Short = 10
private final val DistanceMax: Short = 500
def distance = DistanceSensor(
timeBootMs = millis,
minDistance = DistanceMin,
maxDistance = DistanceMax,
currentDistance = osc(DistanceMin, DistanceMax, 6),
`type` = MavDistanceSensor.MavDistanceSensorUltrasound.toByte,
id = 0: Byte,
orientation = -1: Byte,
covariance = 3: Byte)
private final val MotorsMax: Short = 2000 //usec, ppm signal => 100%
private final val MotorsMin: Short = 1000 //usec, ppm signal => 0%
def motors = ServoOutputRaw(
timeUsec = micros,
port = 0: Byte,
servo1Raw = osc(MotorsMin, MotorsMax, 6, 0),
servo2Raw = osc(MotorsMin, MotorsMax, 6, 5),
servo3Raw = osc(MotorsMin, MotorsMax, 6, 2),
servo4Raw = osc(MotorsMin, MotorsMax, 6, 4),
servo5Raw = 0,
servo6Raw = 0,
servo7Raw = 0,
servo8Raw = 0
)
}
| project-condor/mavigator | mavigator-uav/src/main/scala/mavigator/uav/mock/RandomFlightPlan.scala | Scala | gpl-3.0 | 2,790 |
package provingground.induction
import provingground._, HoTT._
import shapeless._
object SubstInstances {
implicit def indConsShape[S <: HList,
H <: Term with Subs[H],
Fb <: Term with Subs[Fb],
ConstructorType <: Term with Subs[ConstructorType],
Index <: HList: TermList]
: Subst[IndexedConstructorShape[S, H, Fb, ConstructorType, Index]] =
new Subst[IndexedConstructorShape[S, H, Fb, ConstructorType, Index]] {
def subst(a: IndexedConstructorShape[S, H, Fb, ConstructorType, Index])(
x: Term,
y: Term) =
a.subs(x, y)
}
implicit def consShape[S <: HList,
H <: Term with Subs[H],
ConstructorType <: Term with Subs[ConstructorType]]
: Subst[ConstructorShape[S, H, ConstructorType]] =
new Subst[ConstructorShape[S, H, ConstructorType]] {
def subst(a: ConstructorShape[S, H, ConstructorType])(
x: Term,
y: Term
) = a.subs(x, y)
}
implicit def typFmaily[H <: Term with Subs[H],
F <: Term with Subs[F],
Index <: HList: TermList]
: Subst[TypFamilyPtn[H, F, Index]] = new Subst[TypFamilyPtn[H, F, Index]] {
def subst(a: TypFamilyPtn[H, F, Index])(
x: Term,
y: Term
) = a.subs(x, y)
}
implicit def iterFunc[O <: Term with Subs[O], F <: Term with Subs[F]]
: Subst[IterFuncShape[O, F]] =
new Subst[IterFuncShape[O, F]] {
def subst(a: IterFuncShape[O, F])(x: Term, y: Term) = a.subs(x, y)
}
implicit def indexedIterFunc[H <: Term with Subs[H],
F <: Term with Subs[F],
Fb <: Term with Subs[Fb],
Index <: HList: TermList]
: Subst[IndexedIterFuncShape[H, F, Fb, Index]] =
new Subst[IndexedIterFuncShape[H, F, Fb, Index]] {
def subst(a: IndexedIterFuncShape[H, F, Fb, Index])(
x: Term,
y: Term
) = a.subs(x, y)
}
}
| siddhartha-gadgil/ProvingGround | core/src/main/scala/provingground/induction/SubstInstances.scala | Scala | mit | 2,103 |
package gh2013.payloads
import gh2013.models.User
import net.liftweb.json.JsonAST.JValue
case class MemberEventPayload(action: String, member: User)
object MemberEventPayload
{
def apply(json: JValue): Option[MemberEventPayload] =
{
val n2s = gh3.node2String(json)(_)
val action = n2s("action")
val member = User(json \\ "member")
val params = Seq(action, member)
if(params.forall(_.isDefined))
Some(MemberEventPayload(action.get, member.get))
else None
}
}
| mgoeminne/github_etl | src/main/scala/gh2013/payloads/MemberEventPayload.scala | Scala | mit | 517 |
package org.scalajs.testsuite.javalib.time
import java.time._
import java.time.chrono.{IsoEra, IsoChronology}
import java.time.format.DateTimeParseException
import java.time.temporal._
import org.junit.Test
import org.junit.Assert._
import org.scalajs.testsuite.utils.AssertThrows._
class LocalDateTest extends TemporalTest[LocalDate] {
import DateTimeTestUtil._
import LocalDate._
import ChronoField._
import ChronoUnit._
val someDate = of(2011, 2, 28)
val leapDate = of(2012, 2, 29)
val samples = Seq(MIN, ofEpochDay(-1), ofEpochDay(0), ofEpochDay(1), someDate,
leapDate, MAX)
def isSupported(unit: ChronoUnit): Boolean = unit.isDateBased
def isSupported(field: ChronoField): Boolean = field.isDateBased
override def expectedRangeFor(accessor: LocalDate, field: TemporalField): ValueRange = {
field match {
case DAY_OF_MONTH => ValueRange.of(1, accessor.lengthOfMonth)
case DAY_OF_YEAR => ValueRange.of(1, accessor.lengthOfYear)
case ALIGNED_WEEK_OF_MONTH =>
ValueRange.of(1, if (accessor.lengthOfMonth > 28) 5 else 4)
case YEAR_OF_ERA =>
val maxYear = if (accessor.getEra == IsoEra.CE) 999999999 else 1000000000
ValueRange.of(1, maxYear)
case _ =>
super.expectedRangeFor(accessor, field)
}
}
@Test def test_getLong(): Unit = {
for (d <- samples) {
assertEquals(d.getDayOfWeek.getValue.toLong, d.getLong(DAY_OF_WEEK))
assertEquals(d.getDayOfMonth.toLong, d.getLong(DAY_OF_MONTH))
assertEquals(d.getDayOfYear.toLong, d.getLong(DAY_OF_YEAR))
assertEquals(d.toEpochDay, d.getLong(EPOCH_DAY))
assertEquals(d.getMonthValue.toLong, d.getLong(MONTH_OF_YEAR))
assertEquals(d.getYear.toLong, d.getLong(YEAR))
assertEquals(d.getEra.getValue.toLong, d.getLong(ERA))
}
assertEquals(1L, MIN.getLong(ALIGNED_DAY_OF_WEEK_IN_MONTH))
assertEquals(1L, MIN.getLong(ALIGNED_DAY_OF_WEEK_IN_YEAR))
assertEquals(1L, MIN.getLong(ALIGNED_WEEK_OF_MONTH))
assertEquals(1L, MIN.getLong(ALIGNED_WEEK_OF_YEAR))
assertEquals(-11999999988L, MIN.getLong(PROLEPTIC_MONTH))
assertEquals(1000000000L, MIN.getLong(YEAR_OF_ERA))
assertEquals(7L, someDate.getLong(ALIGNED_DAY_OF_WEEK_IN_MONTH))
assertEquals(3L, someDate.getLong(ALIGNED_DAY_OF_WEEK_IN_YEAR))
assertEquals(4L, someDate.getLong(ALIGNED_WEEK_OF_MONTH))
assertEquals(9L, someDate.getLong(ALIGNED_WEEK_OF_YEAR))
assertEquals(24133L, someDate.getLong(PROLEPTIC_MONTH))
assertEquals(2011L, someDate.getLong(YEAR_OF_ERA))
assertEquals(1L, leapDate.getLong(ALIGNED_DAY_OF_WEEK_IN_MONTH))
assertEquals(4L, leapDate.getLong(ALIGNED_DAY_OF_WEEK_IN_YEAR))
assertEquals(5L, leapDate.getLong(ALIGNED_WEEK_OF_MONTH))
assertEquals(9L, leapDate.getLong(ALIGNED_WEEK_OF_YEAR))
assertEquals(24145L, leapDate.getLong(PROLEPTIC_MONTH))
assertEquals(2012L, leapDate.getLong(YEAR_OF_ERA))
assertEquals(3L, MAX.getLong(ALIGNED_DAY_OF_WEEK_IN_MONTH))
assertEquals(1L, MAX.getLong(ALIGNED_DAY_OF_WEEK_IN_YEAR))
assertEquals(5L, MAX.getLong(ALIGNED_WEEK_OF_MONTH))
assertEquals(53L, MAX.getLong(ALIGNED_WEEK_OF_YEAR))
assertEquals(11999999999L, MAX.getLong(PROLEPTIC_MONTH))
assertEquals(999999999L, MAX.getLong(YEAR_OF_ERA))
}
@Test def test_getChronology(): Unit = {
for (d <- samples)
assertEquals(IsoChronology.INSTANCE, d.getChronology)
}
@Test def test_getEra(): Unit = {
assertEquals(IsoEra.BCE, MIN.getEra)
assertEquals(IsoEra.CE, someDate.getEra)
assertEquals(IsoEra.CE, leapDate.getEra)
assertEquals(IsoEra.CE, MAX.getEra)
}
@Test def test_getYear(): Unit = {
assertEquals(-999999999, MIN.getYear)
assertEquals(2011, someDate.getYear)
assertEquals(2012, leapDate.getYear)
assertEquals(999999999, MAX.getYear)
}
@Test def test_getMonthValue(): Unit = {
for (d <- samples)
assertEquals(d.getMonth.getValue, d.getMonthValue)
}
@Test def test_getMonth(): Unit = {
assertEquals(Month.JANUARY, MIN.getMonth)
assertEquals(Month.FEBRUARY, someDate.getMonth)
assertEquals(Month.FEBRUARY, leapDate.getMonth)
assertEquals(Month.DECEMBER, MAX.getMonth)
}
@Test def test_getDayOfMonth(): Unit = {
assertEquals(1, MIN.getDayOfMonth)
assertEquals(28, someDate.getDayOfMonth)
assertEquals(29, leapDate.getDayOfMonth)
assertEquals(31, MAX.getDayOfMonth)
}
@Test def test_getDayOfYear(): Unit = {
assertEquals(1, MIN.getDayOfYear)
assertEquals(59, someDate.getDayOfYear)
assertEquals(60, leapDate.getDayOfYear)
assertEquals(366, of(2012, 12, 31).getDayOfYear)
assertEquals(365, MAX.getDayOfYear)
}
@Test def test_getDayOfWeek(): Unit = {
assertEquals(DayOfWeek.MONDAY, MIN.getDayOfWeek)
assertEquals(DayOfWeek.MONDAY, someDate.getDayOfWeek)
assertEquals(DayOfWeek.WEDNESDAY, leapDate.getDayOfWeek)
assertEquals(DayOfWeek.FRIDAY, MAX.getDayOfWeek)
}
@Test def test_isLeapYear(): Unit = {
assertFalse(MIN.isLeapYear)
assertTrue(of(-400, 6, 30).isLeapYear)
assertFalse(of(-100, 3, 1).isLeapYear)
assertTrue(of(0, 1, 1).isLeapYear)
assertFalse(of(1900, 9, 30).isLeapYear)
assertTrue(of(2000, 1, 1).isLeapYear)
assertFalse(someDate.isLeapYear)
assertTrue(leapDate.isLeapYear)
assertFalse(MAX.isLeapYear)
}
@Test def test_lengthOfMonth(): Unit = {
for (d <- samples ++ Seq(of(2001, 2, 1), of(2012, 9, 30)))
assertEquals(d.getMonth.length(d.isLeapYear), d.lengthOfMonth)
}
@Test def test_lengthOfYear(): Unit = {
for (d <- samples)
assertEquals(if (d.isLeapYear) 366 else 365, d.lengthOfYear)
}
@Test def test_with(): Unit = {
testDateTime(MAX.`with`(DAY_OF_WEEK, 1))(of(999999999, 12, 27))
testDateTime(MAX.`with`(DAY_OF_WEEK, 5))(MAX)
testDateTime(MIN.`with`(DAY_OF_WEEK, 1))(MIN)
testDateTime(MIN.`with`(DAY_OF_WEEK, 7))(of(-999999999, 1, 7))
testDateTime(someDate.`with`(DAY_OF_WEEK, 1))(someDate)
testDateTime(someDate.`with`(DAY_OF_WEEK, 7))(of(2011, 3, 6))
testDateTime(leapDate.`with`(DAY_OF_WEEK, 1))(of(2012, 2, 27))
testDateTime(leapDate.`with`(DAY_OF_WEEK, 7))(of(2012, 3, 4))
testDateTime(MAX.`with`(ALIGNED_DAY_OF_WEEK_IN_MONTH, 1))(of(999999999, 12, 29))
testDateTime(MAX.`with`(ALIGNED_DAY_OF_WEEK_IN_MONTH, 3))(MAX)
testDateTime(MIN.`with`(ALIGNED_DAY_OF_WEEK_IN_MONTH, 1))(MIN)
testDateTime(MIN.`with`(ALIGNED_DAY_OF_WEEK_IN_MONTH, 7))(of(-999999999, 1, 7))
testDateTime(someDate.`with`(ALIGNED_DAY_OF_WEEK_IN_MONTH, 1))(of(2011, 2, 22))
testDateTime(someDate.`with`(ALIGNED_DAY_OF_WEEK_IN_MONTH, 7))(someDate)
testDateTime(leapDate.`with`(ALIGNED_DAY_OF_WEEK_IN_MONTH, 1))(leapDate)
testDateTime(leapDate.`with`(ALIGNED_DAY_OF_WEEK_IN_MONTH, 7))(of(2012, 3, 6))
testDateTime(MAX.`with`(ALIGNED_DAY_OF_WEEK_IN_YEAR, 1))(MAX)
testDateTime(MIN.`with`(ALIGNED_DAY_OF_WEEK_IN_YEAR, 1))(MIN)
testDateTime(MIN.`with`(ALIGNED_DAY_OF_WEEK_IN_YEAR, 7))(of(-999999999, 1, 7))
testDateTime(someDate.`with`(ALIGNED_DAY_OF_WEEK_IN_YEAR, 1))(of(2011, 2, 26))
testDateTime(someDate.`with`(ALIGNED_DAY_OF_WEEK_IN_YEAR, 7))(of(2011, 3, 4))
testDateTime(leapDate.`with`(ALIGNED_DAY_OF_WEEK_IN_YEAR, 1))(of(2012, 2, 26))
testDateTime(leapDate.`with`(ALIGNED_DAY_OF_WEEK_IN_YEAR, 7))(of(2012, 3, 3))
testDateTime(someDate.`with`(DAY_OF_MONTH, 1))(of(2011, 2, 1))
testDateTime(leapDate.`with`(DAY_OF_MONTH, 28))(of(2012, 2, 28))
testDateTime(someDate.`with`(DAY_OF_YEAR, 1))(of(2011, 1, 1))
testDateTime(someDate.`with`(DAY_OF_YEAR, 365))(of(2011, 12, 31))
testDateTime(leapDate.`with`(DAY_OF_YEAR, 366))(of(2012, 12, 31))
for {
d1 <- samples
d2 <- samples
} {
testDateTime(d1.`with`(EPOCH_DAY, d2.toEpochDay))(d2)
}
testDateTime(MAX.`with`(ALIGNED_WEEK_OF_MONTH, 1))(of(999999999, 12, 3))
testDateTime(MAX.`with`(ALIGNED_WEEK_OF_MONTH, 5))(MAX)
testDateTime(MIN.`with`(ALIGNED_WEEK_OF_MONTH, 1))(MIN)
testDateTime(MIN.`with`(ALIGNED_WEEK_OF_MONTH, 5))(of(-999999999, 1, 29))
testDateTime(someDate.`with`(ALIGNED_WEEK_OF_MONTH, 1))(of(2011, 2, 7))
testDateTime(someDate.`with`(ALIGNED_WEEK_OF_MONTH, 5))(of(2011, 3, 7))
testDateTime(leapDate.`with`(ALIGNED_WEEK_OF_MONTH, 1))(of(2012, 2, 1))
testDateTime(leapDate.`with`(ALIGNED_WEEK_OF_MONTH, 5))(leapDate)
testDateTime(MAX.`with`(ALIGNED_WEEK_OF_YEAR, 1))(of(999999999, 1, 1))
testDateTime(MAX.`with`(ALIGNED_WEEK_OF_YEAR, 53))(MAX)
testDateTime(MIN.`with`(ALIGNED_WEEK_OF_YEAR, 1))(MIN)
testDateTime(MIN.`with`(ALIGNED_WEEK_OF_YEAR, 53))(of(-999999999, 12, 31))
testDateTime(someDate.`with`(ALIGNED_WEEK_OF_YEAR, 1))(of(2011, 1, 3))
testDateTime(someDate.`with`(ALIGNED_WEEK_OF_YEAR, 53))(of(2012, 1, 2))
testDateTime(leapDate.`with`(ALIGNED_WEEK_OF_YEAR, 1))(of(2012, 1, 4))
testDateTime(leapDate.`with`(ALIGNED_WEEK_OF_YEAR, 53))(of(2013, 1, 2))
testDateTime(MAX.`with`(MONTH_OF_YEAR, 2))(of(999999999, 2, 28))
testDateTime(MAX.`with`(MONTH_OF_YEAR, 11))(of(999999999, 11, 30))
testDateTime(someDate.`with`(MONTH_OF_YEAR, 1))(of(2011, 1, 28))
testDateTime(leapDate.`with`(MONTH_OF_YEAR, 2))(leapDate)
testDateTime(MAX.`with`(PROLEPTIC_MONTH, 1))(of(0, 2, 29))
testDateTime(MIN.`with`(PROLEPTIC_MONTH, -1))(of(-1, 12, 1))
testDateTime(someDate.`with`(PROLEPTIC_MONTH, -11999999988L))(of(-999999999, 1, 28))
testDateTime(leapDate.`with`(PROLEPTIC_MONTH, 11999999999L))(of(999999999, 12, 29))
testDateTime(MIN.`with`(YEAR_OF_ERA, 1000000000))(MIN)
testDateTime(MIN.`with`(YEAR_OF_ERA, 1))(of(0, 1, 1))
testDateTime(MAX.`with`(YEAR_OF_ERA, 999999999))(MAX)
testDateTime(MAX.`with`(YEAR_OF_ERA, 1))(of(1, 12, 31))
testDateTime(leapDate.`with`(YEAR_OF_ERA, 2011))(someDate)
testDateTime(MIN.`with`(YEAR, -999999999))(MIN)
testDateTime(MIN.`with`(YEAR, 999999999))(of(999999999, 1, 1))
testDateTime(MAX.`with`(YEAR, -999999999))(of(-999999999, 12, 31))
testDateTime(MAX.`with`(YEAR, 999999999))(MAX)
testDateTime(leapDate.`with`(YEAR, 2011))(someDate)
testDateTime(MIN.`with`(ERA, 0))(MIN)
testDateTime(MAX.`with`(ERA, 0))(of(-999999998, 12, 31))
testDateTime(MAX.`with`(ERA, 1))(MAX)
testDateTime(someDate.`with`(ERA, 1))(someDate)
testDateTime(leapDate.`with`(ERA, 0))(of(-2011, 2, 28))
expectThrows(classOf[DateTimeException], MAX.`with`(DAY_OF_WEEK, 6))
expectThrows(classOf[DateTimeException],
MAX.`with`(ALIGNED_DAY_OF_WEEK_IN_MONTH, 4))
expectThrows(classOf[DateTimeException],
MAX.`with`(ALIGNED_DAY_OF_WEEK_IN_YEAR, 2))
expectThrows(classOf[DateTimeException], someDate.`with`(DAY_OF_MONTH, 29))
expectThrows(classOf[DateTimeException], leapDate.`with`(DAY_OF_MONTH, 30))
expectThrows(classOf[DateTimeException], someDate.`with`(DAY_OF_YEAR, 366))
expectThrows(classOf[DateTimeException],
someDate.`with`(YEAR_OF_ERA, 1000000000))
expectThrows(classOf[DateTimeException], MIN.`with`(ERA, 1))
for (d <- samples) {
for (n <- Seq(Long.MinValue, 0L, 8L, Long.MaxValue)) {
expectThrows(classOf[DateTimeException], d.`with`(DAY_OF_WEEK, n))
expectThrows(classOf[DateTimeException],
d.`with`(ALIGNED_DAY_OF_WEEK_IN_MONTH, n))
expectThrows(classOf[DateTimeException],
d.`with`(ALIGNED_DAY_OF_WEEK_IN_YEAR, n))
}
for (n <- Seq(Long.MinValue, 0L, 32L, Long.MaxValue))
expectThrows(classOf[DateTimeException], d.`with`(DAY_OF_MONTH, n))
for (n <- Seq(Long.MinValue, 0L, 367L, Long.MaxValue))
expectThrows(classOf[DateTimeException], d.`with`(DAY_OF_YEAR, n))
for (n <- Seq(Long.MinValue, -365243219163L, 365241780472L, Long.MaxValue))
expectThrows(classOf[DateTimeException], d.`with`(EPOCH_DAY, n))
for (n <- Seq(Long.MinValue, 0L, 6L, Long.MaxValue)) {
expectThrows(classOf[DateTimeException],
d.`with`(ALIGNED_WEEK_OF_MONTH, n))
}
for (n <- Seq(Long.MinValue, 0L, 54L, Long.MaxValue)) {
expectThrows(classOf[DateTimeException],
d.`with`(ALIGNED_WEEK_OF_YEAR, n))
}
for (n <- Seq(Long.MinValue, 0L, 13L, Long.MaxValue))
expectThrows(classOf[DateTimeException], d.`with`(MONTH_OF_YEAR, n))
for (n <- Seq(Long.MinValue, -11999999989L, 12000000000L, Long.MaxValue))
expectThrows(classOf[DateTimeException], d.`with`(PROLEPTIC_MONTH, n))
for (n <- Seq(Long.MinValue, 0L, 1000000001L, Long.MaxValue))
expectThrows(classOf[DateTimeException], d.`with`(YEAR_OF_ERA, n))
for (n <- Seq(Long.MinValue, -1000000000L, 1000000000L, Long.MaxValue))
expectThrows(classOf[DateTimeException], d.`with`(YEAR, n))
for (n <- Seq(Long.MinValue, -1L, 2L, Long.MaxValue))
expectThrows(classOf[DateTimeException], d.`with`(ERA, n))
}
}
@Test def test_withYear(): Unit = {
testDateTime(MIN.withYear(-999999999))(MIN)
testDateTime(MIN.withYear(999999999))(of(999999999, 1, 1))
testDateTime(MAX.withYear(-999999999))(of(-999999999, 12, 31))
testDateTime(MAX.withYear(999999999))(MAX)
val years = Seq(Int.MinValue, -1000000000, 1000000000, Int.MaxValue)
for {
d <- samples
n <- years
} {
expectThrows(classOf[DateTimeException], d.withYear(n))
}
}
@Test def test_withMonth(): Unit = {
testDateTime(MAX.withMonth(2))(of(999999999, 2, 28))
testDateTime(MAX.withMonth(11))(of(999999999, 11, 30))
testDateTime(someDate.withMonth(1))(of(2011, 1, 28))
testDateTime(leapDate.withMonth(2))(leapDate)
val months = Seq(Int.MinValue, 0, 13, Int.MaxValue)
for {
d <- samples
n <- months
} {
expectThrows(classOf[DateTimeException], d.withMonth(n))
}
}
@Test def test_withDayOfMonth(): Unit = {
testDateTime(someDate.withDayOfMonth(1))(of(2011, 2, 1))
testDateTime(leapDate.withDayOfMonth(28))(of(2012, 2, 28))
expectThrows(classOf[DateTimeException], someDate.withDayOfMonth(29))
expectThrows(classOf[DateTimeException], leapDate.withDayOfMonth(30))
expectThrows(classOf[DateTimeException], of(0, 4, 30).withDayOfMonth(31))
val days = Seq(Int.MinValue, 0, 32, Int.MaxValue)
for {
d <- samples
n <- days
} {
expectThrows(classOf[DateTimeException], d.withDayOfMonth(n))
}
}
@Test def test_withDayOfYear(): Unit = {
testDateTime(someDate.withDayOfYear(1))(of(2011, 1, 1))
testDateTime(someDate.withDayOfYear(365))(of(2011, 12, 31))
testDateTime(leapDate.withDayOfYear(366))(of(2012, 12, 31))
expectThrows(classOf[DateTimeException], someDate.withDayOfYear(366))
val days = Seq(Int.MinValue, 0, 367, Int.MaxValue)
for {
d <- samples
n <- days
} {
expectThrows(classOf[DateTimeException], d.withDayOfMonth(n))
}
}
@Test def test_plus(): Unit = {
val values = Seq(Long.MinValue, Int.MinValue.toLong, -1000L, -366L, -365L,
-100L, -12L, -10L, -7L, -1L, 0L, 1L, 7L, 10L, 12L, 100L,
365L, 366L, 1000L, Int.MaxValue.toLong, Long.MaxValue)
for {
d <- samples
n <- values
} {
testDateTime(d.plus(n, DAYS))(d.plusDays(n))
testDateTime(d.plus(n, WEEKS))(d.plusWeeks(n))
testDateTime(d.plus(n, MONTHS))(d.plusMonths(n))
testDateTime(d.plus(n, YEARS))(d.plusYears(n))
testDateTime(d.plus(n, DECADES))(d.plusYears(Math.multiplyExact(n, 10)))
testDateTime(d.plus(n, CENTURIES))(d.plusYears(Math.multiplyExact(n, 100)))
testDateTime(d.plus(n, MILLENNIA))(d.plusYears(Math.multiplyExact(n, 1000)))
testDateTime(d.plus(n, ERAS))(d.`with`(ERA, Math.addExact(n, d.get(ERA))))
}
}
@Test def test_plusYears(): Unit = {
for (d <- samples)
testDateTime(d.plusYears(0))(d)
testDateTime(someDate.plusYears(-2))(of(2009, 2, 28))
testDateTime(someDate.plusYears(-1))(of(2010, 2, 28))
testDateTime(someDate.plusYears(1))(of(2012, 2, 28))
testDateTime(someDate.plusYears(2))(of(2013, 2, 28))
testDateTime(leapDate.plusYears(-2))(of(2010, 2, 28))
testDateTime(leapDate.plusYears(-1))(someDate)
testDateTime(leapDate.plusYears(1))(of(2013, 2, 28))
testDateTime(leapDate.plusYears(2))(of(2014, 2, 28))
testDateTime(MIN.plusYears(1999999998))(of(999999999, 1, 1))
testDateTime(MAX.plusYears(-1999999998))(of(-999999999, 12, 31))
expectThrows(classOf[DateTimeException], MIN.plusYears(-1))
expectThrows(classOf[DateTimeException], MIN.plusYears(1999999999))
expectThrows(classOf[DateTimeException], MAX.plusYears(-1999999999))
expectThrows(classOf[DateTimeException], MAX.plusYears(1))
expectThrows(classOf[DateTimeException], MIN.plusYears(Long.MinValue))
expectThrows(classOf[DateTimeException], MAX.plusYears(Long.MaxValue))
}
@Test def test_plusMonths(): Unit = {
for (d <- samples)
testDateTime(d.plusMonths(0))(d)
testDateTime(someDate.plusMonths(-12))(of(2010, 2, 28))
testDateTime(someDate.plusMonths(-1))(of(2011, 1, 28))
testDateTime(someDate.plusMonths(1))(of(2011, 3, 28))
testDateTime(someDate.plusMonths(12))(of(2012, 2, 28))
testDateTime(leapDate.plusMonths(-12))(someDate)
testDateTime(leapDate.plusMonths(-1))(of(2012, 1, 29))
testDateTime(leapDate.plusMonths(1))(of(2012, 3, 29))
testDateTime(leapDate.plusMonths(12))(of(2013, 2, 28))
testDateTime(of(2011, 1, 31).plusMonths(1))(someDate)
testDateTime(of(2011, 3, 31).plusMonths(-1))(someDate)
testDateTime(of(2011, 3, 31).plusMonths(1))(of(2011, 4, 30))
testDateTime(of(2012, 1, 31).plusMonths(1))(leapDate)
testDateTime(of(2012, 3, 31).plusMonths(-1))(leapDate)
testDateTime(of(2012, 3, 31).plusMonths(1))(of(2012, 4, 30))
testDateTime(MIN.plusMonths(23999999987L))(of(999999999, 12, 1))
testDateTime(MAX.plusMonths(-23999999987L))(of(-999999999, 1, 31))
expectThrows(classOf[DateTimeException], MIN.plusMonths(-1))
expectThrows(classOf[DateTimeException], MIN.plusMonths(23999999988L))
expectThrows(classOf[DateTimeException], MAX.plusMonths(-23999999988L))
expectThrows(classOf[DateTimeException], MAX.plusMonths(1))
expectThrows(classOf[DateTimeException], MIN.plusMonths(Long.MinValue))
expectThrows(classOf[DateTimeException], MAX.plusMonths(Long.MaxValue))
}
@Test def test_plusWeeks(): Unit = {
for (d <- samples)
testDateTime(d.plusWeeks(0))(d)
testDateTime(someDate.plusWeeks(-53))(of(2010, 2, 22))
testDateTime(someDate.plusWeeks(-52))(of(2010, 3, 1))
testDateTime(someDate.plusWeeks(-1))(of(2011, 2, 21))
testDateTime(someDate.plusWeeks(1))(of(2011, 3, 7))
testDateTime(someDate.plusWeeks(52))(of(2012, 2, 27))
testDateTime(someDate.plusWeeks(53))(of(2012, 3, 5))
testDateTime(leapDate.plusWeeks(-53))(of(2011, 2, 23))
testDateTime(leapDate.plusWeeks(-52))(of(2011, 3, 2))
testDateTime(leapDate.plusWeeks(-1))(of(2012, 2, 22))
testDateTime(leapDate.plusWeeks(1))(of(2012, 3, 7))
testDateTime(leapDate.plusWeeks(52))(of(2013, 2, 27))
testDateTime(leapDate.plusWeeks(53))(of(2013, 3, 6))
testDateTime(MIN.plusWeeks(104354999947L))(of(999999999, 12, 27))
testDateTime(MAX.plusWeeks(-104354999947L))(of(-999999999, 1, 5))
expectThrows(classOf[DateTimeException], MIN.plusWeeks(-1))
expectThrows(classOf[DateTimeException], MIN.plusWeeks(104354999948L))
expectThrows(classOf[DateTimeException], MAX.plusWeeks(-1043549999478L))
expectThrows(classOf[DateTimeException], MAX.plusWeeks(1))
expectThrows(classOf[ArithmeticException], MIN.plusWeeks(Long.MinValue))
expectThrows(classOf[ArithmeticException], MAX.plusWeeks(Long.MaxValue))
}
@Test def test_plusDays(): Unit = {
for (d <- samples)
testDateTime(d.plusDays(0))(d)
testDateTime(someDate.plusDays(-365))(of(2010, 2, 28))
testDateTime(someDate.plusDays(-1))(of(2011, 2, 27))
testDateTime(someDate.plusDays(1))(of(2011, 3, 1))
testDateTime(someDate.plusDays(365))(of(2012, 2, 28))
testDateTime(someDate.plusDays(366))(leapDate)
testDateTime(leapDate.plusDays(-366))(someDate)
testDateTime(leapDate.plusDays(-365))(of(2011, 3, 1))
testDateTime(leapDate.plusDays(-1))(of(2012, 2, 28))
testDateTime(leapDate.plusDays(1))(of(2012, 3, 1))
testDateTime(leapDate.plusDays(365))(of(2013, 2, 28))
testDateTime(leapDate.plusDays(366))(of(2013, 3, 1))
testDateTime(MIN.plusDays(730484999633L))(MAX)
testDateTime(MAX.plusDays(-730484999633L))(MIN)
expectThrows(classOf[DateTimeException], MIN.plusDays(-1))
expectThrows(classOf[DateTimeException], MIN.plusDays(730484999634L))
expectThrows(classOf[DateTimeException], MAX.plusDays(-730484999634L))
expectThrows(classOf[DateTimeException], MAX.plusDays(1))
expectThrows(classOf[ArithmeticException],
ofEpochDay(-1).plusDays(Long.MinValue))
expectThrows(classOf[DateTimeException],
ofEpochDay(0).plusDays(Long.MinValue))
expectThrows(classOf[DateTimeException],
ofEpochDay(0).plusDays(Long.MaxValue))
expectThrows(classOf[ArithmeticException],
ofEpochDay(1).plusDays(Long.MaxValue))
}
@Test def test_minusYears(): Unit = {
for (d <- samples)
testDateTime(d.minusYears(0))(d)
testDateTime(someDate.minusYears(2))(of(2009, 2, 28))
testDateTime(someDate.minusYears(1))(of(2010, 2, 28))
testDateTime(someDate.minusYears(-1))(of(2012, 2, 28))
testDateTime(someDate.minusYears(-2))(of(2013, 2, 28))
testDateTime(leapDate.minusYears(2))(of(2010, 2, 28))
testDateTime(leapDate.minusYears(1))(someDate)
testDateTime(leapDate.minusYears(-1))(of(2013, 2, 28))
testDateTime(leapDate.minusYears(-2))(of(2014, 2, 28))
testDateTime(MIN.minusYears(-1999999998))(of(999999999, 1, 1))
testDateTime(MAX.minusYears(1999999998))(of(-999999999, 12, 31))
expectThrows(classOf[DateTimeException], MIN.minusYears(1))
expectThrows(classOf[DateTimeException], MIN.minusYears(-1999999999))
expectThrows(classOf[DateTimeException], MAX.minusYears(1999999999))
expectThrows(classOf[DateTimeException], MAX.minusYears(-1))
expectThrows(classOf[DateTimeException], MIN.minusYears(Long.MaxValue))
expectThrows(classOf[DateTimeException], MAX.minusYears(Long.MinValue))
}
@Test def test_minusMonths(): Unit = {
for (d <- samples)
testDateTime(d.minusMonths(0))(d)
testDateTime(someDate.minusMonths(12))(of(2010, 2, 28))
testDateTime(someDate.minusMonths(1))(of(2011, 1, 28))
testDateTime(someDate.minusMonths(-1))(of(2011, 3, 28))
testDateTime(someDate.minusMonths(-12))(of(2012, 2, 28))
testDateTime(leapDate.minusMonths(12))(someDate)
testDateTime(leapDate.minusMonths(1))(of(2012, 1, 29))
testDateTime(leapDate.minusMonths(-1))(of(2012, 3, 29))
testDateTime(leapDate.minusMonths(-12))(of(2013, 2, 28))
testDateTime(of(2011, 1, 31).minusMonths(-1))(someDate)
testDateTime(of(2011, 3, 31).minusMonths(1))(someDate)
testDateTime(of(2011, 3, 31).minusMonths(-1))(of(2011, 4, 30))
testDateTime(of(2012, 1, 31).minusMonths(-1))(leapDate)
testDateTime(of(2012, 3, 31).minusMonths(1))(leapDate)
testDateTime(of(2012, 3, 31).minusMonths(-1))(of(2012, 4, 30))
testDateTime(MIN.minusMonths(-23999999987L))(of(999999999, 12, 1))
testDateTime(MAX.minusMonths(23999999987L))(of(-999999999, 1, 31))
expectThrows(classOf[DateTimeException], MIN.minusMonths(1))
expectThrows(classOf[DateTimeException], MIN.minusMonths(-23999999988L))
expectThrows(classOf[DateTimeException], MAX.minusMonths(23999999988L))
expectThrows(classOf[DateTimeException], MAX.minusMonths(-1))
expectThrows(classOf[DateTimeException], MIN.minusMonths(Long.MaxValue))
expectThrows(classOf[DateTimeException], MAX.minusMonths(Long.MinValue))
}
@Test def test_minusWeeks(): Unit = {
for (d <- samples)
testDateTime(d.minusWeeks(0))(d)
testDateTime(someDate.minusWeeks(53))(of(2010, 2, 22))
testDateTime(someDate.minusWeeks(52))(of(2010, 3, 1))
testDateTime(someDate.minusWeeks(1))(of(2011, 2, 21))
testDateTime(someDate.minusWeeks(-1))(of(2011, 3, 7))
testDateTime(someDate.minusWeeks(-52))(of(2012, 2, 27))
testDateTime(someDate.minusWeeks(-53))(of(2012, 3, 5))
testDateTime(leapDate.minusWeeks(53))(of(2011, 2, 23))
testDateTime(leapDate.minusWeeks(52))(of(2011, 3, 2))
testDateTime(leapDate.minusWeeks(1))(of(2012, 2, 22))
testDateTime(leapDate.minusWeeks(-1))(of(2012, 3, 7))
testDateTime(leapDate.minusWeeks(-52))(of(2013, 2, 27))
testDateTime(leapDate.minusWeeks(-53))(of(2013, 3, 6))
testDateTime(MIN.minusWeeks(-104354999947L))(of(999999999, 12, 27))
testDateTime(MAX.minusWeeks(104354999947L))(of(-999999999, 1, 5))
expectThrows(classOf[DateTimeException], MIN.minusWeeks(1))
expectThrows(classOf[DateTimeException], MIN.minusWeeks(-104354999948L))
    expectThrows(classOf[DateTimeException], MAX.minusWeeks(104354999948L))
expectThrows(classOf[DateTimeException], MAX.minusWeeks(-1))
expectThrows(classOf[ArithmeticException], MIN.minusWeeks(Long.MaxValue))
expectThrows(classOf[ArithmeticException], MAX.minusWeeks(Long.MinValue))
}
@Test def test_minusDays(): Unit = {
for (d <- samples)
testDateTime(d.minusDays(0))(d)
testDateTime(someDate.minusDays(365))(of(2010, 2, 28))
testDateTime(someDate.minusDays(1))(of(2011, 2, 27))
testDateTime(someDate.minusDays(-1))(of(2011, 3, 1))
testDateTime(someDate.minusDays(-365))(of(2012, 2, 28))
testDateTime(someDate.minusDays(-366))(leapDate)
testDateTime(leapDate.minusDays(366))(someDate)
testDateTime(leapDate.minusDays(365))(of(2011, 3, 1))
testDateTime(leapDate.minusDays(1))(of(2012, 2, 28))
testDateTime(leapDate.minusDays(-1))(of(2012, 3, 1))
testDateTime(leapDate.minusDays(-365))(of(2013, 2, 28))
testDateTime(leapDate.minusDays(-366))(of(2013, 3, 1))
testDateTime(MIN.minusDays(-730484999633L))(MAX)
testDateTime(MAX.minusDays(730484999633L))(MIN)
expectThrows(classOf[DateTimeException], MIN.minusDays(1))
expectThrows(classOf[DateTimeException], MIN.minusDays(-730484999634L))
expectThrows(classOf[DateTimeException], MAX.minusDays(730484999634L))
expectThrows(classOf[DateTimeException], MAX.minusDays(-1))
expectThrows(classOf[ArithmeticException],
ofEpochDay(-2).minusDays(Long.MaxValue))
expectThrows(classOf[ArithmeticException],
ofEpochDay(1).minusDays(Long.MinValue))
}
@Test def test_adjustInto(): Unit = {
for {
d1 <- samples
d2 <- samples
} {
testDateTime(d1.adjustInto(d2))(d1)
}
val ts = Seq(LocalTime.MIN, LocalTime.MAX)
for {
d <- samples
t <- ts
} {
expectThrows(classOf[DateTimeException], d.adjustInto(t))
}
}
@Test def test_until(): Unit = {
val samples1 = samples ++ Seq(of(2012, 1, 29), of(2012, 1, 30), of(2012, 2, 28),
of(2013, 2, 28), of(2013, 3, 1), of(0, 12, 31), of(1, 1, 1))
for {
d <- samples1
u <- dateBasedUnits
} {
assertEquals(0L, d.until(d, u))
}
assertEquals(730484999633L, MIN.until(MAX, DAYS))
assertEquals(366L, someDate.until(leapDate, DAYS))
assertEquals(28L, leapDate.until(of(2012, 3, 28), DAYS))
assertEquals(104354999947L, MIN.until(MAX, WEEKS))
assertEquals(12L, someDate.until(leapDate, MONTHS))
assertEquals(1L, of(2012, 1, 29).until(leapDate, MONTHS))
assertEquals(0L, of(2012, 1, 30).until(leapDate, MONTHS))
assertEquals(1999999998L, MIN.until(MAX, YEARS))
assertEquals(1L, someDate.until(of(2012, 2, 28), YEARS))
assertEquals(0L, leapDate.until(of(2013, 2, 28), YEARS))
assertEquals(199999999L, MIN.until(MAX, DECADES))
assertEquals(19999999L, MIN.until(MAX, CENTURIES))
assertEquals(1999999L, MIN.until(MAX, MILLENNIA))
assertEquals(1L, MIN.until(MAX, ERAS))
assertEquals(1L, of(0, 12, 31).until(of(1, 1, 1), ERAS))
for {
d1 <- samples1
d2 <- samples1 if d2.isAfter(d1)
u <- dateBasedUnits
} {
assertEquals(-d1.until(d2, u), d2.until(d1, u))
}
for (d <- samples1)
assertEquals(Period.ZERO, d.until(d))
for {
d1 <- samples1
d2 <- samples1
u <- timeBasedUnits
} {
expectThrows(classOf[UnsupportedTemporalTypeException], d1.until(d2, u))
}
assertEquals(Period.of(1999999998, 11, 30), MIN.until(MAX))
assertEquals(Period.of(-1999999998, -11, -30), MAX.until(MIN))
assertEquals(Period.of(1, 0, 1), someDate.until(leapDate))
assertEquals(Period.of(-1, 0, -1), leapDate.until(someDate))
assertEquals(Period.of(0, 11, 30), leapDate.until(of(2013, 2, 28)))
assertEquals(Period.of(0, -11, -28), of(2013, 2, 28).until(leapDate))
assertEquals(Period.of(1, 0, 1), leapDate.until(of(2013, 3, 1)))
assertEquals(Period.of(-1, 0, -1), of(2013, 3, 1).until(leapDate))
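    // Clarifying note (added; not in the original test): until() counts whole months
    // first, then leftover days. Backwards from 2013-05-01, one whole month lands on
    // 2013-04-01, so reaching 2013-03-30 needs 2 more days (P0Y-1M-2D) and 2013-03-31
    // needs only 1 (P0Y-1M-1D). Forwards, one month from 2013-03-30 or 2013-03-31
    // clips to 2013-04-30, plus 1 day to 2013-05-01, giving P0Y1M1D in both cases.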
assertEquals(Period.of(0, 1, 1), of(2013, 3, 30).until(of(2013, 5, 1)))
assertEquals(Period.of(0, -1, -2), of(2013, 5, 1).until(of(2013, 3, 30)))
assertEquals(Period.of(0, 1, 1), of(2013, 3, 31).until(of(2013, 5, 1)))
assertEquals(Period.of(0, -1, -1), of(2013, 5, 1).until(of(2013, 3, 31)))
}
@Test def test_toEpochDay(): Unit = {
assertEquals(-365243219162L, MIN.toEpochDay)
assertEquals(-1L, of(1969, 12, 31).toEpochDay)
assertEquals(0L, of(1970, 1, 1).toEpochDay)
assertEquals(15033L, someDate.toEpochDay)
assertEquals(15399L, leapDate.toEpochDay)
assertEquals(365241780471L, MAX.toEpochDay)
}
@Test def test_compareTo(): Unit = {
assertEquals(0, MIN.compareTo(MIN))
assertTrue(MIN.compareTo(someDate) < 0)
assertTrue(MIN.compareTo(MAX) < 0)
assertTrue(someDate.compareTo(MIN) > 0)
assertEquals(0, someDate.compareTo(someDate))
assertTrue(someDate.compareTo(MAX) < 0)
assertTrue(MAX.compareTo(MIN) > 0)
assertTrue(MAX.compareTo(someDate) > 0)
assertEquals(0, MAX.compareTo(MAX))
}
@Test def test_isAfter(): Unit = {
assertFalse(MIN.isAfter(MIN))
assertFalse(MIN.isAfter(someDate))
assertFalse(MIN.isAfter(MAX))
assertTrue(someDate.isAfter(MIN))
assertFalse(someDate.isAfter(someDate))
assertFalse(someDate.isAfter(MAX))
assertTrue(MAX.isAfter(MIN))
assertTrue(MAX.isAfter(someDate))
assertFalse(MAX.isAfter(MAX))
}
@Test def test_isBefore(): Unit = {
assertFalse(MIN.isBefore(MIN))
assertTrue(MIN.isBefore(someDate))
assertTrue(MIN.isBefore(MAX))
assertFalse(someDate.isBefore(MIN))
assertFalse(someDate.isBefore(someDate))
assertTrue(someDate.isBefore(MAX))
assertFalse(MAX.isBefore(MIN))
assertFalse(MAX.isBefore(someDate))
assertFalse(MAX.isBefore(MAX))
}
@Test def test_toString(): Unit = {
assertEquals("-999999999-01-01", MIN.toString)
assertEquals("-0001-12-31", of(-1, 12, 31).toString)
assertEquals("0000-01-01", of(0, 1, 1).toString)
assertEquals("2011-02-28", someDate.toString)
assertEquals("2012-02-29", leapDate.toString)
assertEquals("9999-12-31", of(9999, 12, 31).toString)
assertEquals("+10000-01-01", of(10000, 1, 1).toString)
assertEquals("+999999999-12-31", MAX.toString)
}
@Test def test_now(): Unit = {
assertEquals(IsoEra.CE, now().getEra)
}
@Test def test_of(): Unit = {
val years = Seq(Int.MinValue, -1000000000, -999999999, 0, 999999999,
1000000000, Int.MaxValue)
val days = Seq(Int.MinValue, 0, 1, 28, 29, 30, 31, 32, Int.MaxValue)
for {
year <- years
month <- Month.values
day <- days
} {
testDateTime(of(year, month, day))(of(year, month.getValue, day))
}
expectThrows(classOf[DateTimeException], of(Int.MinValue, 1, 1))
expectThrows(classOf[DateTimeException], of(-1000000000, 1, 1))
expectThrows(classOf[DateTimeException], of(2011, Int.MinValue, 1))
expectThrows(classOf[DateTimeException], of(2011, 0, 1))
expectThrows(classOf[DateTimeException], of(2011, 13, 1))
expectThrows(classOf[DateTimeException], of(2011, Int.MaxValue, 1))
for (month <- Month.values) {
val m = month.getValue
expectThrows(classOf[DateTimeException], of(2011, m, Int.MinValue))
expectThrows(classOf[DateTimeException], of(2011, m, 0))
expectThrows(classOf[DateTimeException],
of(2011, m, month.length(false) + 1))
expectThrows(classOf[DateTimeException],
of(2012, m, month.length(true) + 1))
expectThrows(classOf[DateTimeException], of(2011, m, Int.MaxValue))
}
}
@Test def test_ofYearDay(): Unit = {
testDateTime(ofYearDay(2011, 1))(of(2011, 1, 1))
testDateTime(ofYearDay(2011, 31))(of(2011, 1, 31))
testDateTime(ofYearDay(2011, 32))(of(2011, 2, 1))
testDateTime(ofYearDay(2011, 59))(of(2011, 2, 28))
testDateTime(ofYearDay(2011, 60))(of(2011, 3, 1))
testDateTime(ofYearDay(2011, 90))(of(2011, 3, 31))
testDateTime(ofYearDay(2011, 91))(of(2011, 4, 1))
testDateTime(ofYearDay(2011, 120))(of(2011, 4, 30))
testDateTime(ofYearDay(2011, 121))(of(2011, 5, 1))
testDateTime(ofYearDay(2011, 151))(of(2011, 5, 31))
testDateTime(ofYearDay(2011, 152))(of(2011, 6, 1))
testDateTime(ofYearDay(2011, 181))(of(2011, 6, 30))
testDateTime(ofYearDay(2011, 182))(of(2011, 7, 1))
testDateTime(ofYearDay(2011, 212))(of(2011, 7, 31))
testDateTime(ofYearDay(2011, 213))(of(2011, 8, 1))
testDateTime(ofYearDay(2011, 243))(of(2011, 8, 31))
testDateTime(ofYearDay(2011, 244))(of(2011, 9, 1))
testDateTime(ofYearDay(2011, 273))(of(2011, 9, 30))
testDateTime(ofYearDay(2011, 274))(of(2011, 10, 1))
testDateTime(ofYearDay(2011, 304))(of(2011, 10, 31))
testDateTime(ofYearDay(2011, 305))(of(2011, 11, 1))
testDateTime(ofYearDay(2011, 334))(of(2011, 11, 30))
testDateTime(ofYearDay(2011, 335))(of(2011, 12, 1))
testDateTime(ofYearDay(2011, 365))(of(2011, 12, 31))
testDateTime(ofYearDay(2012, 1))(of(2012, 1, 1))
testDateTime(ofYearDay(2012, 31))(of(2012, 1, 31))
testDateTime(ofYearDay(2012, 32))(of(2012, 2, 1))
testDateTime(ofYearDay(2012, 60))(of(2012, 2, 29))
testDateTime(ofYearDay(2012, 61))(of(2012, 3, 1))
testDateTime(ofYearDay(2012, 91))(of(2012, 3, 31))
testDateTime(ofYearDay(2012, 92))(of(2012, 4, 1))
testDateTime(ofYearDay(2012, 121))(of(2012, 4, 30))
testDateTime(ofYearDay(2012, 122))(of(2012, 5, 1))
testDateTime(ofYearDay(2012, 152))(of(2012, 5, 31))
testDateTime(ofYearDay(2012, 153))(of(2012, 6, 1))
testDateTime(ofYearDay(2012, 182))(of(2012, 6, 30))
testDateTime(ofYearDay(2012, 183))(of(2012, 7, 1))
testDateTime(ofYearDay(2012, 213))(of(2012, 7, 31))
testDateTime(ofYearDay(2012, 214))(of(2012, 8, 1))
testDateTime(ofYearDay(2012, 244))(of(2012, 8, 31))
testDateTime(ofYearDay(2012, 245))(of(2012, 9, 1))
testDateTime(ofYearDay(2012, 274))(of(2012, 9, 30))
testDateTime(ofYearDay(2012, 275))(of(2012, 10, 1))
testDateTime(ofYearDay(2012, 305))(of(2012, 10, 31))
testDateTime(ofYearDay(2012, 306))(of(2012, 11, 1))
testDateTime(ofYearDay(2012, 335))(of(2012, 11, 30))
testDateTime(ofYearDay(2012, 336))(of(2012, 12, 1))
testDateTime(ofYearDay(2012, 366))(of(2012, 12, 31))
expectThrows(classOf[DateTimeException], ofYearDay(Int.MinValue, 1))
expectThrows(classOf[DateTimeException], ofYearDay(-1000000000, 1))
expectThrows(classOf[DateTimeException], ofYearDay(1000000000, 1))
expectThrows(classOf[DateTimeException], ofYearDay(Int.MaxValue, 1))
expectThrows(classOf[DateTimeException], ofYearDay(2011, Int.MinValue))
expectThrows(classOf[DateTimeException], ofYearDay(2011, 0))
expectThrows(classOf[DateTimeException], ofYearDay(2011, 366))
expectThrows(classOf[DateTimeException], ofYearDay(2012, 367))
expectThrows(classOf[DateTimeException], ofYearDay(2011, Int.MaxValue))
}
@Test def test_ofEpochDay(): Unit = {
testDateTime(ofEpochDay(-365243219162L))(MIN)
testDateTime(ofEpochDay(-1))(of(1969, 12, 31))
testDateTime(ofEpochDay(0))(of(1970, 1, 1))
testDateTime(ofEpochDay(1))(of(1970, 1, 2))
testDateTime(ofEpochDay(365241780471L))(MAX)
expectThrows(classOf[DateTimeException], ofEpochDay(Long.MinValue))
expectThrows(classOf[DateTimeException], ofEpochDay(-365243219163L))
expectThrows(classOf[DateTimeException], ofEpochDay(365241780472L))
expectThrows(classOf[DateTimeException], ofEpochDay(Long.MaxValue))
}
@Test def test_from(): Unit = {
for (d <- samples)
testDateTime(from(d))(d)
for (t <- Seq(LocalTime.MIN, LocalTime.NOON, LocalTime.MAX))
expectThrows(classOf[DateTimeException], from(t))
}
@Test def test_parse(): Unit = {
assertEquals(parse("-999999999-01-01"), MIN)
assertEquals(parse("-0001-12-31"), of(-1, 12, 31))
assertEquals(parse("0000-01-01"), of(0, 1, 1))
assertEquals(parse("2011-02-28"), someDate)
assertEquals(parse("2012-02-29"), leapDate)
assertEquals(parse("9999-12-31"), of(9999, 12, 31))
assertEquals(parse("+10000-01-01"), of(10000, 1, 1))
assertEquals(parse("+999999999-12-31"), MAX)
expectThrows(classOf[DateTimeParseException], parse("0000-01-99"))
expectThrows(classOf[DateTimeParseException], parse("0000-01-900"))
expectThrows(classOf[DateTimeParseException], parse("aaaa-01-30"))
expectThrows(classOf[DateTimeParseException], parse("2012-13-30"))
expectThrows(classOf[DateTimeParseException], parse("2012-01-34"))
expectThrows(classOf[DateTimeParseException], parse("2005-02-29"))
}
}
| sjrd/scala-js-java-time | testSuite/shared/src/test/scala/org/scalajs/testsuite/javalib/time/LocalDateTest.scala | Scala | bsd-3-clause | 37,688 |
import sbt._
import Keys._
import play.Project._
object ApplicationBuild extends Build {
val appName = "shorty"
val appVersion = "1.0-SNAPSHOT"
val appDependencies = Seq(
// Add your project dependencies here,
"postgresql" % "postgresql" % "9.1-901.jdbc4",
jdbc,
anorm,
"org.scalatest" % "scalatest_2.10" % "1.9.1" % "test",
"junit" % "junit" % "4.8.1" % "test"
)
val main = play.Project(appName, appVersion, appDependencies).settings(
// Add your own project settings here
resolvers ++= Seq(
DefaultMavenRepository,
Resolver.url("Play", url("http://download.playframework.org/ivy-releases/"))(Resolver.ivyStylePatterns),
"Typesafe Repository" at "http://repo.typesafe.com/typesave/releases/"
))
}
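// Usage note (added commentary; not part of the original build definition): with this
// file under project/, the Play application would typically be started with `sbt run`,
// and the ScalaTest/JUnit dependencies declared above exercised with `sbt test`.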
| Tigermelville/laughing-octo-nemesis | project/Build.scala | Scala | mit | 783 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn
import org.scalatest.FlatSpec
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.LayerException
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest
import scala.util.Random
@com.intel.analytics.bigdl.tags.Parallel
class ReshapeSpec extends FlatSpec {
"A Reshape Module " should "generate correct output and grad" in {
val module = new Reshape[Double](Array(3, 2))
for (batchSize <- 1 to 4) {
val input = Tensor[Double](batchSize, 1, 6)
input.rand()
val inputOrg = input.clone()
val output = module.forward(input)
val gradOutput = Tensor[Double](batchSize, 3, 2)
gradOutput.rand()
val gradOutputOrg = gradOutput.clone()
val gradInput = module.backward(input, gradOutput)
assert(output.nDimension() == 3)
assert(output.size(1) == batchSize)
assert(output.size(2) == 3)
assert(output.size(3) == 2)
assert(gradInput.isSameSizeAs(input))
for (i <- 1 to batchSize) {
for (j <- 0 to 5) {
assert(input(Array(i, 1, j + 1)) == output(Array(i, j / 2 + 1, j % 2 + 1)))
assert(gradInput(Array(i, 1, j + 1)) == gradOutput(Array(i, j / 2 + 1, j % 2 + 1)))
}
}
assert(input == inputOrg)
assert(gradOutput == gradOutputOrg)
}
intercept[LayerException] {
module.forward(Tensor[Double](2, 2))
}
intercept[LayerException] {
module.forward(Tensor[Double](3, 2, 2))
}
}
"A Reshape Module default batch" should "generate correct output and grad" in {
val module = new Reshape[Double](Array(3, 2))
val input = Tensor[Double](2, 3)
input.rand()
val inputOrg = input.clone()
val output = module.forward(input)
val gradOutput = Tensor[Double](3, 2)
gradOutput.rand()
val gradOutputOrg = gradOutput.clone()
val gradInput = module.backward(input, gradOutput)
assert(output.nDimension() == 2)
assert(output.size(1) == 3)
assert(output.size(2) == 2)
for (j <- 0 to 5) {
assert(input(Array(j / 3 + 1, j % 3 + 1)) == output(Array(j / 2 + 1, j % 2 + 1)))
assert(gradInput(Array(j / 3 + 1, j % 3 + 1)) == gradOutput(Array(j / 2 + 1, j % 2 + 1)))
}
assert(input == inputOrg)
assert(gradOutput == gradOutputOrg)
}
"A Reshape Module disable batch" should "generate correct output and grad" in {
val module = new Reshape[Double](Array(3, 2), Some(false))
val input = Tensor[Double](1, 2, 3)
input.rand()
val inputOrg = input.clone()
val output = module.forward(input)
val gradOutput = Tensor[Double](3, 2)
gradOutput.rand()
val gradOutputOrg = gradOutput.clone()
val gradInput = module.backward(input, gradOutput)
assert(output.nDimension() == 2)
assert(output.size(1) == 3)
assert(output.size(2) == 2)
for (j <- 0 to 5) {
assert(input(Array(1, j / 3 + 1, j % 3 + 1)) == output(Array(j / 2 + 1, j % 2 + 1)))
assert(gradInput(Array(1, j / 3 + 1, j % 3 + 1)) == gradOutput(Array(j / 2 + 1, j % 2 + 1)))
}
assert(input == inputOrg)
assert(gradOutput == gradOutputOrg)
intercept[LayerException] {
module.forward(Tensor[Double](2, 3, 2))
}
}
"A Reshape Module enable batch" should "generate correct output and grad" in {
val module = new Reshape[Double](Array(3, 2), Some(true))
for (batchSize <- 1 to 4) {
val input = Tensor[Double](batchSize, 1, 6)
input.rand()
val inputOrg = input.clone()
val output = module.forward(input)
val gradOutput = Tensor[Double](batchSize, 3, 2)
gradOutput.rand()
val gradOutputOrg = gradOutput.clone()
val gradInput = module.backward(input, gradOutput)
assert(output.nDimension() == 3)
assert(output.size(1) == batchSize)
assert(output.size(2) == 3)
assert(output.size(3) == 2)
assert(gradInput.isSameSizeAs(input))
for (i <- 1 to batchSize) {
for (j <- 0 to 5) {
assert(input(Array(i, 1, j + 1)) == output(Array(i, j / 2 + 1, j % 2 + 1)))
assert(gradInput(Array(i, 1, j + 1)) == gradOutput(Array(i, j / 2 + 1, j % 2 + 1)))
}
}
assert(input == inputOrg)
assert(gradOutput == gradOutputOrg)
}
intercept[LayerException] {
module.forward(Tensor[Double](3, 2))
}
}
}
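// ---------------------------------------------------------------------------
// Minimal usage sketch (added for illustration; not part of the original spec).
// It relies only on the Reshape and Tensor APIs exercised in the tests above;
// the object name is hypothetical.
// ---------------------------------------------------------------------------
object ReshapeUsageSketch {
  def main(args: Array[String]): Unit = {
    val reshape = new Reshape[Double](Array(3, 2))
    val input = Tensor[Double](2, 3)
    input.rand()
    // forward() keeps the same 6 elements but views them as a (3 x 2) tensor
    val output = reshape.forward(input)
    // backward() maps a (3 x 2) gradient back onto the original (2 x 3) shape
    val gradInput = reshape.backward(input, output)
    println(s"output is ${output.size(1)} x ${output.size(2)}")          // 3 x 2
    println(s"gradInput is ${gradInput.size(1)} x ${gradInput.size(2)}") // 2 x 3
  }
}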
class ReshapeSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val reshape = Reshape[Float](Array(1, 4, 5)).setName("reshape")
val input = Tensor[Float](2, 2, 5).apply1( _ => Random.nextFloat())
runSerializationTest(reshape, input)
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/ReshapeSpec.scala | Scala | apache-2.0 | 5,284 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.vectorized
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.nio.charset.StandardCharsets
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.Random
import org.apache.arrow.vector.IntVector
import org.apache.spark.SparkFunSuite
import org.apache.spark.memory.MemoryMode
import org.apache.spark.sql.{RandomDataGenerator, Row}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.ArrowUtils
import org.apache.spark.sql.vectorized.{ArrowColumnVector, ColumnarBatch}
import org.apache.spark.unsafe.Platform
import org.apache.spark.unsafe.types.CalendarInterval
class ColumnarBatchSuite extends SparkFunSuite {
private def allocate(capacity: Int, dt: DataType, memMode: MemoryMode): WritableColumnVector = {
if (memMode == MemoryMode.OFF_HEAP) {
new OffHeapColumnVector(capacity, dt)
} else {
new OnHeapColumnVector(capacity, dt)
}
}
private def testVector(
name: String,
size: Int,
dt: DataType)(
block: WritableColumnVector => Unit): Unit = {
test(name) {
Seq(MemoryMode.ON_HEAP, MemoryMode.OFF_HEAP).foreach { mode =>
val vector = allocate(size, dt, mode)
try block(vector) finally {
vector.close()
}
}
}
}
testVector("Null APIs", 1024, IntegerType) {
column =>
val reference = mutable.ArrayBuffer.empty[Boolean]
var idx = 0
assert(!column.hasNull)
assert(column.numNulls() == 0)
column.appendNotNull()
reference += false
assert(!column.hasNull)
assert(column.numNulls() == 0)
column.appendNotNulls(3)
(1 to 3).foreach(_ => reference += false)
assert(!column.hasNull)
assert(column.numNulls() == 0)
column.appendNull()
reference += true
assert(column.hasNull)
assert(column.numNulls() == 1)
column.appendNulls(3)
(1 to 3).foreach(_ => reference += true)
assert(column.hasNull)
assert(column.numNulls() == 4)
idx = column.elementsAppended
column.putNotNull(idx)
reference += false
idx += 1
assert(column.hasNull)
assert(column.numNulls() == 4)
column.putNull(idx)
reference += true
idx += 1
assert(column.hasNull)
assert(column.numNulls() == 5)
column.putNulls(idx, 3)
reference += true
reference += true
reference += true
idx += 3
assert(column.hasNull)
assert(column.numNulls() == 8)
column.putNotNulls(idx, 4)
reference += false
reference += false
reference += false
reference += false
idx += 4
assert(column.hasNull)
assert(column.numNulls() == 8)
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.isNullAt(v._2))
}
}
testVector("Byte APIs", 1024, ByteType) {
column =>
val reference = mutable.ArrayBuffer.empty[Byte]
var values = (10 :: 20 :: 30 :: 40 :: 50 :: Nil).map(_.toByte).toArray
column.appendBytes(2, values, 0)
reference += 10.toByte
reference += 20.toByte
column.appendBytes(3, values, 2)
reference += 30.toByte
reference += 40.toByte
reference += 50.toByte
column.appendBytes(6, 60.toByte)
(1 to 6).foreach(_ => reference += 60.toByte)
column.appendByte(70.toByte)
reference += 70.toByte
var idx = column.elementsAppended
values = (1 :: 2 :: 3 :: 4 :: 5 :: Nil).map(_.toByte).toArray
column.putBytes(idx, 2, values, 0)
reference += 1
reference += 2
idx += 2
column.putBytes(idx, 3, values, 2)
reference += 3
reference += 4
reference += 5
idx += 3
column.putByte(idx, 9)
reference += 9
idx += 1
column.putBytes(idx, 3, 4)
reference += 4
reference += 4
reference += 4
idx += 3
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.getByte(v._2), "VectorType=" + column.getClass.getSimpleName)
}
}
testVector("Short APIs", 1024, ShortType) {
column =>
val seed = System.currentTimeMillis()
val random = new Random(seed)
val reference = mutable.ArrayBuffer.empty[Short]
var values = (10 :: 20 :: 30 :: 40 :: 50 :: Nil).map(_.toShort).toArray
column.appendShorts(2, values, 0)
reference += 10.toShort
reference += 20.toShort
column.appendShorts(3, values, 2)
reference += 30.toShort
reference += 40.toShort
reference += 50.toShort
column.appendShorts(6, 60.toShort)
(1 to 6).foreach(_ => reference += 60.toShort)
column.appendShort(70.toShort)
reference += 70.toShort
var idx = column.elementsAppended
values = (1 :: 2 :: 3 :: 4 :: 5 :: Nil).map(_.toShort).toArray
column.putShorts(idx, 2, values, 0)
reference += 1
reference += 2
idx += 2
column.putShorts(idx, 3, values, 2)
reference += 3
reference += 4
reference += 5
idx += 3
column.putShort(idx, 9)
reference += 9
idx += 1
column.putShorts(idx, 3, 4)
reference += 4
reference += 4
reference += 4
idx += 3
while (idx < column.capacity) {
val single = random.nextBoolean()
if (single) {
val v = random.nextInt().toShort
column.putShort(idx, v)
reference += v
idx += 1
} else {
val n = math.min(random.nextInt(column.capacity / 20), column.capacity - idx)
val v = (n + 1).toShort
column.putShorts(idx, n, v)
var i = 0
while (i < n) {
reference += v
i += 1
}
idx += n
}
}
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.getShort(v._2),
"Seed = " + seed + " VectorType=" + column.getClass.getSimpleName)
}
}
testVector("Int APIs", 1024, IntegerType) {
column =>
val seed = System.currentTimeMillis()
val random = new Random(seed)
val reference = mutable.ArrayBuffer.empty[Int]
var values = (10 :: 20 :: 30 :: 40 :: 50 :: Nil).toArray
column.appendInts(2, values, 0)
reference += 10
reference += 20
column.appendInts(3, values, 2)
reference += 30
reference += 40
reference += 50
column.appendInts(6, 60)
(1 to 6).foreach(_ => reference += 60)
column.appendInt(70)
reference += 70
var idx = column.elementsAppended
values = (1 :: 2 :: 3 :: 4 :: 5 :: Nil).toArray
column.putInts(idx, 2, values, 0)
reference += 1
reference += 2
idx += 2
column.putInts(idx, 3, values, 2)
reference += 3
reference += 4
reference += 5
idx += 3
val littleEndian = new Array[Byte](8)
littleEndian(0) = 7
littleEndian(1) = 1
littleEndian(4) = 6
littleEndian(6) = 1
column.putIntsLittleEndian(idx, 1, littleEndian, 4)
column.putIntsLittleEndian(idx + 1, 1, littleEndian, 0)
reference += 6 + (1 << 16)
reference += 7 + (1 << 8)
idx += 2
column.putIntsLittleEndian(idx, 2, littleEndian, 0)
reference += 7 + (1 << 8)
reference += 6 + (1 << 16)
idx += 2
while (idx < column.capacity) {
val single = random.nextBoolean()
if (single) {
val v = random.nextInt()
column.putInt(idx, v)
reference += v
idx += 1
} else {
val n = math.min(random.nextInt(column.capacity / 20), column.capacity - idx)
column.putInts(idx, n, n + 1)
var i = 0
while (i < n) {
reference += (n + 1)
i += 1
}
idx += n
}
}
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.getInt(v._2),
"Seed = " + seed + " VectorType=" + column.getClass.getSimpleName)
}
}
testVector("Long APIs", 1024, LongType) {
column =>
val seed = System.currentTimeMillis()
val random = new Random(seed)
val reference = mutable.ArrayBuffer.empty[Long]
var values = (10L :: 20L :: 30L :: 40L :: 50L :: Nil).toArray
column.appendLongs(2, values, 0)
reference += 10L
reference += 20L
column.appendLongs(3, values, 2)
reference += 30L
reference += 40L
reference += 50L
column.appendLongs(6, 60L)
(1 to 6).foreach(_ => reference += 60L)
column.appendLong(70L)
reference += 70L
var idx = column.elementsAppended
values = (1L :: 2L :: 3L :: 4L :: 5L :: Nil).toArray
column.putLongs(idx, 2, values, 0)
reference += 1
reference += 2
idx += 2
column.putLongs(idx, 3, values, 2)
reference += 3
reference += 4
reference += 5
idx += 3
val littleEndian = new Array[Byte](16)
littleEndian(0) = 7
littleEndian(1) = 1
littleEndian(8) = 6
littleEndian(10) = 1
column.putLongsLittleEndian(idx, 1, littleEndian, 8)
column.putLongsLittleEndian(idx + 1, 1, littleEndian, 0)
reference += 6 + (1 << 16)
reference += 7 + (1 << 8)
idx += 2
column.putLongsLittleEndian(idx, 2, littleEndian, 0)
reference += 7 + (1 << 8)
reference += 6 + (1 << 16)
idx += 2
while (idx < column.capacity) {
val single = random.nextBoolean()
if (single) {
val v = random.nextLong()
column.putLong(idx, v)
reference += v
idx += 1
} else {
val n = math.min(random.nextInt(column.capacity / 20), column.capacity - idx)
column.putLongs(idx, n, n + 1)
var i = 0
while (i < n) {
reference += (n + 1)
i += 1
}
idx += n
}
}
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.getLong(v._2), "idx=" + v._2 +
" Seed = " + seed + " VectorType=" + column.getClass.getSimpleName)
}
}
testVector("Float APIs", 1024, FloatType) {
column =>
val seed = System.currentTimeMillis()
val random = new Random(seed)
val reference = mutable.ArrayBuffer.empty[Float]
var values = (.1f :: .2f :: .3f :: .4f :: .5f :: Nil).toArray
column.appendFloats(2, values, 0)
reference += .1f
reference += .2f
column.appendFloats(3, values, 2)
reference += .3f
reference += .4f
reference += .5f
column.appendFloats(6, .6f)
(1 to 6).foreach(_ => reference += .6f)
column.appendFloat(.7f)
reference += .7f
var idx = column.elementsAppended
values = (1.0f :: 2.0f :: 3.0f :: 4.0f :: 5.0f :: Nil).toArray
column.putFloats(idx, 2, values, 0)
reference += 1.0f
reference += 2.0f
idx += 2
column.putFloats(idx, 3, values, 2)
reference += 3.0f
reference += 4.0f
reference += 5.0f
idx += 3
val buffer = new Array[Byte](8)
Platform.putFloat(buffer, Platform.BYTE_ARRAY_OFFSET, 2.234f)
Platform.putFloat(buffer, Platform.BYTE_ARRAY_OFFSET + 4, 1.123f)
if (ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN)) {
// Ensure array contains Little Endian floats
val bb = ByteBuffer.wrap(buffer).order(ByteOrder.LITTLE_ENDIAN)
Platform.putFloat(buffer, Platform.BYTE_ARRAY_OFFSET, bb.getFloat(0))
Platform.putFloat(buffer, Platform.BYTE_ARRAY_OFFSET + 4, bb.getFloat(4))
}
column.putFloats(idx, 1, buffer, 4)
column.putFloats(idx + 1, 1, buffer, 0)
reference += 1.123f
reference += 2.234f
idx += 2
column.putFloats(idx, 2, buffer, 0)
reference += 2.234f
reference += 1.123f
idx += 2
while (idx < column.capacity) {
val single = random.nextBoolean()
if (single) {
val v = random.nextFloat()
column.putFloat(idx, v)
reference += v
idx += 1
} else {
val n = math.min(random.nextInt(column.capacity / 20), column.capacity - idx)
val v = random.nextFloat()
column.putFloats(idx, n, v)
var i = 0
while (i < n) {
reference += v
i += 1
}
idx += n
}
}
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.getFloat(v._2),
"Seed = " + seed + " VectorType=" + column.getClass.getSimpleName)
}
}
testVector("Double APIs", 1024, DoubleType) {
column =>
val seed = System.currentTimeMillis()
val random = new Random(seed)
val reference = mutable.ArrayBuffer.empty[Double]
var values = (.1 :: .2 :: .3 :: .4 :: .5 :: Nil).toArray
column.appendDoubles(2, values, 0)
reference += .1
reference += .2
column.appendDoubles(3, values, 2)
reference += .3
reference += .4
reference += .5
column.appendDoubles(6, .6)
(1 to 6).foreach(_ => reference += .6)
column.appendDouble(.7)
reference += .7
var idx = column.elementsAppended
values = (1.0 :: 2.0 :: 3.0 :: 4.0 :: 5.0 :: Nil).toArray
column.putDoubles(idx, 2, values, 0)
reference += 1.0
reference += 2.0
idx += 2
column.putDoubles(idx, 3, values, 2)
reference += 3.0
reference += 4.0
reference += 5.0
idx += 3
val buffer = new Array[Byte](16)
Platform.putDouble(buffer, Platform.BYTE_ARRAY_OFFSET, 2.234)
Platform.putDouble(buffer, Platform.BYTE_ARRAY_OFFSET + 8, 1.123)
if (ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN)) {
// Ensure array contains Little Endian doubles
val bb = ByteBuffer.wrap(buffer).order(ByteOrder.LITTLE_ENDIAN)
Platform.putDouble(buffer, Platform.BYTE_ARRAY_OFFSET, bb.getDouble(0))
Platform.putDouble(buffer, Platform.BYTE_ARRAY_OFFSET + 8, bb.getDouble(8))
}
column.putDoubles(idx, 1, buffer, 8)
column.putDoubles(idx + 1, 1, buffer, 0)
reference += 1.123
reference += 2.234
idx += 2
column.putDoubles(idx, 2, buffer, 0)
reference += 2.234
reference += 1.123
idx += 2
while (idx < column.capacity) {
val single = random.nextBoolean()
if (single) {
val v = random.nextDouble()
column.putDouble(idx, v)
reference += v
idx += 1
} else {
val n = math.min(random.nextInt(column.capacity / 20), column.capacity - idx)
val v = random.nextDouble()
column.putDoubles(idx, n, v)
var i = 0
while (i < n) {
reference += v
i += 1
}
idx += n
}
}
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.getDouble(v._2),
"Seed = " + seed + " VectorType=" + column.getClass.getSimpleName)
}
}
testVector("String APIs", 7, StringType) {
column =>
val reference = mutable.ArrayBuffer.empty[String]
assert(column.arrayData().elementsAppended == 0)
val str = "string"
column.appendByteArray(str.getBytes(StandardCharsets.UTF_8),
0, str.getBytes(StandardCharsets.UTF_8).length)
reference += str
assert(column.arrayData().elementsAppended == 6)
var idx = column.elementsAppended
val values = ("Hello" :: "abc" :: Nil).toArray
column.putByteArray(idx, values(0).getBytes(StandardCharsets.UTF_8),
0, values(0).getBytes(StandardCharsets.UTF_8).length)
reference += values(0)
idx += 1
assert(column.arrayData().elementsAppended == 11)
column.putByteArray(idx, values(1).getBytes(StandardCharsets.UTF_8),
0, values(1).getBytes(StandardCharsets.UTF_8).length)
reference += values(1)
idx += 1
assert(column.arrayData().elementsAppended == 14)
      // Put just "llo", the suffix of "Hello"
val offset = column.putByteArray(idx, values(0).getBytes(StandardCharsets.UTF_8),
2, values(0).getBytes(StandardCharsets.UTF_8).length - 2)
reference += "llo"
idx += 1
assert(column.arrayData().elementsAppended == 17)
// Put the same "ll" at offset. This should not allocate more memory in the column.
column.putArray(idx, offset, 2)
reference += "ll"
idx += 1
assert(column.arrayData().elementsAppended == 17)
// Put a long string
val s = "abcdefghijklmnopqrstuvwxyz"
column.putByteArray(idx, (s + s).getBytes(StandardCharsets.UTF_8))
reference += (s + s)
idx += 1
assert(column.arrayData().elementsAppended == 17 + (s + s).length)
column.putNull(idx)
assert(column.getUTF8String(idx) == null)
idx += 1
reference.zipWithIndex.foreach { v =>
val errMsg = "VectorType=" + column.getClass.getSimpleName
assert(v._1.length == column.getArrayLength(v._2), errMsg)
assert(v._1 == column.getUTF8String(v._2).toString, errMsg)
}
column.reset()
assert(column.arrayData().elementsAppended == 0)
}
testVector("CalendarInterval APIs", 4, CalendarIntervalType) {
column =>
val reference = mutable.ArrayBuffer.empty[CalendarInterval]
val months = column.getChild(0)
val microseconds = column.getChild(1)
assert(months.dataType() == IntegerType)
assert(microseconds.dataType() == LongType)
months.putInt(0, 1)
microseconds.putLong(0, 100)
reference += new CalendarInterval(1, 100)
months.putInt(1, 0)
microseconds.putLong(1, 2000)
reference += new CalendarInterval(0, 2000)
column.putNull(2)
assert(column.getInterval(2) == null)
reference += null
months.putInt(3, 20)
microseconds.putLong(3, 0)
reference += new CalendarInterval(20, 0)
reference.zipWithIndex.foreach { case (v, i) =>
val errMsg = "VectorType=" + column.getClass.getSimpleName
assert(v == column.getInterval(i), errMsg)
if (v == null) assert(column.isNullAt(i), errMsg)
}
column.close()
}
testVector("Int Array", 10, new ArrayType(IntegerType, true)) {
column =>
// Fill the underlying data with all the arrays back to back.
val data = column.arrayData()
var i = 0
while (i < 6) {
data.putInt(i, i)
i += 1
}
// Populate it with arrays [0], [1, 2], null, [], [3, 4, 5]
column.putArray(0, 0, 1)
column.putArray(1, 1, 2)
column.putNull(2)
column.putArray(3, 3, 0)
column.putArray(4, 3, 3)
assert(column.getArray(0).numElements == 1)
assert(column.getArray(1).numElements == 2)
assert(column.isNullAt(2))
assert(column.getArray(2) == null)
assert(column.getArray(3).numElements == 0)
assert(column.getArray(4).numElements == 3)
val a1 = ColumnVectorUtils.toJavaIntArray(column.getArray(0))
val a2 = ColumnVectorUtils.toJavaIntArray(column.getArray(1))
val a3 = ColumnVectorUtils.toJavaIntArray(column.getArray(3))
val a4 = ColumnVectorUtils.toJavaIntArray(column.getArray(4))
assert(a1 === Array(0))
assert(a2 === Array(1, 2))
assert(a3 === Array.empty[Int])
assert(a4 === Array(3, 4, 5))
// Verify the ArrayData get APIs
assert(column.getArray(0).getInt(0) == 0)
assert(column.getArray(1).getInt(0) == 1)
assert(column.getArray(1).getInt(1) == 2)
assert(column.getArray(4).getInt(0) == 3)
assert(column.getArray(4).getInt(1) == 4)
assert(column.getArray(4).getInt(2) == 5)
// Add a longer array which requires resizing
column.reset()
val array = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
assert(data.capacity == 10)
data.reserve(array.length)
assert(data.capacity == array.length * 2)
data.putInts(0, array.length, array, 0)
column.putArray(0, 0, array.length)
assert(ColumnVectorUtils.toJavaIntArray(column.getArray(0)) === array)
}
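  // Layout note for the test above (explanatory comment; not from the original suite):
  // an array column keeps all element values back to back in arrayData(), and each row
  // stores only an (offset, length) pair via putArray(rowId, offset, length). For
  // example, putArray(1, 1, 2) marks row 1 as the two child elements starting at
  // position 1 (i.e. [1, 2]), while putArray(3, 3, 0) is an empty but non-null array.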
test("toArray for primitive types") {
(MemoryMode.ON_HEAP :: MemoryMode.OFF_HEAP :: Nil).foreach { memMode =>
val len = 4
val columnBool = allocate(len, new ArrayType(BooleanType, false), memMode)
val boolArray = Array(false, true, false, true)
boolArray.zipWithIndex.foreach { case (v, i) => columnBool.arrayData.putBoolean(i, v) }
columnBool.putArray(0, 0, len)
assert(columnBool.getArray(0).toBooleanArray === boolArray)
columnBool.close()
val columnByte = allocate(len, new ArrayType(ByteType, false), memMode)
val byteArray = Array[Byte](0, 1, 2, 3)
byteArray.zipWithIndex.foreach { case (v, i) => columnByte.arrayData.putByte(i, v) }
columnByte.putArray(0, 0, len)
assert(columnByte.getArray(0).toByteArray === byteArray)
columnByte.close()
val columnShort = allocate(len, new ArrayType(ShortType, false), memMode)
val shortArray = Array[Short](0, 1, 2, 3)
shortArray.zipWithIndex.foreach { case (v, i) => columnShort.arrayData.putShort(i, v) }
columnShort.putArray(0, 0, len)
assert(columnShort.getArray(0).toShortArray === shortArray)
columnShort.close()
val columnInt = allocate(len, new ArrayType(IntegerType, false), memMode)
val intArray = Array(0, 1, 2, 3)
intArray.zipWithIndex.foreach { case (v, i) => columnInt.arrayData.putInt(i, v) }
columnInt.putArray(0, 0, len)
assert(columnInt.getArray(0).toIntArray === intArray)
columnInt.close()
val columnLong = allocate(len, new ArrayType(LongType, false), memMode)
val longArray = Array[Long](0, 1, 2, 3)
longArray.zipWithIndex.foreach { case (v, i) => columnLong.arrayData.putLong(i, v) }
columnLong.putArray(0, 0, len)
assert(columnLong.getArray(0).toLongArray === longArray)
columnLong.close()
val columnFloat = allocate(len, new ArrayType(FloatType, false), memMode)
val floatArray = Array(0.0F, 1.1F, 2.2F, 3.3F)
floatArray.zipWithIndex.foreach { case (v, i) => columnFloat.arrayData.putFloat(i, v) }
columnFloat.putArray(0, 0, len)
assert(columnFloat.getArray(0).toFloatArray === floatArray)
columnFloat.close()
val columnDouble = allocate(len, new ArrayType(DoubleType, false), memMode)
val doubleArray = Array(0.0, 1.1, 2.2, 3.3)
doubleArray.zipWithIndex.foreach { case (v, i) => columnDouble.arrayData.putDouble(i, v) }
columnDouble.putArray(0, 0, len)
assert(columnDouble.getArray(0).toDoubleArray === doubleArray)
columnDouble.close()
}
}
test("Int Map") {
(MemoryMode.ON_HEAP :: MemoryMode.OFF_HEAP :: Nil).foreach { memMode =>
val column = allocate(10, new MapType(IntegerType, IntegerType, false), memMode)
(0 to 1).foreach { colIndex =>
val data = column.getChild(colIndex)
(0 to 5).foreach {i =>
data.putInt(i, i * (colIndex + 1))
}
}
// Populate it with maps [0->0], [1->2, 2->4], null, [], [3->6, 4->8, 5->10]
column.putArray(0, 0, 1)
column.putArray(1, 1, 2)
column.putNull(2)
assert(column.getMap(2) == null)
column.putArray(3, 3, 0)
column.putArray(4, 3, 3)
assert(column.getMap(0).numElements == 1)
assert(column.getMap(1).numElements == 2)
assert(column.isNullAt(2))
assert(column.getMap(3).numElements == 0)
assert(column.getMap(4).numElements == 3)
val a1 = ColumnVectorUtils.toJavaIntMap(column.getMap(0))
val a2 = ColumnVectorUtils.toJavaIntMap(column.getMap(1))
val a4 = ColumnVectorUtils.toJavaIntMap(column.getMap(3))
val a5 = ColumnVectorUtils.toJavaIntMap(column.getMap(4))
assert(a1.asScala == Map(0 -> 0))
assert(a2.asScala == Map(1 -> 2, 2 -> 4))
assert(a4.asScala == Map())
assert(a5.asScala == Map(3 -> 6, 4 -> 8, 5 -> 10))
column.close()
}
}
testVector(
"Struct Column",
10,
new StructType().add("int", IntegerType).add("double", DoubleType)) { column =>
val c1 = column.getChild(0)
val c2 = column.getChild(1)
assert(c1.dataType() == IntegerType)
assert(c2.dataType() == DoubleType)
c1.putInt(0, 123)
c2.putDouble(0, 3.45)
column.putNull(1)
assert(column.getStruct(1) == null)
c1.putInt(2, 456)
c2.putDouble(2, 5.67)
val s = column.getStruct(0)
assert(s.getInt(0) == 123)
assert(s.getDouble(1) == 3.45)
assert(column.isNullAt(1))
assert(column.getStruct(1) == null)
val s2 = column.getStruct(2)
assert(s2.getInt(0) == 456)
assert(s2.getDouble(1) == 5.67)
}
testVector("Nest Array in Array", 10, new ArrayType(new ArrayType(IntegerType, true), true)) {
column =>
val childColumn = column.arrayData()
val data = column.arrayData().arrayData()
(0 until 6).foreach {
case 3 => data.putNull(3)
case i => data.putInt(i, i)
}
// Arrays in child column: [0], [1, 2], [], [null, 4, 5]
childColumn.putArray(0, 0, 1)
childColumn.putArray(1, 1, 2)
childColumn.putArray(2, 2, 0)
childColumn.putArray(3, 3, 3)
// Arrays in column: [[0]], [[1, 2], []], [[], [null, 4, 5]], null
column.putArray(0, 0, 1)
column.putArray(1, 1, 2)
column.putArray(2, 2, 2)
column.putNull(3)
assert(column.getArray(0).getArray(0).toIntArray() === Array(0))
assert(column.getArray(1).getArray(0).toIntArray() === Array(1, 2))
assert(column.getArray(1).getArray(1).toIntArray() === Array())
assert(column.getArray(2).getArray(0).toIntArray() === Array())
assert(column.getArray(2).getArray(1).isNullAt(0))
assert(column.getArray(2).getArray(1).getInt(1) === 4)
assert(column.getArray(2).getArray(1).getInt(2) === 5)
assert(column.isNullAt(3))
}
private val structType: StructType = new StructType().add("i", IntegerType).add("l", LongType)
testVector(
"Nest Struct in Array",
10,
new ArrayType(structType, true)) { column =>
val data = column.arrayData()
val c0 = data.getChild(0)
val c1 = data.getChild(1)
// Structs in child column: (0, 0), (1, 10), (2, 20), (3, 30), (4, 40), (5, 50)
(0 until 6).foreach { i =>
c0.putInt(i, i)
c1.putLong(i, i * 10)
}
// Arrays in column: [(0, 0), (1, 10)], [(1, 10), (2, 20), (3, 30)],
// [(4, 40), (5, 50)]
column.putArray(0, 0, 2)
column.putArray(1, 1, 3)
column.putArray(2, 4, 2)
assert(column.getArray(0).getStruct(0, 2).toSeq(structType) === Seq(0, 0))
assert(column.getArray(0).getStruct(1, 2).toSeq(structType) === Seq(1, 10))
assert(column.getArray(1).getStruct(0, 2).toSeq(structType) === Seq(1, 10))
assert(column.getArray(1).getStruct(1, 2).toSeq(structType) === Seq(2, 20))
assert(column.getArray(1).getStruct(2, 2).toSeq(structType) === Seq(3, 30))
assert(column.getArray(2).getStruct(0, 2).toSeq(structType) === Seq(4, 40))
assert(column.getArray(2).getStruct(1, 2).toSeq(structType) === Seq(5, 50))
}
testVector(
"Nest Array in Struct",
10,
new StructType()
.add("int", IntegerType)
.add("array", new ArrayType(IntegerType, true))) { column =>
val c0 = column.getChild(0)
val c1 = column.getChild(1)
c0.putInt(0, 0)
c0.putInt(1, 1)
c0.putInt(2, 2)
val c1Child = c1.arrayData()
(0 until 6).foreach { i =>
c1Child.putInt(i, i)
}
// Arrays in c1: [0, 1], [2], [3, 4, 5]
c1.putArray(0, 0, 2)
c1.putArray(1, 2, 1)
c1.putArray(2, 3, 3)
assert(column.getStruct(0).getInt(0) === 0)
assert(column.getStruct(0).getArray(1).toIntArray() === Array(0, 1))
assert(column.getStruct(1).getInt(0) === 1)
assert(column.getStruct(1).getArray(1).toIntArray() === Array(2))
assert(column.getStruct(2).getInt(0) === 2)
assert(column.getStruct(2).getArray(1).toIntArray() === Array(3, 4, 5))
}
private val subSchema: StructType = new StructType()
.add("int", IntegerType)
.add("int", IntegerType)
testVector(
"Nest Struct in Struct",
10,
new StructType().add("int", IntegerType).add("struct", subSchema)) { column =>
val c0 = column.getChild(0)
val c1 = column.getChild(1)
c0.putInt(0, 0)
c0.putInt(1, 1)
c0.putInt(2, 2)
val c1c0 = c1.getChild(0)
val c1c1 = c1.getChild(1)
// Structs in c1: (7, 70), (8, 80), (9, 90)
c1c0.putInt(0, 7)
c1c0.putInt(1, 8)
c1c0.putInt(2, 9)
c1c1.putInt(0, 70)
c1c1.putInt(1, 80)
c1c1.putInt(2, 90)
assert(column.getStruct(0).getInt(0) === 0)
assert(column.getStruct(0).getStruct(1, 2).toSeq(subSchema) === Seq(7, 70))
assert(column.getStruct(1).getInt(0) === 1)
assert(column.getStruct(1).getStruct(1, 2).toSeq(subSchema) === Seq(8, 80))
assert(column.getStruct(2).getInt(0) === 2)
assert(column.getStruct(2).getStruct(1, 2).toSeq(subSchema) === Seq(9, 90))
}
test("ColumnarBatch basic") {
(MemoryMode.ON_HEAP :: MemoryMode.OFF_HEAP :: Nil).foreach { memMode => {
val schema = new StructType()
.add("intCol", IntegerType)
.add("doubleCol", DoubleType)
.add("intCol2", IntegerType)
.add("string", BinaryType)
val capacity = 4 * 1024
val columns = schema.fields.map { field =>
allocate(capacity, field.dataType, memMode)
}
val batch = new ColumnarBatch(columns.toArray)
assert(batch.numCols() == 4)
assert(batch.numRows() == 0)
assert(batch.rowIterator().hasNext == false)
      // Add a row [1, 1.1, NULL, "Hello"]
columns(0).putInt(0, 1)
columns(1).putDouble(0, 1.1)
columns(2).putNull(0)
columns(3).putByteArray(0, "Hello".getBytes(StandardCharsets.UTF_8))
batch.setNumRows(1)
// Verify the results of the row.
assert(batch.numCols() == 4)
assert(batch.numRows() == 1)
assert(batch.rowIterator().hasNext)
assert(batch.rowIterator().hasNext)
assert(columns(0).getInt(0) == 1)
assert(columns(0).isNullAt(0) == false)
assert(columns(1).getDouble(0) == 1.1)
assert(columns(1).isNullAt(0) == false)
assert(columns(2).isNullAt(0))
assert(columns(3).getUTF8String(0).toString == "Hello")
// Verify the iterator works correctly.
val it = batch.rowIterator()
assert(it.hasNext())
val row = it.next()
assert(row.getInt(0) == 1)
assert(row.isNullAt(0) == false)
assert(row.getDouble(1) == 1.1)
assert(row.isNullAt(1) == false)
assert(row.isNullAt(2))
assert(columns(3).getUTF8String(0).toString == "Hello")
assert(it.hasNext == false)
assert(it.hasNext == false)
// Reset and add 3 rows
columns.foreach(_.reset())
      // Add rows [NULL, 2.2, 2, "abc"], [3, NULL, 3, ""], [4, 4.4, 4, "world"]
columns(0).putNull(0)
columns(1).putDouble(0, 2.2)
columns(2).putInt(0, 2)
columns(3).putByteArray(0, "abc".getBytes(StandardCharsets.UTF_8))
columns(0).putInt(1, 3)
columns(1).putNull(1)
columns(2).putInt(1, 3)
columns(3).putByteArray(1, "".getBytes(StandardCharsets.UTF_8))
columns(0).putInt(2, 4)
columns(1).putDouble(2, 4.4)
columns(2).putInt(2, 4)
columns(3).putByteArray(2, "world".getBytes(StandardCharsets.UTF_8))
batch.setNumRows(3)
def rowEquals(x: InternalRow, y: Row): Unit = {
assert(x.isNullAt(0) == y.isNullAt(0))
if (!x.isNullAt(0)) assert(x.getInt(0) == y.getInt(0))
assert(x.isNullAt(1) == y.isNullAt(1))
if (!x.isNullAt(1)) assert(x.getDouble(1) == y.getDouble(1))
assert(x.isNullAt(2) == y.isNullAt(2))
if (!x.isNullAt(2)) assert(x.getInt(2) == y.getInt(2))
assert(x.isNullAt(3) == y.isNullAt(3))
if (!x.isNullAt(3)) assert(x.getString(3) == y.getString(3))
}
// Verify
assert(batch.numRows() == 3)
val it2 = batch.rowIterator()
rowEquals(it2.next(), Row(null, 2.2, 2, "abc"))
rowEquals(it2.next(), Row(3, null, 3, ""))
rowEquals(it2.next(), Row(4, 4.4, 4, "world"))
assert(!it.hasNext)
batch.close()
}}
}
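  // Lifecycle recap for the test above (explanatory comment; not from the original
  // suite): allocate one WritableColumnVector per field, put values (and nulls) by row
  // index, call setNumRows() to expose them through the batch, read them back through
  // rowIterator(), then reset() the vectors before writing the next rows and close()
  // the batch when finished.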
private def doubleEquals(d1: Double, d2: Double): Boolean = {
if (d1.isNaN && d2.isNaN) {
true
} else {
d1 == d2
}
}
private def compareStruct(fields: Seq[StructField], r1: InternalRow, r2: Row, seed: Long) {
fields.zipWithIndex.foreach { case (field: StructField, ordinal: Int) =>
assert(r1.isNullAt(ordinal) == r2.isNullAt(ordinal), "Seed = " + seed)
if (!r1.isNullAt(ordinal)) {
field.dataType match {
case BooleanType => assert(r1.getBoolean(ordinal) == r2.getBoolean(ordinal),
"Seed = " + seed)
case ByteType => assert(r1.getByte(ordinal) == r2.getByte(ordinal), "Seed = " + seed)
case ShortType => assert(r1.getShort(ordinal) == r2.getShort(ordinal), "Seed = " + seed)
case IntegerType => assert(r1.getInt(ordinal) == r2.getInt(ordinal), "Seed = " + seed)
case LongType => assert(r1.getLong(ordinal) == r2.getLong(ordinal), "Seed = " + seed)
case FloatType => assert(doubleEquals(r1.getFloat(ordinal), r2.getFloat(ordinal)),
"Seed = " + seed)
case DoubleType => assert(doubleEquals(r1.getDouble(ordinal), r2.getDouble(ordinal)),
"Seed = " + seed)
case t: DecimalType =>
val d1 = r1.getDecimal(ordinal, t.precision, t.scale).toBigDecimal
val d2 = r2.getDecimal(ordinal)
assert(d1.compare(d2) == 0, "Seed = " + seed)
case StringType =>
assert(r1.getString(ordinal) == r2.getString(ordinal), "Seed = " + seed)
case CalendarIntervalType =>
assert(r1.getInterval(ordinal) === r2.get(ordinal).asInstanceOf[CalendarInterval])
case ArrayType(childType, n) =>
val a1 = r1.getArray(ordinal).array
val a2 = r2.getList(ordinal).toArray
assert(a1.length == a2.length, "Seed = " + seed)
childType match {
case DoubleType =>
var i = 0
while (i < a1.length) {
assert(doubleEquals(a1(i).asInstanceOf[Double], a2(i).asInstanceOf[Double]),
"Seed = " + seed)
i += 1
}
case FloatType =>
var i = 0
while (i < a1.length) {
assert(doubleEquals(a1(i).asInstanceOf[Float], a2(i).asInstanceOf[Float]),
"Seed = " + seed)
i += 1
}
case t: DecimalType =>
var i = 0
while (i < a1.length) {
assert((a1(i) == null) == (a2(i) == null), "Seed = " + seed)
if (a1(i) != null) {
val d1 = a1(i).asInstanceOf[Decimal].toBigDecimal
val d2 = a2(i).asInstanceOf[java.math.BigDecimal]
assert(d1.compare(d2) == 0, "Seed = " + seed)
}
i += 1
}
case _ => assert(a1 === a2, "Seed = " + seed)
}
case StructType(childFields) =>
compareStruct(childFields, r1.getStruct(ordinal, fields.length),
r2.getStruct(ordinal), seed)
case _ =>
throw new UnsupportedOperationException("Not implemented " + field.dataType)
}
}
}
}
test("Convert rows") {
(MemoryMode.ON_HEAP :: MemoryMode.OFF_HEAP :: Nil).foreach { memMode => {
val rows = Row(1, 2L, "a", 1.2, 'b'.toByte) :: Row(4, 5L, "cd", 2.3, 'a'.toByte) :: Nil
val schema = new StructType()
.add("i1", IntegerType)
.add("l2", LongType)
.add("string", StringType)
.add("d", DoubleType)
.add("b", ByteType)
val batch = ColumnVectorUtils.toBatch(schema, memMode, rows.iterator.asJava)
assert(batch.numRows() == 2)
assert(batch.numCols() == 5)
val it = batch.rowIterator()
val referenceIt = rows.iterator
while (it.hasNext) {
compareStruct(schema, it.next(), referenceIt.next(), 0)
}
batch.close()
}
}}
/**
   * This test generates random data for a random schema, serializes it to column batches and verifies the
* results.
*/
def testRandomRows(flatSchema: Boolean, numFields: Int) {
// TODO: Figure out why StringType doesn't work on jenkins.
val types = Array(
BooleanType, ByteType, FloatType, DoubleType, IntegerType, LongType, ShortType,
DecimalType.ShortDecimal, DecimalType.IntDecimal, DecimalType.ByteDecimal,
DecimalType.FloatDecimal, DecimalType.LongDecimal, new DecimalType(5, 2),
new DecimalType(12, 2), new DecimalType(30, 10), CalendarIntervalType)
val seed = System.nanoTime()
val NUM_ROWS = 200
val NUM_ITERS = 1000
val random = new Random(seed)
var i = 0
while (i < NUM_ITERS) {
val schema = if (flatSchema) {
RandomDataGenerator.randomSchema(random, numFields, types)
} else {
RandomDataGenerator.randomNestedSchema(random, numFields, types)
}
val rows = mutable.ArrayBuffer.empty[Row]
var j = 0
while (j < NUM_ROWS) {
val row = RandomDataGenerator.randomRow(random, schema)
rows += row
j += 1
}
(MemoryMode.ON_HEAP :: MemoryMode.OFF_HEAP :: Nil).foreach { memMode => {
val batch = ColumnVectorUtils.toBatch(schema, memMode, rows.iterator.asJava)
assert(batch.numRows() == NUM_ROWS)
val it = batch.rowIterator()
val referenceIt = rows.iterator
var k = 0
while (it.hasNext) {
compareStruct(schema, it.next(), referenceIt.next(), seed)
k += 1
}
batch.close()
}}
i += 1
}
}
test("Random flat schema") {
testRandomRows(true, 15)
}
test("Random nested schema") {
testRandomRows(false, 30)
}
test("exceeding maximum capacity should throw an error") {
(MemoryMode.ON_HEAP :: MemoryMode.OFF_HEAP :: Nil).foreach { memMode =>
val column = allocate(1, ByteType, memMode)
column.MAX_CAPACITY = 15
column.appendBytes(5, 0.toByte)
// Successfully allocate twice the requested capacity
assert(column.capacity == 10)
column.appendBytes(10, 0.toByte)
// Allocated capacity doesn't exceed MAX_CAPACITY
assert(column.capacity == 15)
val ex = intercept[RuntimeException] {
// Over-allocating beyond MAX_CAPACITY throws an exception
column.appendBytes(10, 0.toByte)
}
assert(ex.getMessage.contains(s"Cannot reserve additional contiguous bytes in the " +
s"vectorized reader"))
}
}
test("create columnar batch from Arrow column vectors") {
val allocator = ArrowUtils.rootAllocator.newChildAllocator("int", 0, Long.MaxValue)
val vector1 = ArrowUtils.toArrowField("int1", IntegerType, nullable = true, null)
.createVector(allocator).asInstanceOf[IntVector]
vector1.allocateNew()
val vector2 = ArrowUtils.toArrowField("int2", IntegerType, nullable = true, null)
.createVector(allocator).asInstanceOf[IntVector]
vector2.allocateNew()
(0 until 10).foreach { i =>
vector1.setSafe(i, i)
vector2.setSafe(i + 1, i)
}
vector1.setNull(10)
vector1.setValueCount(11)
vector2.setNull(0)
vector2.setValueCount(11)
val columnVectors = Seq(new ArrowColumnVector(vector1), new ArrowColumnVector(vector2))
val schema = StructType(Seq(StructField("int1", IntegerType), StructField("int2", IntegerType)))
val batch = new ColumnarBatch(columnVectors.toArray)
batch.setNumRows(11)
assert(batch.numCols() == 2)
assert(batch.numRows() == 11)
val rowIter = batch.rowIterator().asScala
rowIter.zipWithIndex.foreach { case (row, i) =>
if (i == 10) {
assert(row.isNullAt(0))
} else {
assert(row.getInt(0) == i)
}
if (i == 0) {
assert(row.isNullAt(1))
} else {
assert(row.getInt(1) == i - 1)
}
}
batch.close()
allocator.close()
}
testVector("Decimal API", 4, DecimalType.IntDecimal) {
column =>
val reference = mutable.ArrayBuffer.empty[Decimal]
var idx = 0
column.putDecimal(idx, new Decimal().set(10), 10)
reference += new Decimal().set(10)
idx += 1
column.putDecimal(idx, new Decimal().set(20), 10)
reference += new Decimal().set(20)
idx += 1
column.putNull(idx)
assert(column.getDecimal(idx, 10, 0) == null)
reference += null
idx += 1
column.putDecimal(idx, new Decimal().set(30), 10)
reference += new Decimal().set(30)
reference.zipWithIndex.foreach { case (v, i) =>
val errMsg = "VectorType=" + column.getClass.getSimpleName
assert(v == column.getDecimal(i, 10, 0), errMsg)
if (v == null) assert(column.isNullAt(i), errMsg)
}
column.close()
}
testVector("Binary APIs", 4, BinaryType) {
column =>
val reference = mutable.ArrayBuffer.empty[String]
var idx = 0
column.putByteArray(idx, "Hello".getBytes(StandardCharsets.UTF_8))
reference += "Hello"
idx += 1
column.putByteArray(idx, "World".getBytes(StandardCharsets.UTF_8))
reference += "World"
idx += 1
column.putNull(idx)
reference += null
idx += 1
column.putByteArray(idx, "abc".getBytes(StandardCharsets.UTF_8))
reference += "abc"
reference.zipWithIndex.foreach { case (v, i) =>
val errMsg = "VectorType=" + column.getClass.getSimpleName
if (v != null) {
assert(v == new String(column.getBinary(i)), errMsg)
} else {
assert(column.isNullAt(i), errMsg)
assert(column.getBinary(i) == null, errMsg)
}
}
column.close()
}
testVector("WritableColumnVector.reserve(): requested capacity is negative", 1024, ByteType) {
column =>
val ex = intercept[RuntimeException] { column.reserve(-1) }
assert(ex.getMessage.contains(
"Cannot reserve additional contiguous bytes in the vectorized reader (integer overflow)"))
}
}
| aosagie/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchSuite.scala | Scala | apache-2.0 | 43,456 |
package com.twitter.finagle.buoyant.h2
package netty4
import com.twitter.logging.Logger
import io.netty.buffer.ByteBuf
import io.netty.channel._
import io.netty.handler.codec.http.{HttpServerCodec, HttpServerUpgradeHandler}
import io.netty.handler.codec.http2._
import io.netty.util.AsciiString
object ServerUpgradeHandler {
private val log = Logger.get("h2")
private val PrefaceBuf = Http2CodecUtil.connectionPrefaceBuf
private val PrefaceLen = PrefaceBuf.readableBytes
private def isPreface(bb: ByteBuf) =
(bb.readableBytes >= PrefaceLen &&
bb.slice(0, PrefaceLen).equals(PrefaceBuf))
private def isH2C(proto: CharSequence): Boolean =
AsciiString.contentEquals(Http2CodecUtil.HTTP_UPGRADE_PROTOCOL_NAME, proto)
}
/**
* Accept either H2C requests (beginning with a Connection preface)
* or HTTP requests with h2c protocol upgrading.
*/
class ServerUpgradeHandler(h2Framer: H2FrameCodec) extends ChannelDuplexHandler {
import ServerUpgradeHandler._
// Parses HTTP/1 objects.
private[this] val h1Codec = new HttpServerCodec
// Intercepts HTTP/1 requests with the HTTP2-Settings headers and
// initiate protocol upgrade.
private[this] val upgrader =
new HttpServerUpgradeHandler(h1Codec, new HttpServerUpgradeHandler.UpgradeCodecFactory {
override def newUpgradeCodec(proto: CharSequence): HttpServerUpgradeHandler.UpgradeCodec =
if (isH2C(proto)) new Http2FrameCodecServerUpgrader(h2Framer)
else null
})
/**
* Detect the HTTP2 connection preface to support Prior Knowledge
* HTTP2 (i.e. gRPC). If that doesn't exist try to upgrade from HTTP/1.
*/
override def channelRead(ctx: ChannelHandlerContext, obj: Any): Unit = {
obj match {
case bb: ByteBuf if isPreface(bb) =>
// If the connection starts with the magical prior-knowledge
// preface, just assume we're speaking plain h2c.
ctx.pipeline.addAfter(ctx.name, "h2 framer", h2Framer)
case bb: ByteBuf =>
// Otherwise, Upgrade from h1 to h2
ctx.pipeline.addAfter(ctx.name, "h1 codec", h1Codec)
ctx.pipeline.addAfter("h1 codec", "h1 upgrade h2", upgrader)
// TODO silently translate native h1 to h2
case _ => // Fall through and pass on the read.
}
// Stop trying to upgrade the protocol.
ctx.pipeline.remove(this)
log.debug("h2 server pipeline: installing framer: %s", ctx.pipeline)
// Pass it on.
ctx.fireChannelRead(obj); ()
}
}
| linkerd/linkerd | finagle/h2/src/main/scala/com/twitter/finagle/buoyant/h2/netty4/ServerUpgradeHandler.scala | Scala | apache-2.0 | 2,476 |
package edu.utsa.tl13
import edu.utsa.tl13._
import UnitTest._
import ParseTests._
import ScanTests._
/** All compiler unit tests */
object CompilerTests extends App {
val tests =
TestGroup("Compiler",
ParseTests.tests,
ScanTests.tests)
simpleReport(tests.run)
}
| jwtouron/tl13 | src/edu/utsa/tl13/CompilerTests.scala | Scala | mit | 304 |
/*
* Skylark
* http://skylark.io
*
* Copyright 2012-2017 Quantarray, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.quantarray.skylark.measure
/**
* Any measure converter.
*
* @author Araik Grigoryan
*/
trait AnyMeasureConverter extends SameTypeConverter[AnyMeasure]
{
protected override def convert(from: AnyMeasure, to: AnyMeasure): Option[Double] =
{
if (from == to)
{
Some(1.0)
}
else
{
(from.ultimateBase, to.ultimateBase) match
{
case (Some(f), Some(t)) if from.system == to.system && f._1 == t._1 => Some(f._2 / t._2)
case _ => super.convert(from, to)
}
}
}
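  // A minimal illustration (editor's sketch with hypothetical measures, not part of
  // this repository): if two measures belong to the same system and share the same
  // ultimate base, the conversion factor is just the ratio of their multipliers, e.g.
  //
  //   kilogram.ultimateBase == Some((gram, 1000.0))
  //   gram.ultimateBase     == Some((gram, 1.0))
  //   convert(kilogram, gram) == Some(1000.0 / 1.0)   // Some(1000.0)
  //
  // Any other combination falls through to super.convert(from, to).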
}
| quantarray/skylark | skylark-measure/src/main/scala/com/quantarray/skylark/measure/AnyMeasureConverter.scala | Scala | apache-2.0 | 1,183 |
package physical.flow
class FlowPolygonContainer(polygon: FlowPolygon, index: Int) {
}
| shawes/zissou | src/main/scala/physical/flow/FlowPolygonContainer.scala | Scala | mit | 89 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.Equality
import org.scalactic.Uniformity
import org.scalactic.Entry
import org.scalactic.StringNormalizations._
import SharedHelpers._
import FailureMessages.decorateToStringValue
import Matchers._
import exceptions.TestFailedException
class ListShouldContainAllOfLogicalOrSpec extends Spec {
val invertedStringEquality =
new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
val invertedListOfStringEquality =
new Equality[List[String]] {
def areEqual(a: List[String], b: Any): Boolean = a != b
}
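  // These "inverted" Equality instances treat two values as equal exactly when default
  // equality would not; the specs below pass them via "decided by" to prove that an
  // explicitly supplied Equality really is the one being consulted.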
private def upperCase(value: Any): Any =
value match {
case l: List[_] => l.map(upperCase(_))
case s: String => s.toUpperCase
case c: Char => c.toString.toUpperCase.charAt(0)
case (s1: String, s2: String) => (s1.toUpperCase, s2.toUpperCase)
case e: java.util.Map.Entry[_, _] =>
(e.getKey, e.getValue) match {
case (k: String, v: String) => Entry(k.toUpperCase, v.toUpperCase)
case _ => value
}
case _ => value
}
val upperCaseStringEquality =
new Equality[String] {
def areEqual(a: String, b: Any): Boolean = upperCase(a) == upperCase(b)
}
//ADDITIONAL//
val fileName: String = "ListShouldContainAllOfLogicalOrSpec.scala"
object `a List` {
val fumList: List[String] = List("fex", "fum", "foe", "fie", "fee")
val toList: List[String] = List("too", "you", "to", "birthday", "happy")
object `when used with (contain allOf (..) or contain allOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (contain allOf ("fee", "fie", "foe", "fum") or contain allOf ("fie", "fee", "fum", "foe"))
fumList should (contain allOf ("fee", "fie", "foe", "fam") or contain allOf ("fie", "fee", "fum", "foe"))
fumList should (contain allOf ("fee", "fie", "foe", "fum") or contain allOf ("fie", "fee", "fam", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (contain allOf ("fee", "fie", "foe", "fam") or contain allOf ("happy", "birthday", "to", "you"))
}
checkMessageStackDepth(e1, Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fam\"") + ", and " + Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"happy\", \"birthday\", \"to\", \"you\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (contain allOf ("FEE", "FIE", "FOE", "FUM") or contain allOf ("FIE", "FEE", "FUM", "FOE"))
fumList should (contain allOf ("FEE", "FIE", "FOE", "FAM") or contain allOf ("FIE", "FEE", "FUM", "FOE"))
fumList should (contain allOf ("FEE", "FIE", "FOE", "FUM") or contain allOf ("FIE", "FEE", "FAM", "FOE"))
val e1 = intercept[TestFailedException] {
fumList should (contain allOf ("FEE", "FIE", "FOE", "FAM") or (contain allOf ("FIE", "FEE", "FAM", "FOE")))
}
checkMessageStackDepth(e1, Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FAM\"") + ", and " + Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (contain allOf ("FEE", "FIE", "FOE", "FUM") or contain allOf ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (contain allOf ("FEE", "FIE", "FOE", "FAM") or contain allOf ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (contain allOf ("FEE", "FIE", "FOE", "FUM") or contain allOf ("FIE", "FEE", "FAM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (contain allOf ("FEE", "FIE", "FOE", "FAM") or contain allOf ("FIE", "FEE", "FAM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FAM\"") + ", and " + Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
(fumList should (contain allOf (" FEE ", " FIE ", " FOE ", " FUM ") or contain allOf (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (contain allOf ("fee", "fie", "foe", "fie", "fum") or contain allOf ("fie", "fee", "fum", "foe"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.allOfDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
fumList should (contain allOf ("fie", "fee", "fum", "foe") or contain allOf ("fee", "fie", "foe", "fie", "fum"))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.allOfDuplicate))
}
}
    object `when used with (equal (..) or contain allOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (equal (fumList) or contain allOf ("fie", "fee", "fum", "foe"))
fumList should (equal (toList) or contain allOf ("fie", "fee", "fum", "foe"))
fumList should (equal (fumList) or contain allOf ("fie", "fee", "fam", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) or contain allOf ("happy", "birthday", "to", "you"))
}
checkMessageStackDepth(e1, Resources.didNotEqual(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"happy\", \"birthday\", \"to\", \"you\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (equal (fumList) or contain allOf ("FIE", "FEE", "FUM", "FOE"))
fumList should (equal (toList) or contain allOf ("FIE", "FEE", "FUM", "FOE"))
fumList should (equal (fumList) or contain allOf ("FIE", "FEE", "FAM", "FOE"))
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) or (contain allOf ("FIE", "FEE", "FAM", "FOE")))
}
checkMessageStackDepth(e1, Resources.didNotEqual(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (equal (toList) or contain allOf ("FIE", "FEE", "FUM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (equal (fumList) or contain allOf ("FIE", "FEE", "FUM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (equal (toList) or contain allOf ("FIE", "FEE", "FAM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (equal (fumList) or contain allOf ("FIE", "FEE", "FAM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.didNotEqual(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
(fumList should (equal (toList) or contain allOf (" FEE ", " FIE ", " FOE ", " FUM "))) (decided by invertedListOfStringEquality, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (equal (fumList) or contain allOf ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.allOfDuplicate))
}
}
    object `when used with (be (..) or contain allOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (be_== (fumList) or contain allOf ("fie", "fee", "fum", "foe"))
fumList should (be_== (toList) or contain allOf ("fie", "fee", "fum", "foe"))
fumList should (be_== (fumList) or contain allOf ("fie", "fee", "fam", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (be_== (toList) or contain allOf ("fie", "fee", "fam", "foe"))
}
checkMessageStackDepth(e1, Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"fie\", \"fee\", \"fam\", \"foe\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (be_== (fumList) or contain allOf ("FIE", "FEE", "FUM", "FOE"))
fumList should (be_== (toList) or contain allOf ("FIE", "FEE", "FUM", "FOE"))
fumList should (be_== (fumList) or contain allOf ("FIE", "FEE", "FAM", "FOE"))
val e1 = intercept[TestFailedException] {
fumList should (be_== (toList) or (contain allOf ("FIE", "FEE", "FAM", "FOE")))
}
checkMessageStackDepth(e1, Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (be_== (fumList) or contain allOf ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality)
(fumList should (be_== (toList) or contain allOf ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality)
(fumList should (be_== (fumList) or contain allOf ("FIE", "FEE", "FAM", "FOE"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (be_== (toList) or contain allOf ("FIE", "FEE", "FAM", "FOE"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
(fumList should (be_== (fumList) or contain allOf (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (be_== (fumList) or contain allOf ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.allOfDuplicate))
}
}
    object `when used with (contain allOf (..) or be (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (contain allOf ("fie", "fee", "fum", "foe") or be_== (fumList))
fumList should (contain allOf ("fie", "fee", "fam", "foe") or be_== (fumList))
fumList should (contain allOf ("fie", "fee", "fum", "foe") or be_== (toList))
val e1 = intercept[TestFailedException] {
fumList should (contain allOf ("fee", "fie", "foe", "fam") or be_== (toList))
}
checkMessageStackDepth(e1, Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fam\"") + ", and " + Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (contain allOf ("FIE", "FEE", "FUM", "FOE") or be_== (fumList))
fumList should (contain allOf ("FIE", "FEE", "FAM", "FOE") or be_== (fumList))
fumList should (contain allOf ("FIE", "FEE", "FUM", "FOE") or be_== (toList))
val e1 = intercept[TestFailedException] {
fumList should (contain allOf ("FEE", "FIE", "FOE", "FAM") or be_== (toList))
}
checkMessageStackDepth(e1, Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FAM\"") + ", and " + Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (contain allOf ("FIE", "FEE", "FUM", "FOE") or be_== (fumList))) (decided by upperCaseStringEquality)
(fumList should (contain allOf ("FIE", "FEE", "FAM", "FOE") or be_== (fumList))) (decided by upperCaseStringEquality)
(fumList should (contain allOf ("FIE", "FEE", "FUM", "FOE") or be_== (toList))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (contain allOf ("FEE", "FIE", "FOE", "FAM") or be_== (toList))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.didNotContainAllOfElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FAM\"") + ", and " + Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
(fumList should (contain allOf (" FEE ", " FIE ", " FOE ", " FUM ") or be_== (fumList))) (after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (contain allOf ("fee", "fie", "foe", "fie", "fum") or be_== (fumList))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.allOfDuplicate))
}
}
    object `when used with (not contain allOf (..) or not contain allOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (not contain allOf ("fee", "fie", "foe", "fuu") or not contain allOf ("fie", "fee", "fuu", "foe"))
fumList should (not contain allOf ("fee", "fie", "foe", "fum") or not contain allOf ("fie", "fee", "fuu", "foe"))
fumList should (not contain allOf ("fee", "fie", "foe", "fuu") or not contain allOf ("fie", "fee", "fum", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (not contain allOf ("fee", "fie", "foe", "fum") or not contain allOf ("fie", "fee", "fum", "foe"))
}
checkMessageStackDepth(e1, Resources.containedAllOfElements(decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\"") + ", and " + Resources.containedAllOfElements(decorateToStringValue(fumList), "\"fie\", \"fee\", \"fum\", \"foe\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (not contain allOf ("FEE", "FIE", "FOE", "FUU") or not contain allOf ("FIE", "FEE", "FUU", "FOE"))
fumList should (not contain allOf ("FEE", "FIE", "FOE", "FUM") or not contain allOf ("FIE", "FEE", "FUU", "FOE"))
fumList should (not contain allOf ("FEE", "FIE", "FOE", "FUU") or not contain allOf ("FIE", "FEE", "FUM", "FOE"))
val e1 = intercept[TestFailedException] {
fumList should (not contain allOf ("FEE", "FIE", "FOE", "FUM") or not contain allOf ("FIE", "FEE", "FUM", "FOE"))
}
checkMessageStackDepth(e1, Resources.containedAllOfElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FUM\"") + ", and " + Resources.containedAllOfElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (not contain allOf ("FEE", "FIE", "FOE", "FUU") or not contain allOf ("FIE", "FEE", "FUU", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (not contain allOf ("FEE", "FIE", "FOE", "FUM") or not contain allOf ("FIE", "FEE", "FUU", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (not contain allOf ("FEE", "FIE", "FOE", "FUU") or not contain allOf ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not contain allOf ("FEE", "FIE", "FOE", "FUM") or not contain allOf ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.containedAllOfElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FUM\"") + ", and " + Resources.containedAllOfElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\""), fileName, thisLineNumber - 2)
(fumList should (contain allOf (" FEE ", " FIE ", " FOE ", " FUM ") or contain allOf (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (not contain allOf ("fee", "fie", "foe", "fie", "fum") or not contain allOf ("fie", "fee", "fuu", "foe"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.allOfDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
fumList should (not contain allOf ("fie", "fee", "fuu", "foe") or not contain allOf ("fee", "fie", "foe", "fie", "fum"))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.allOfDuplicate))
}
}
    object `when used with (not equal (..) or not contain allOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (not equal (toList) or not contain allOf ("fie", "fee", "fuu", "foe"))
fumList should (not equal (fumList) or not contain allOf ("fie", "fee", "fuu", "foe"))
fumList should (not equal (toList) or not contain allOf ("fie", "fee", "fum", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (not equal (fumList) or not contain allOf ("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e1, Resources.equaled(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedAllOfElements(decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (not equal (toList) or not contain allOf ("FIE", "FEE", "FUU", "FOE"))
fumList should (not equal (fumList) or not contain allOf ("FIE", "FEE", "FUU", "FOE"))
fumList should (not equal (toList) or not contain allOf ("FIE", "FEE", "FUM", "FOE"))
val e2 = intercept[TestFailedException] {
fumList should (not equal (fumList) or (not contain allOf ("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e2, Resources.equaled(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedAllOfElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FUM\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (not equal (fumList) or not contain allOf ("FIE", "FEE", "FUU", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (not equal (toList) or not contain allOf ("FIE", "FEE", "FUU", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (not equal (fumList) or not contain allOf ("FIE", "FEE", "FUM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not equal (toList) or not contain allOf ("FIE", "FEE", "FUM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.equaled(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.containedAllOfElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\""), fileName, thisLineNumber - 2)
(fumList should (not contain allOf (" FEE ", " FIE ", " FOE ", " FUU ") or not contain allOf (" FEE ", " FIE ", " FOE ", " FUU "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (not equal (toList) or not contain allOf ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.allOfDuplicate))
}
}
    object `when used with (not be (..) or not contain allOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (not be_== (toList) or not contain allOf ("fie", "fee", "fuu", "foe"))
fumList should (not be_== (fumList) or not contain allOf ("fie", "fee", "fuu", "foe"))
fumList should (not be_== (toList) or not contain allOf ("fee", "fie", "foe", "fum"))
val e1 = intercept[TestFailedException] {
fumList should (not be_== (fumList) or not contain allOf ("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e1, Resources.wasEqualTo(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedAllOfElements(decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (not be_== (toList) or not contain allOf ("FIE", "FEE", "FUU", "FOE"))
fumList should (not be_== (fumList) or not contain allOf ("FIE", "FEE", "FUU", "FOE"))
fumList should (not be_== (toList) or not contain allOf ("FEE", "FIE", "FOE", "FUM"))
val e1 = intercept[TestFailedException] {
fumList should (not be_== (fumList) or (not contain allOf ("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e1, Resources.wasEqualTo(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedAllOfElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FUM\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (not be_== (toList) or not contain allOf ("FIE", "FEE", "FUU", "FOE"))) (decided by upperCaseStringEquality)
(fumList should (not be_== (fumList) or not contain allOf ("FIE", "FEE", "FUU", "FOE"))) (decided by upperCaseStringEquality)
(fumList should (not be_== (toList) or not contain allOf ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not be_== (fumList) or not contain allOf ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.wasEqualTo(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedAllOfElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FUM\""), fileName, thisLineNumber - 2)
(fumList should (not contain allOf (" FEE ", " FIE ", " FOE ", " FUU ") or not contain allOf (" FEE ", " FIE ", " FOE ", " FUU "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (not be_== (toList) or not contain allOf ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.allOfDuplicate))
}
}
}
object `collection of Lists` {
val list1s: Vector[List[Int]] = Vector(List(3, 2, 1, 0), List(3, 2, 1, 0), List(3, 2, 1, 0))
val lists: Vector[List[Int]] = Vector(List(3, 2, 1, 0), List(3, 2, 1, 0), List(8, 4, 3, 2))
val nils: Vector[List[Int]] = Vector(Nil, Nil, Nil)
val listsNil: Vector[List[Int]] = Vector(List(3, 2, 1, 0), List(3, 2, 1, 0), Nil)
val hiLists: Vector[List[String]] = Vector(List("howdy", "hi", "hello"), List("howdy", "hi", "hello"), List("howdy", "hi", "hello"))
val toLists: Vector[List[String]] = Vector(List("nice", "you", "to"), List("nice", "you", "to"), List("nice", "you", "to"))
def allErrMsg(index: Int, message: String, lineNumber: Int, left: Any): String =
"'all' inspection failed, because: \n" +
" at index " + index + ", " + message + " (" + fileName + ":" + (lineNumber) + ") \n" +
"in " + decorateToStringValue(left)
    object `when used with (contain allOf (..) or contain allOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (contain allOf (3, 2, 1) or contain allOf (1, 3, 2))
all (list1s) should (contain allOf (3, 2, 5) or contain allOf (1, 3, 2))
all (list1s) should (contain allOf (3, 2, 1) or contain allOf (2, 3, 4))
atLeast (2, lists) should (contain allOf (3, 1, 2) or contain allOf (1, 2, 3))
atLeast (2, lists) should (contain allOf (3, 6, 5) or contain allOf (1, 3, 2))
atLeast (2, lists) should (contain allOf (3, 1, 2) or contain allOf (8, 3, 4))
val e1 = intercept[TestFailedException] {
all (lists) should (contain allOf (3, 1, 2) or contain allOf (1, 3, 2))
}
checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(List(8, 4, 3, 2)) + " did not contain all of " + "(3, 1, 2)" + ", and " + decorateToStringValue(List(8, 4, 3, 2)) + " did not contain all of " + "(1, 3, 2)", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (contain allOf ("HELLO", "HI") or contain allOf ("hello", "hi"))
all (hiLists) should (contain allOf ("HELLO", "HO") or contain allOf ("hello", "hi"))
all (hiLists) should (contain allOf ("HELLO", "HI") or contain allOf ("hello", "ho"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (contain allOf ("HELLO", "HO") or contain allOf ("hello", "ho"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(List("howdy", "hi", "hello")) + " did not contain all of " + "(\"HELLO\", \"HO\")" + ", and " + decorateToStringValue(List("howdy", "hi", "hello")) + " did not contain all of " + "(\"hello\", \"ho\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (contain allOf ("HELLO", "HI") or contain allOf ("hello", "hi"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (contain allOf ("HELLO", "HO") or contain allOf ("hello", "hi"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (contain allOf ("HELLO", "HI") or contain allOf ("hello", "ho"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (contain allOf ("HELLO", "HO") or contain allOf ("hello", "ho"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(List("howdy", "hi", "hello")) + " did not contain all of " + "(\"HELLO\", \"HO\")" + ", and " + decorateToStringValue(List("howdy", "hi", "hello")) + " did not contain all of " + "(\"hello\", \"ho\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (contain allOf (3, 2, 2, 1) or contain allOf (1, 3, 2))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.allOfDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
all (list1s) should (contain allOf (1, 3, 2) or contain allOf (3, 2, 2, 1))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.allOfDuplicate))
}
}
    object `when used with (be (..) or contain allOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (be_== (List(3, 2, 1, 0)) or contain allOf (1, 2, 3))
all (list1s) should (be_== (List(2, 3, 4)) or contain allOf (1, 2, 3))
all (list1s) should (be_== (List(3, 2, 1, 0)) or contain allOf (2, 3, 4))
val e1 = intercept[TestFailedException] {
all (list1s) should (be_== (List(2, 3, 4)) or contain allOf (2, 3, 4))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(List(3, 2, 1, 0)) + " was not equal to " + decorateToStringValue(List(2, 3, 4)) + ", and " + decorateToStringValue(List(3, 2, 1, 0)) + " did not contain all of " + "(2, 3, 4)", thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (be_== (List("howdy", "hi", "hello")) or contain allOf ("HELLO", "HI"))
all (hiLists) should (be_== (List("ho", "hello")) or contain allOf ("HELLO", "HI"))
all (hiLists) should (be_== (List("howdy", "hi", "hello")) or contain allOf ("HELLO", "HO"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (be_== (List("ho", "hello")) or contain allOf ("HELLO", "HO"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(List("howdy", "hi", "hello")) + " was not equal to " + decorateToStringValue(List("ho", "hello")) + ", and " + decorateToStringValue(List("howdy", "hi", "hello")) + " did not contain all of " + "(\"HELLO\", \"HO\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (be_== (List("howdy", "hi", "hello")) or contain allOf ("HELLO", "HI"))) (decided by upperCaseStringEquality)
(all (hiLists) should (be_== (List("ho", "hello")) or contain allOf ("HELLO", "HI"))) (decided by upperCaseStringEquality)
(all (hiLists) should (be_== (List("howdy", "hi", "hello")) or contain allOf ("HELLO", "HO"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (be_== (List("ho", "hello")) or contain allOf ("HELLO", "HO"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(List("howdy", "hi", "hello")) + " was not equal to " + decorateToStringValue(List("ho", "hello")) + ", and " + decorateToStringValue(List("howdy", "hi", "hello")) + " did not contain all of " + "(\"HELLO\", \"HO\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (be_== (List(3, 2, 1, 0)) or contain allOf (1, 2, 2, 3))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.allOfDuplicate))
}
}
    object `when used with (not contain allOf xx or not contain allOf xx)` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (not contain allOf (3, 2, 8) or not contain allOf (8, 3, 4))
all (list1s) should (not contain allOf (1, 2, 3) or not contain allOf (8, 3, 4))
all (list1s) should (not contain allOf (3, 2, 8) or not contain allOf (2, 3, 1))
val e1 = intercept[TestFailedException] {
all (lists) should (not contain allOf (4, 2, 3) or not contain allOf (2, 3, 4))
}
checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(List(8, 4, 3, 2)) + " contained all of " + "(4, 2, 3)" + ", and " + decorateToStringValue(List(8, 4, 3, 2)) + " contained all of " + "(2, 3, 4)", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (not contain allOf ("HELLO", "HO") or not contain allOf ("hello", "ho"))
all (hiLists) should (not contain allOf ("HELLO", "HI") or not contain allOf ("hello", "ho"))
all (hiLists) should (not contain allOf ("HELLO", "HO") or not contain allOf ("hello", "hi"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (not contain allOf ("HELLO", "HI") or not contain allOf ("hello", "hi"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(List("howdy", "hi", "hello")) + " contained all of " + "(\"HELLO\", \"HI\")" + ", and " + decorateToStringValue(List("howdy", "hi", "hello")) + " contained all of " + "(\"hello\", \"hi\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (not contain allOf ("HELLO", "HO") or not contain allOf ("hello", "ho"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (not contain allOf ("HELLO", "HI") or not contain allOf ("hello", "ho"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (not contain allOf ("HELLO", "HO") or not contain allOf ("hello", "hi"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (not contain allOf ("HELLO", "HI") or not contain allOf ("hello", "hi"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(List("howdy", "hi", "hello")) + " contained all of " + "(\"HELLO\", \"HI\")" + ", and " + decorateToStringValue(List("howdy", "hi", "hello")) + " contained all of " + "(\"hello\", \"hi\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not contain allOf (1, 2, 2, 3) or not contain allOf (8, 3, 4))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.allOfDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not contain allOf (8, 3, 4) or not contain allOf (1, 2, 2, 3))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.allOfDuplicate))
}
}
    object `when used with (not be (...) or not contain allOf (...))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (not be_== (List(2)) or not contain allOf (8, 3, 4))
all (list1s) should (not be_== (List(3, 2, 1, 0)) or not contain allOf (8, 3, 4))
all (list1s) should (not be_== (List(2)) or not contain allOf (1, 2, 3))
val e1 = intercept[TestFailedException] {
all (list1s) should (not be_== (List(3, 2, 1, 0)) or not contain allOf (2, 3, 1))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(List(3, 2, 1, 0)) + " was equal to " + decorateToStringValue(List(3, 2, 1, 0)) + ", and " + decorateToStringValue(List(3, 2, 1, 0)) + " contained all of " + "(2, 3, 1)", thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (not be_== (List("hello", "ho")) or not contain allOf ("HELLO", "HO"))
all (hiLists) should (not be_== (List("howdy", "hello", "hi")) or not contain allOf ("HELLO", "HO"))
all (hiLists) should (not be_== (List("hello", "ho")) or not contain allOf ("HELLO", "HI"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (not be_== (List("howdy", "hi", "hello")) or not contain allOf ("HELLO", "HI"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(List("howdy", "hi", "hello")) + " was equal to " + decorateToStringValue(List("howdy", "hi", "hello")) + ", and " + decorateToStringValue(List("howdy", "hi", "hello")) + " contained all of " + "(\"HELLO\", \"HI\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (not be_== (List("hello", "ho")) or not contain allOf ("HELLO", "HO"))) (decided by upperCaseStringEquality)
(all (hiLists) should (not be_== (List("howdy", "hello", "hi")) or not contain allOf ("HELLO", "HO"))) (decided by upperCaseStringEquality)
(all (hiLists) should (not be_== (List("hello", "ho")) or not contain allOf ("HELLO", "HI"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (not be_== (List("howdy", "hi", "hello")) or not contain allOf ("HELLO", "HI"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(List("howdy", "hi", "hello")) + " was equal to " + decorateToStringValue(List("howdy", "hi", "hello")) + ", and " + decorateToStringValue(List("howdy", "hi", "hello")) + " contained all of " + "(\"HELLO\", \"HI\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not be_== (List(2)) or not contain allOf (1, 2, 2, 3))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.allOfDuplicate))
}
}
}
}
| SRGOM/scalatest | scalatest-test/src/test/scala/org/scalatest/ListShouldContainAllOfLogicalOrSpec.scala | Scala | apache-2.0 | 42,860 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.pmml.export
import org.dmg.pmml.ClusteringModel
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.clustering.KMeansModel
import org.apache.spark.mllib.linalg.Vectors
/**
 * PMML (Predictive Model Markup Language) is an XML-based format for defining
 * and sharing predictive models between applications.
 */
class KMeansPMMLModelExportSuite extends SparkFunSuite {
test("KMeansPMMLModelExport generate PMML format") {
val clusterCenters = Array(
Vectors.dense(1.0, 2.0, 6.0),
Vectors.dense(1.0, 3.0, 0.0),
Vectors.dense(1.0, 4.0, 6.0))
val kmeansModel = new KMeansModel(clusterCenters)
val modelExport = PMMLModelExportFactory.createPMMLModelExport(kmeansModel)
// assert that the PMML format is as expected
assert(modelExport.isInstanceOf[PMMLModelExport])
val pmml = modelExport.asInstanceOf[PMMLModelExport].getPmml
assert(pmml.getHeader.getDescription === "k-means clustering")
    // check that the number of fields matches the single vector size
    // (clusterCenters holds the cluster centroids)
assert(pmml.getDataDictionary.getNumberOfFields === clusterCenters(0).size)
    // This verifies that there is a model attached to the pmml object and that the model is a
    // clustering one. It also verifies that the pmml model has the same number of clusters as
    // the spark model.
val pmmlClusteringModel = pmml.getModels.get(0).asInstanceOf[ClusteringModel]
assert(pmmlClusteringModel.getNumberOfClusters === clusterCenters.length)
}
}
| tophua/spark1.52 | mllib/src/test/scala/org/apache/spark/mllib/pmml/export/KMeansPMMLModelExportSuite.scala | Scala | apache-2.0 | 2,314 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Ack
import monix.execution.Ack.Continue
import monix.reactive.{Observable, Observer}
import scala.concurrent.duration._
import scala.concurrent.duration.Duration.Zero
object MergeManySuite extends BaseOperatorSuite {
def createObservable(sourceCount: Int) = Some {
val o = Observable
.range(0L, sourceCount.toLong)
.mergeMap(i => Observable.fromIterable(Seq(i, i, i, i)))
Sample(o, count(sourceCount), sum(sourceCount), Zero, Zero)
}
def count(sourceCount: Int) =
4 * sourceCount
def observableInError(sourceCount: Int, ex: Throwable) = {
val o = Observable.range(0L, sourceCount.toLong).mergeMap(_ => Observable.raiseError(ex))
Some(Sample(o, 0, 0, Zero, Zero))
}
def sum(sourceCount: Int) = {
4L * sourceCount * (sourceCount - 1) / 2
}
def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = Some {
val o = Observable.range(0L, sourceCount.toLong).mergeMap(x => throw ex)
Sample(o, 0, 0, Zero, Zero)
}
override def cancelableObservables(): Seq[Sample] = {
val sample1 = Observable
.range(1, 100)
.mergeMap(x => Observable.range(0, 100).delayExecution(2.second))
val sample2 = Observable
.range(0, 100)
.delayOnNext(1.second)
.mergeMap(x => Observable.range(0, 100).delayExecution(2.second))
Seq(
Sample(sample1, 0, 0, 0.seconds, 0.seconds),
Sample(sample1, 0, 0, 1.seconds, 0.seconds),
Sample(sample2, 0, 0, 0.seconds, 0.seconds),
Sample(sample2, 0, 0, 1.seconds, 0.seconds)
)
}
test("mergeMap should be cancelable after main stream has finished") { implicit s =>
val source = Observable.now(1L).concatMap { x =>
Observable.intervalWithFixedDelay(1.second, 1.second).map(_ + x)
}
var total = 0L
val subscription = source.unsafeSubscribeFn(new Observer.Sync[Long] {
def onNext(elem: Long): Ack = {
total += elem
Continue
}
def onError(ex: Throwable): Unit = throw ex
def onComplete(): Unit = ()
})
s.tick(10.seconds)
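    // Editor's note: after 10 virtual seconds the inner interval has fired at t = 1s..10s,
    // emitting 0L..9L, each incremented by the outer value 1, so total = 1 + 2 + ... + 10 = 55 = 5 * 11.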
assertEquals(total, 5 * 11L)
subscription.cancel()
s.tick()
assertEquals(total, 5 * 11L)
assert(s.state.tasks.isEmpty, "tasks.isEmpty")
}
}
| alexandru/monifu | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/MergeManySuite.scala | Scala | apache-2.0 | 2,949 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.control
import wvlet.airframe.control.ResultClass.Failed
import wvlet.log.LogSupport
import scala.util.{Failure, Random, Success, Try}
/**
* Retry logic implementation helper
*/
object Retry extends LogSupport {
def retryableFailure(e: Throwable) = Failed(isRetryable = true, e)
def nonRetryableFailure(e: Throwable) = Failed(isRetryable = false, e)
def withBackOff(
maxRetry: Int = 3,
initialIntervalMillis: Int = 100,
maxIntervalMillis: Int = 15000,
multiplier: Double = 1.5
): RetryContext = {
defaultRetryContext.withMaxRetry(maxRetry).withBackOff(initialIntervalMillis, maxIntervalMillis, multiplier)
}
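  // Typical usage (editor's sketch; `callFlakyService` is a hypothetical user function):
  //
  //   Retry
  //     .withBackOff(maxRetry = 5, initialIntervalMillis = 200)
  //     .retryOn { case e: java.util.concurrent.TimeoutException => Retry.retryableFailure(e) }
  //     .run { callFlakyService() }
  //
  // The builders here only configure a RetryContext; nothing is executed until run
  // (or runWithContext) is called.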
def withBoundedBackoff(
initialIntervalMillis: Int = 100,
maxTotalWaitMillis: Int = 180000,
multiplier: Double = 1.5
): RetryContext = {
require(initialIntervalMillis > 0, s"initialWaitMillis must be > 0: ${initialIntervalMillis}")
    // S = totalWaitMillis = w * r^0 + w * r^1 + w * r^2 + ... + w * r^(n-1)
// S = w * (1-r^n) / (1-r)
// r^n = 1 - S * (1-r)/w
// n * log(r) = log(1 - S * (1-r) / w)
val N = math.log(1 - (maxTotalWaitMillis * (1 - multiplier) / initialIntervalMillis)) / math.log(multiplier)
def total(n: Int) = initialIntervalMillis * (1 - math.pow(multiplier, n)) / (1 - multiplier)
var maxRetry = N.ceil.toInt
while (maxRetry > 0 && total(maxRetry) > maxTotalWaitMillis) {
maxRetry -= 1
}
    val maxIntervalMillis = initialIntervalMillis * math.pow(multiplier, N).toInt
withBackOff(
maxRetry = maxRetry.max(0),
initialIntervalMillis = initialIntervalMillis,
maxIntervalMillis = maxIntervalMillis,
multiplier = multiplier
)
}
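  // Worked example (approximate figures, editor's addition): with the defaults above
  // (w = 100 ms, r = 1.5, S = 180000 ms), N = log(1 + 900) / log(1.5) ≈ 16.8.
  // total(17) ≈ 196852 ms exceeds the budget, so the loop trims maxRetry to 16
  // (total(16) ≈ 131168 ms ≤ 180000 ms).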
def withJitter(
maxRetry: Int = 3,
initialIntervalMillis: Int = 100,
maxIntervalMillis: Int = 15000,
multiplier: Double = 1.5
): RetryContext = {
defaultRetryContext.withMaxRetry(maxRetry).withJitter(initialIntervalMillis, maxIntervalMillis, multiplier)
}
private val defaultRetryContext: RetryContext = {
val retryConfig = RetryPolicyConfig()
RetryContext(
context = None,
lastError = NOT_STARTED,
retryCount = 0,
maxRetry = 3,
retryWaitStrategy = new ExponentialBackOff(retryConfig),
nextWaitMillis = retryConfig.initialIntervalMillis,
baseWaitMillis = retryConfig.initialIntervalMillis,
extraWaitMillis = 0
)
}
case class MaxRetryException(retryContext: RetryContext)
extends Exception(
s"Reached the max retry count ${retryContext.retryCount}/${retryContext.maxRetry}: ${retryContext.lastError.getMessage}",
retryContext.lastError
)
  // Throw this exception to force a retry of the execution
case class RetryableFailure(e: Throwable) extends Exception(e)
case object NOT_STARTED extends Exception("Code is not executed")
private def REPORT_RETRY_COUNT: RetryContext => Unit = { (ctx: RetryContext) =>
warn(
f"[${ctx.retryCount}/${ctx.maxRetry}] Execution failed: ${ctx.lastError.getMessage}. Retrying in ${ctx.nextWaitMillis / 1000.0}%.2f sec."
)
}
private def RETHROW_ALL: Throwable => ResultClass.Failed = { (e: Throwable) => throw e }
private[control] val noExtraWait = ExtraWait()
case class ExtraWait(maxExtraWaitMillis: Int = 0, factor: Double = 0.0) {
require(maxExtraWaitMillis >= 0)
require(factor >= 0)
def hasNoWait: Boolean = {
maxExtraWaitMillis == 0 && factor == 0.0
}
// Compute the extra wait millis based on the next wait millis
def extraWaitMillis(nextWaitMillis: Int): Int = {
if (maxExtraWaitMillis == 0) {
if (factor == 0.0) {
0
} else {
(nextWaitMillis * factor).toInt
}
} else {
if (factor == 0.0) {
maxExtraWaitMillis
} else {
(nextWaitMillis * factor).toInt.min(maxExtraWaitMillis)
}
}
}
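    // Summary (editor's addition): with the defaults (0, 0.0) no extra wait is added;
    // a non-zero maxExtraWaitMillis alone adds that flat amount; a non-zero factor alone
    // adds factor * nextWaitMillis; when both are set, the factor-based wait is capped
    // at maxExtraWaitMillis.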
}
case class RetryContext(
context: Option[Any],
lastError: Throwable,
retryCount: Int,
maxRetry: Int,
retryWaitStrategy: RetryPolicy,
nextWaitMillis: Int,
baseWaitMillis: Int,
extraWaitMillis: Int,
resultClassifier: Any => ResultClass = ResultClass.ALWAYS_SUCCEED,
errorClassifier: Throwable => ResultClass.Failed = ResultClass.ALWAYS_RETRY,
beforeRetryAction: RetryContext => Any = REPORT_RETRY_COUNT
) {
def init(context: Option[Any] = None): RetryContext = {
this.copy(
context = context,
lastError = NOT_STARTED,
retryCount = 0,
nextWaitMillis = retryWaitStrategy.retryPolicyConfig.initialIntervalMillis,
baseWaitMillis = retryWaitStrategy.retryPolicyConfig.initialIntervalMillis,
extraWaitMillis = 0
)
}
def canContinue: Boolean = {
retryCount < maxRetry
}
  /**
   * Update the retry context, including the retry count, last error, next wait time, etc.
   *
   * @param retryReason
   *   the error that triggered this retry
   * @return
   *   the next retry context
   */
def nextRetry(retryReason: Throwable): RetryContext = {
val nextRetryCtx = this.copy(
lastError = retryReason,
retryCount = retryCount + 1,
nextWaitMillis = retryWaitStrategy.nextWait(baseWaitMillis) + extraWaitMillis,
baseWaitMillis = retryWaitStrategy.updateBaseWait(baseWaitMillis),
extraWaitMillis = 0
)
beforeRetryAction(nextRetryCtx)
nextRetryCtx
}
def withExtraWait(extraWait: ExtraWait): RetryContext = {
if (extraWait.hasNoWait && this.extraWaitMillis == 0) {
this
} else {
this.copy(extraWaitMillis = extraWait.extraWaitMillis(nextWaitMillis))
}
}
def withRetryWaitStrategy(newRetryWaitStrategy: RetryPolicy): RetryContext = {
this.copy(retryWaitStrategy = newRetryWaitStrategy)
}
def withMaxRetry(newMaxRetry: Int): RetryContext = {
this.copy(maxRetry = newMaxRetry)
}
def noRetry: RetryContext = {
this.copy(maxRetry = 0)
}
def withBackOff(
initialIntervalMillis: Int = 100,
maxIntervalMillis: Int = 15000,
multiplier: Double = 1.5
): RetryContext = {
val config = RetryPolicyConfig(initialIntervalMillis, maxIntervalMillis, multiplier)
this.copy(retryWaitStrategy = new ExponentialBackOff(config))
}
def withJitter(
initialIntervalMillis: Int = 100,
maxIntervalMillis: Int = 15000,
multiplier: Double = 1.5
): RetryContext = {
val config = RetryPolicyConfig(initialIntervalMillis, maxIntervalMillis, multiplier)
this.copy(retryWaitStrategy = new Jitter(config))
}
def withResultClassifier[U](newResultClassifier: U => ResultClass): RetryContext = {
this.copy(resultClassifier = newResultClassifier.asInstanceOf[Any => ResultClass])
}
  /**
   * Set a detailed error handler that is applied when an exception is thrown. If the given exception is
   * not retryable, just rethrow the exception; otherwise, consume it and let the retry proceed.
   */
def withErrorClassifier(errorClassifier: Throwable => ResultClass.Failed): RetryContext = {
this.copy(errorClassifier = errorClassifier)
}
def beforeRetry[U](handler: RetryContext => U): RetryContext = {
this.copy(beforeRetryAction = handler)
}
/**
* Clear the default beforeRetry action
*/
def noRetryLogging: RetryContext = {
this.copy(beforeRetryAction = { (x: RetryContext) => })
}
/**
* Add a partial function that accepts exceptions that need to be retried.
*
* @param errorClassifier
* @return
*/
def retryOn(errorClassifier: PartialFunction[Throwable, ResultClass.Failed]): RetryContext = {
this.copy(errorClassifier = { (e: Throwable) => errorClassifier.applyOrElse(e, RETHROW_ALL) })
}
def run[A](body: => A): A = {
runInternal(None)(body)
}
def runWithContext[A](context: Any)(body: => A): A = {
runInternal(Option(context))(body)
}
protected def runInternal[A](context: Option[Any])(body: => A): A = {
var result: Option[A] = None
var retryContext: RetryContext = init(context)
var isFirst: Boolean = true
while (isFirst || (result.isEmpty && retryContext.canContinue)) {
isFirst = false
val ret = Try(body)
val resultClass = ret match {
case Success(x) =>
          // Test whether the code block execution succeeded or failed
resultClassifier(x)
case Failure(RetryableFailure(e)) =>
ResultClass.retryableFailure(e)
case Failure(e) =>
errorClassifier(e)
}
resultClass match {
case ResultClass.Succeeded =>
// OK. Exit the loop
result = Some(ret.get)
case ResultClass.Failed(isRetryable, cause, extraWait) if isRetryable =>
// Retryable error
retryContext = retryContext.withExtraWait(extraWait).nextRetry(cause)
// Wait until the next retry
Compat.sleep(retryContext.nextWaitMillis)
case ResultClass.Failed(_, cause, _) =>
// Non-retryable error. Exit the loop by throwing the exception
throw cause
}
}
result match {
case Some(a) =>
a
case None =>
throw MaxRetryException(retryContext)
}
}
}
case class RetryPolicyConfig(
initialIntervalMillis: Int = 100,
maxIntervalMillis: Int = 15000,
multiplier: Double = 1.5
) {
require(initialIntervalMillis >= 0)
require(maxIntervalMillis >= 0)
require(multiplier >= 0)
}
trait RetryPolicy {
def retryPolicyConfig: RetryPolicyConfig
def updateBaseWait(waitMillis: Int): Int = {
math.round(waitMillis * retryPolicyConfig.multiplier).toInt.min(retryPolicyConfig.maxIntervalMillis)
}
def nextWait(baseWaitMillis: Int): Int
}
class ExponentialBackOff(val retryPolicyConfig: RetryPolicyConfig) extends RetryPolicy {
override def nextWait(baseWaitMillis: Int): Int = {
baseWaitMillis
}
}
class Jitter(val retryPolicyConfig: RetryPolicyConfig, rand: Random = new Random()) extends RetryPolicy {
override def nextWait(baseWaitMillis: Int): Int = {
(baseWaitMillis.toDouble * rand.nextDouble()).round.toInt
}
}
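  // Illustration (assumes the context's initial base wait equals RetryPolicyConfig.initialIntervalMillis):
  // with the defaults (100 ms, multiplier 1.5, max 15000 ms), repeated updateBaseWait calls yield
  // 150, 225, 338, ... capped at 15000 ms. ExponentialBackOff.nextWait waits the full base, while
  // Jitter.nextWait scales it by a uniformly random factor in [0, 1).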
}
| wvlet/airframe | airframe-control/src/main/scala/wvlet/airframe/control/Retry.scala | Scala | apache-2.0 | 10,955 |
package com.lysdev.transperthcached.livetimes;
import org.joda.time.DateTime;
class TimesForStation(
station_name: String,
last_updated: DateTime,
trips: List[Trip]) {
def getTrips() = this.trips
def getLastUpdated() = this.last_updated
def getStationName() = this.station_name
override def toString() = f"<TimesForStation $getStationName - $getLastUpdated - ${getTrips.size} trips>"
}
| Mause/TransperthCached | src/com/lysdev/transperthcached/livetimes/TimesForStation.scala | Scala | apache-2.0 | 437 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.security
import scala.reflect.runtime.universe
import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.security.Credentials
import org.apache.hadoop.security.token.{Token, TokenIdentifier}
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils
private[security] class HBaseDelegationTokenProvider
extends HadoopDelegationTokenProvider with Logging {
override def serviceName: String = "hbase"
override def obtainDelegationTokens(
hadoopConf: Configuration,
creds: Credentials): Option[Long] = {
try {
val mirror = universe.runtimeMirror(Utils.getContextOrSparkClassLoader)
val obtainToken = mirror.classLoader.
loadClass("org.apache.hadoop.hbase.security.token.TokenUtil").
getMethod("obtainToken", classOf[Configuration])
logDebug("Attempting to fetch HBase security token.")
val token = obtainToken.invoke(null, hbaseConf(hadoopConf))
.asInstanceOf[Token[_ <: TokenIdentifier]]
logInfo(s"Get token from HBase: ${token.toString}")
creds.addToken(token.getService, token)
} catch {
case NonFatal(e) =>
logDebug(s"Failed to get token from service $serviceName", e)
}
None
}
override def delegationTokensRequired(hadoopConf: Configuration): Boolean = {
hbaseConf(hadoopConf).get("hbase.security.authentication") == "kerberos"
}
private def hbaseConf(conf: Configuration): Configuration = {
try {
val mirror = universe.runtimeMirror(Utils.getContextOrSparkClassLoader)
val confCreate = mirror.classLoader.
loadClass("org.apache.hadoop.hbase.HBaseConfiguration").
getMethod("create", classOf[Configuration])
confCreate.invoke(null, conf).asInstanceOf[Configuration]
} catch {
case NonFatal(e) =>
logDebug("Fail to invoke HBaseConfiguration", e)
conf
}
}
}
| aokolnychyi/spark | core/src/main/scala/org/apache/spark/deploy/security/HBaseDelegationTokenProvider.scala | Scala | apache-2.0 | 2,755 |
package rxscala.subscriptions
import org.junit.Assert._
import org.junit.Test
import org.scalatest.junit.JUnitSuite
import rxscala.Subscription
class SubscriptionTests extends JUnitSuite {
@Test
def anonymousSubscriptionCreate() {
val subscription = Subscription{}
assertNotNull(subscription)
}
@Test
def anonymousSubscriptionDispose() {
var unsubscribed = false
val subscription = Subscription{ unsubscribed = true }
assertFalse(unsubscribed)
subscription.unsubscribe()
assertTrue(unsubscribed)
}
@Test
def emptySubscription() {
val subscription = Subscription(())
subscription.unsubscribe()
}
@Test
def booleanSubscription() {
val subscription = BooleanSubscription()
assertFalse(subscription.isUnsubscribed)
subscription.unsubscribe()
assertTrue(subscription.isUnsubscribed)
subscription.unsubscribe()
assertTrue(subscription.isUnsubscribed)
}
@Test
def compositeSubscriptionAdd() {
var u0 = false
val s0 = BooleanSubscription{ u0 = true }
var u1 = false
val s1 = Subscription{ u1 = true }
val composite = CompositeSubscription()
assertFalse(composite.isUnsubscribed)
composite += s0
composite += s1
composite.unsubscribe()
assertTrue(composite.isUnsubscribed)
assertTrue(s0.isUnsubscribed)
assertTrue(u0)
assertTrue(u1)
val s2 = BooleanSubscription()
assertFalse(s2.isUnsubscribed)
composite += s2
assertTrue(s2.isUnsubscribed)
}
@Test
def compositeSubscriptionRemove() {
val s0 = BooleanSubscription()
val composite = CompositeSubscription()
composite += s0
assertFalse(s0.isUnsubscribed)
composite -= s0
assertTrue(s0.isUnsubscribed)
composite.unsubscribe()
assertTrue(composite.isUnsubscribed)
}
@Test
def multiAssignmentSubscriptionAdd() {
val s0 = BooleanSubscription()
val s1 = BooleanSubscription()
val multiple = MultipleAssignmentSubscription()
assertFalse(multiple.isUnsubscribed)
multiple.subscription = s0
assertEquals(s0.asJavaSubscription, multiple.subscription.asJavaSubscription)
multiple.subscription = s1
assertEquals(s1.asJavaSubscription, multiple.subscription.asJavaSubscription)
assertFalse(s0.isUnsubscribed)
assertFalse(s1.isUnsubscribed)
multiple.unsubscribe()
assertTrue(multiple.isUnsubscribed)
assertFalse(s0.isUnsubscribed)
assertTrue(s1.isUnsubscribed)
val s2 = BooleanSubscription()
assertFalse(s2.isUnsubscribed)
multiple.subscription = s2
assertTrue(s2.isUnsubscribed)
}
}
| kevinwright/RxScala | src/test/scala/rxscala/subscriptions/SubscriptionTests.scala | Scala | apache-2.0 | 2,652 |
package system.cell.sensormanagement.sensors
import org.scalatest.{FlatSpec, Matchers}
import system.ontologies.sensor.SensorCategories
/**
* Created by Matteo Gabellini on 31/07/2017.
*/
class SmokeSensorTest extends FlatSpec with Matchers {
val sensorName = "smokeSensor"
val minValue = 0.0
val maxValue = 50.0
val thresholdValue = 50
val sThreshold = new SmokeThreshold(thresholdValue)
var sSensor = new SmokeSensor(sensorName, 0, minValue, maxValue, sThreshold)
"A Smoke Sensor" should "have the same name assigned during the creation" in {
sSensor.name should be(sensorName)
}
"A Smoke Sensor" should "have the category smoke" in {
sSensor.category should be(SensorCategories.Smoke)
}
"A Smoke Sensor" should "measure the carbon monoxide value" in {
sSensor.gasMeasured should be(Gas.carbonMonoxide)
}
"A Smoke Sensor" should "have the same minimum value assigned in the creation" in {
sSensor.minValue should be(minValue)
}
"A SmokeSensor" should "have the same maximum value assigned in the creation" in {
sSensor.maxValue should be(maxValue)
}
"A Smoke Threshold checking method" should "return true if the smoke value is over the threshold" in {
sThreshold.hasBeenExceeded(thresholdValue + 1) should be(true)
}
"A Smoke Sensor" should "have the same threshold assigned in the creation" in {
sSensor.threshold should be(sThreshold)
}
val refreshRate = 1000
val changeStep = 0.15
var simulatedSmokeSensor = new SimulatedGasSensor(sSensor, refreshRate, SimulationStrategies.LinearDoubleSimulation(changeStep))
var oGSensor: ObservableGasSensor = new ObservableGasSensor(simulatedSmokeSensor)
"A Simulated Smoke Sensor" should "have a same gas measured of the decorated smoke sensor" in {
simulatedSmokeSensor.gasMeasured should be(sSensor.gasMeasured)
}
"A Simulated Smoke Sensor" should "have a same threshold of the decorated smoke sensor" in {
simulatedSmokeSensor.threshold should be(sSensor.threshold)
}
"A Observable Smoke Sensor" should "have a same gas measured of the decorated smoke sensor" in {
oGSensor.gasMeasured should be(simulatedSmokeSensor.gasMeasured)
}
"A Observable Smoke Sensor" should "have a same threshold of the decorated smoke sensor" in {
oGSensor.threshold should be(simulatedSmokeSensor.threshold)
}
}
| albertogiunta/arianna | src/test/scala/system/cell/sensormanagement/sensors/SmokeSensorTest.scala | Scala | gpl-3.0 | 2,477 |
/*
* Copyright (C) 2012 reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.environment.egi
import org.openmole.core.batch.storage.{ RemoteStorage, SimpleStorage }
import org.openmole.core.tools.io.FileUtil
import org.openmole.core.workspace.Workspace
import FileUtil._
import fr.iscpif.gridscale.storage.{ Storage ⇒ GSStorage }
import java.io.File
import java.net.URI
import scala.sys.process._
class RemoteGliteStorage(val host: String, val port: Int, val voName: String) extends RemoteStorage with LCGCp { s ⇒
val timeout = Workspace.preferenceAsDuration(EGIEnvironment.RemoteTimeout).toSeconds
@transient lazy val url = new URI("srm", null, host, port, null, null, null)
protected def run(cmd: String) = {
val output = new StringBuilder
val error = new StringBuilder
val logger =
ProcessLogger(
(o: String) ⇒ output.append("\\n" + o),
(e: String) ⇒ error.append("\\n" + e)
)
val exit = Process(cmd) ! logger
if (exit != 0) throw new RuntimeException(s"Command $cmd had a non 0 return value.\\n Output: ${output.toString}. Error: ${error.toString}")
output.toString
}
override def child(parent: String, child: String): String = GSStorage.child(parent, child)
override def downloadGZ(src: String, dest: File): Unit = Workspace.withTmpFile { tmpFile ⇒
download(src, tmpFile)
tmpFile.copyUncompressFile(dest)
}
override def download(src: String, dest: File): Unit = run(lcgCpCmd(url.resolve(src), dest.getAbsolutePath))
override def uploadGZ(src: File, dest: String): Unit = Workspace.withTmpFile { tmpFile ⇒
src.copyCompressFile(tmpFile)
upload(tmpFile, dest)
}
override def upload(src: File, dest: String): Unit = run(lcgCpCmd(src.getAbsolutePath, url.resolve(dest)))
}
| ISCPIF/PSEExperiments | openmole-src/openmole/plugins/org.openmole.plugin.environment.egi/src/main/scala/org/openmole/plugin/environment/egi/RemoteGliteStorage.scala | Scala | agpl-3.0 | 2,423 |
/*
Copyright The MITRE Corporation 2009-2010. All rights reserved.
*/
package org.mitre.jcarafe.posttagger
import org.mitre.jcarafe.tagger.TaggerTask
import org.mitre.jcarafe.crf.FactoredTrainer
import org.mitre.jcarafe.crf.FactoredDecoder
import org.mitre.jcarafe.crf.TrainingSeqGen
import org.mitre.jcarafe.crf.FactoredDecodingSeqGen
import org.mitre.jcarafe.crf.TextSeqGen
import org.mitre.jcarafe.crf.SeqGenScorer
import org.mitre.jcarafe.crf.TrainingFactoredFeatureRep
import org.mitre.jcarafe.crf.DecodingFactoredFeatureRep
import org.mitre.jcarafe.crf.FeatureManagerBuilder
import org.mitre.jcarafe.crf.SeqGenScorer
import org.mitre.jcarafe.crf.StandardSerializer
import org.mitre.jcarafe.util._
class SummaryTagger(argv: Array[String]) extends TaggerTask[Array[String]](argv) {
import StandardSerializer._
lazy val trainer = new FactoredTrainer[Array[String]](opts) {
val fspecStr = FeatureManagerBuilder.getFeatureSpecString(opts.featureSpec.get)
val builder = new PostFeatureManagerBuilder(fspecStr)
//FeatureManager.setWordProperties(opts, mgr)
//FeatureManager.setLexicon(opts,mgr)
val mgr = builder.getFeatureManager
val fr = new TrainingFactoredFeatureRep[Array[String]](mgr, opts)
val sGen : TrSeqGen = new TrainingSeqGen[Array[String]] (fr, opts) with SummarizationTextSeqGen
}
lazy val decoder = new FactoredDecoder[Array[String]](opts) {
val model = readModel(opts.model.get)
val builder = new PostFeatureManagerBuilder(model.fspec)
//mgr.lex_=(model.lex)
val mgr = builder.getFeatureManager
val fr = new DecodingFactoredFeatureRep[Array[String]](mgr, opts, model, false)
opts.priorAdjust match {case Some(v) => model.adjustParameter(org.mitre.jcarafe.crf.IncrementalMurmurHash.hash(":U:",0),Label("Post",Map("summary" -> "no")),v) case None => }
val sGen : FactoredDecodingSeqGen[Array[String]] =
new FactoredDecodingSeqGen[Array[String]] (fr, model,opts) with SummarizationTextSeqGen with SeqGenScorer[Array[String]]
setDecoder(true)
}
}
object SummaryTaggerMain {
def printUsage = println(" Usage: ")
def main(iargv: Array[String]) : Unit = {
val tagger = new SummaryTagger(iargv)
    tagger.process()
  }
}
| wellner/jcarafe | jcarafe-ext/src/main/scala/org/mitre/jcarafe/posttagger/SummaryTagger.scala | Scala | bsd-3-clause | 2,237 |
package dbtarzan.config.util
import org.scalatest.flatspec.AnyFlatSpec
class ByteArrayHexTest extends AnyFlatSpec {
"converting to hex and back" should "give the original value" in {
val original = "F263575E7B00A977A8E9A37E08B9C215FEB9BFB2F992B2B8F11E"
val bytes = ByteArrayHex.fromHex(original)
println(bytes)
val hex = ByteArrayHex.toHex(bytes)
assert(hex === original)
}
} | aferrandi/dbtarzan | src/test/scala/dbtarzan/config/util/ByteArrayHexTest.scala | Scala | apache-2.0 | 397 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.serialization
import com.typesafe.scalalogging.LazyLogging
import org.geotools.factory.Hints
// noinspection LanguageFeature
trait GenericMapSerialization[T <: PrimitiveWriter, V <: PrimitiveReader] extends LazyLogging {
def serialize(out: T, map: java.util.Map[AnyRef, AnyRef]): Unit
def deserialize(in: V): java.util.Map[AnyRef, AnyRef]
def deserialize(in: V, map: java.util.Map[AnyRef, AnyRef]): Unit
protected def write(out: T, value: AnyRef) = value match {
case v: String => out.writeString(v)
case v: java.lang.Integer => out.writeInt(v)
case v: java.lang.Long => out.writeLong(v)
case v: java.lang.Float => out.writeFloat(v)
case v: java.lang.Double => out.writeDouble(v)
case v: java.lang.Boolean => out.writeBoolean(v)
case v: java.util.Date => out.writeLong(v.getTime)
case v: Hints.Key => out.writeString(HintKeySerialization.keyToId(v))
case _ => throw new IllegalArgumentException(s"Unsupported value: $value (${value.getClass})")
}
protected def read(in: V, clas: Class[_]): AnyRef = clas match {
case c if classOf[java.lang.String].isAssignableFrom(c) => in.readString()
case c if classOf[java.lang.Integer].isAssignableFrom(c) => Int.box(in.readInt())
case c if classOf[java.lang.Long].isAssignableFrom(c) => Long.box(in.readLong())
case c if classOf[java.lang.Float].isAssignableFrom(c) => Float.box(in.readFloat())
case c if classOf[java.lang.Double].isAssignableFrom(c) => Double.box(in.readDouble())
case c if classOf[java.lang.Boolean].isAssignableFrom(c) => Boolean.box(in.readBoolean())
case c if classOf[java.util.Date].isAssignableFrom(c) => new java.util.Date(in.readLong())
case c if classOf[Hints.Key].isAssignableFrom(c) => HintKeySerialization.idToKey(in.readString())
case _ => throw new IllegalArgumentException(s"Unsupported value class: $clas")
}
protected def canSerialize(obj: AnyRef): Boolean = obj match {
case key: Hints.Key => HintKeySerialization.canSerialize(key)
case _ => true
}
}
| ronq/geomesa | geomesa-features/geomesa-feature-common/src/main/scala/org/locationtech/geomesa/features/serialization/GenericMapSerialization.scala | Scala | apache-2.0 | 2,588 |
package com.cave.metrics.data.influxdb
import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{FlatSpec, Matchers}
import scala.concurrent.duration._
class InfluxClientSpec extends FlatSpec with Matchers with MockitoSugar {
val SomeData =
"""
|[
| {
| "name": "orders",
| "columns": [ "time", "sequence_number", "value" ],
| "points": [
| [ 1404086940, 54800001, 0],
| [ 1404086880, 54820001, 0],
| [ 1404086820, 54850001, 0],
| [ 1404086760, 54860001, 0],
| [ 1404086700, 54890001, 0],
| [ 1404086640, 54900001, 0],
| [ 1404086580, 54930001, 0],
| [ 1404086520, 54940001, 0],
| [ 1404086460, 54960001, 0]
| ]
| }
|]
""".stripMargin
val mockConfig = mock[InfluxClusterConfig]
val influxClient = new InfluxClient(mockConfig)
"createResponse" should "return a proper map of data points" in {
val data = influxClient.createResponse(SomeData)
data.metrics.size should be(9)
data.metrics.find(_.time == new DateTime(1404086940000L)).map(_.value) should be(Some(0))
}
"Influx Client" should "generate valid Continuous Queries" in {
influxClient.createSQL("METRIC",
Map("tag_name1" -> "tag_val1", "tag_name2" -> "tag_val2"),
"AGGREGATOR", 5.seconds , "QUERY_NAME") should be (
"""select AGGREGATOR as value from "METRIC" where tag_name1='tag_val1' and tag_name2='tag_val2' group by time(5s) into "QUERY_NAME"""")
influxClient.createSQL("METRIC", Map(), "AGGREGATOR", 5.hours , "QUERY_NAME") should be (
"""select AGGREGATOR as value from "METRIC" group by time(18000s) into "QUERY_NAME"""")
}
val StartDateString = "2014-09-24 09:43:00"
val StartDate = ISODateTimeFormat.dateTimeNoMillis().parseDateTime("2014-09-24T09:43:00Z")
val EndDateString = "2014-09-25 09:43:00"
val EndDate = ISODateTimeFormat.dateTimeNoMillis().parseDateTime("2014-09-25T09:43:00Z")
it should "create valid queries for GetMetricData" in {
influxClient.buildQuery("orders", Map.empty[String, String], None, None, None) should be(
"""select value from "orders" limit 1440""")
influxClient.buildQuery("orders", Map.empty[String, String], None, None, Some(1)) should be(
"""select value from "orders" limit 1""")
influxClient.buildQuery("orders", Map("shipTo" -> "US"), None, None, None) should be(
"""select value from "orders" where shipTo='US' limit 1440""")
influxClient.buildQuery("orders", Map("shipTo" -> "US"), Some(StartDate), None, None) should be(
s"""select value from "orders" where shipTo='US' and time > '$StartDateString' limit 1440""")
influxClient.buildQuery("orders", Map("shipTo" -> "US"), None, Some(EndDate), None) should be(
s"""select value from "orders" where shipTo='US' and time < '$EndDateString' limit 1440""")
influxClient.buildQuery("orders", Map("shipTo" -> "US"), Some(StartDate), Some(EndDate), None) should be(
s"""select value from "orders" where shipTo='US' and time > '$StartDateString' and time < '$EndDateString' limit 1440""")
influxClient.buildQuery("orders", Map("shipTo" -> "US"), Some(StartDate), Some(EndDate), Some(2000)) should be(
s"""select value from "orders" where shipTo='US' and time > '$StartDateString' and time < '$EndDateString' limit 1440""")
influxClient.buildQuery("orders", Map("shipTo" -> "US"), Some(StartDate), Some(EndDate), Some(2)) should be(
s"""select value from "orders" where shipTo='US' and time > '$StartDateString' and time < '$EndDateString' limit 2""")
}
it should "create valid queries for aggregated metrics" in {
influxClient.buildAggregatedQuery("sum(value)", 5.minutes, "orders", Map.empty[String, String], None, None, None) should be(
"""select sum(value) from "orders" group by time(300s) limit 1440""")
influxClient.buildAggregatedQuery("sum(value)", 5.minutes, "orders", Map.empty[String, String], None, None, Some(1)) should be(
"""select sum(value) from "orders" group by time(300s) limit 1""")
influxClient.buildAggregatedQuery("sum(value)", 5.minutes, "orders", Map("shipTo" -> "US"), None, None, None) should be(
"""select sum(value) from "orders" where shipTo='US' group by time(300s) limit 1440""")
influxClient.buildAggregatedQuery("sum(value)", 5.minutes, "orders", Map("shipTo" -> "US"), Some(StartDate), None, None) should be(
s"""select sum(value) from "orders" where shipTo='US' and time > '$StartDateString' group by time(300s) limit 1440""")
influxClient.buildAggregatedQuery("sum(value)", 5.minutes, "orders", Map("shipTo" -> "US"), None, Some(EndDate), None) should be(
s"""select sum(value) from "orders" where shipTo='US' and time < '$EndDateString' group by time(300s) limit 1440""")
influxClient.buildAggregatedQuery("sum(value)", 5.minutes, "orders", Map("shipTo" -> "US"), Some(StartDate), Some(EndDate), None) should be(
s"""select sum(value) from "orders" where shipTo='US' and time > '$StartDateString' and time < '$EndDateString' group by time(300s) limit 1440""")
influxClient.buildAggregatedQuery("sum(value)", 5.minutes, "orders", Map("shipTo" -> "US"), Some(StartDate), Some(EndDate), Some(2000)) should be(
s"""select sum(value) from "orders" where shipTo='US' and time > '$StartDateString' and time < '$EndDateString' group by time(300s) limit 1440""")
influxClient.buildAggregatedQuery("sum(value)", 5.minutes, "orders", Map("shipTo" -> "US"), Some(StartDate), Some(EndDate), Some(2)) should be(
s"""select sum(value) from "orders" where shipTo='US' and time > '$StartDateString' and time < '$EndDateString' group by time(300s) limit 2""")
}
val SomeMetrics =
"""
|[
| {
| "points": [ [ 1410878579000, 1226770002, 1, "29" ] ],
| "columns": [ "time", "sequence_number", "value", "alert" ],
| "name": "alertsHistory"
| },
| {
| "points": [ [ 1413890367000, 2576490001, 1, "stage57", "svc-sku-pricing", "stage" ] ],
| "columns": [ "time", "sequence_number", "value", "host", "service", "environment" ],
| "name": "svc-sku-pricing-publisher"
| }
|]
""".stripMargin
"createMetricInfoResponse" should "return a list of MetricInfo objects" in {
val data = influxClient.createMetricInfoResponse(SomeMetrics)
data.size should be(2)
val skuData = data.find(_.name == "svc-sku-pricing-publisher").get
skuData.tags.size should be(3)
skuData.tags.contains("environment") should be(true)
skuData.tags.contains("service") should be(true)
skuData.tags.contains("host") should be(true)
val alertsHistory = data.find(_.name == "alertsHistory").get
alertsHistory.tags.size should be(1)
alertsHistory.tags.contains("alert") should be(true)
}
}
| gilt/cave | core/src/test/scala/com/cave/metrics/data/influxdb/InfluxClientSpec.scala | Scala | mit | 6,975 |
package jauter
import org.scalatest._
class RoutingSpec extends FlatSpec with Matchers {
import StringMethodRouter.router
"A router" should "ignore slashes at both ends" in {
router.route(Method.GET, "articles").target should be ("index")
router.route(Method.GET, "/articles").target should be ("index")
router.route(Method.GET, "//articles").target should be ("index")
router.route(Method.GET, "articles/").target should be ("index")
router.route(Method.GET, "articles//").target should be ("index")
router.route(Method.GET, "/articles/").target should be ("index")
router.route(Method.GET, "//articles//").target should be ("index")
}
"A router" should "handle empty params" in {
val routed = router.route(Method.GET, "/articles")
routed.target should be ("index")
routed.params.size should be (0)
}
"A router" should "handle params" in {
val routed = router.route(Method.GET, "/articles/123")
routed.target should be ("show")
routed.params.size should be (1)
routed.params.get("id") should be ("123")
}
"A router" should "handle none" in {
val router = (new MethodlessRouter[String]).pattern("/articles", "index")
val routed = router.route("/noexist")
(routed == null) should be (true)
}
"A router" should "handle splat (wildcard)" in {
val routed = router.route(Method.GET, "/download/foo/bar.png")
routed.target should be ("download")
routed.params.size should be (1)
routed.params.get("*") should be ("foo/bar.png")
}
"A router" should "handle subclasses" in {
trait Action
class Index extends Action
class Show extends Action
val router = new MethodlessRouter[Class[_ <: Action]]
router.pattern("/articles", classOf[Index])
router.pattern("/articles/:id", classOf[Show])
val routed1 = router.route("/articles")
val routed2 = router.route("/articles/123")
routed1.target should be (classOf[Index])
routed2.target should be (classOf[Show])
}
"A router" should "handle order" in {
val routed1 = router.route(Method.GET, "/articles/new")
routed1.target should be ("new")
routed1.params.size should be (0)
val routed2 = router.route(Method.GET, "/articles/123")
routed2.target should be ("show")
routed2.params.size should be (1)
routed2.params.get("id") should be ("123")
val routed3 = router.route(Method.GET, "/notfound")
routed3.target should be ("404")
routed3.params.size should be (0)
}
"A router" should "handle any method" in {
val routed1 = router.route(Method.GET, "/any")
routed1.target should be ("any")
routed1.params.size should be (0)
val routed2 = router.route(Method.POST, "/any")
routed2.target should be ("any")
routed2.params.size should be (0)
}
"A router" should "handle remove by target" in {
val router = (new MethodlessRouter[String]).pattern("/articles", "index")
router.removeTarget("index")
val routed = router.route("/articles")
(routed == null) should be (true)
}
"A router" should "handle remove by path" in {
val router = (new MethodlessRouter[String]).pattern("/articles", "index")
router.removePath("/articles")
val routed = router.route("/articles")
(routed == null) should be (true)
}
}
| sinetja/jauter | src/test/scala/jauter/RoutingSpec.scala | Scala | mit | 3,390 |
package coltfred.jsongenerator
import scalaz.effect.IO
import scalaz._, Scalaz._
import com.nicta.rng.Rng
object Main extends scalaz.effect.SafeApp {
//implicit vals to allow closing of Source and the output stream.
implicit val resource = new scalaz.effect.Resource[scala.io.BufferedSource] { def close(b: scala.io.BufferedSource): IO[Unit] = IO(b.close) }
implicit val resource2 = new scalaz.effect.Resource[java.io.OutputStreamWriter] { def close(b: java.io.OutputStreamWriter): IO[Unit] = IO(b.close) }
private[this] final val Utf8Encoding = "utf-8"
private[this] val recordGenerator = new RngOutputRecordGenerator()
override def runl(args: List[String]): IO[Unit] = {
if (args.length != 3) {
IO.putStrLn("Usage: Main <input json> <output dir> <# of records>")
} else {
val inputFilename = args(0)
val outputFilename = args(1)
val numRecords = \\/.fromTryCatch(args(2).toInt).leftMap(ex => ParseError(ex.getMessage))
val eitherT = for {
recordCount <- EitherT(numRecords.point[IO])
input <- readFile(inputFilename)
//The values don't matter as long as they parse.
//Could be changed if we want to be more strict.
graph <- EitherT(Graph.fromJson[argonaut.Json](input).point[IO])
randomRecords = recordGenerator.generateValues[argonaut.Json](graph).fill(recordCount).run
outputs <- EitherT.right(randomRecords)
_ <- writeFile(outputFilename, outputs)
} yield ()
eitherT.run.flatMap(_.fold(printErrorAndExit, _.point[IO]))
}
}
def printErrorAndExit(error: Error): IO[Unit] = for {
_ <- IO.putStrLn(error.toString)
_ <- IO(sys.exit(1))
} yield ()
def readFile(filename: String): EitherT[IO, Error, String] = EitherT {
import scala.io.BufferedSource
IO(scala.io.Source.fromFile(filename, Utf8Encoding)).using { source: BufferedSource =>
IO(source.getLines.mkString("\\n"))
}.catchLeft.map(_.leftMap(FileReadError(_)))
}
def writeFile[A: argonaut.EncodeJson](outputFilename: String, actions: List[OutputRecord[A]]): EitherT[IO, Error, Unit] = EitherT {
import java.io._
import argonaut._, Argonaut._
def initFile(fileName: String, append: Boolean = false): OutputStreamWriter =
new OutputStreamWriter(new FileOutputStream(fileName, append), Utf8Encoding)
IO(initFile(outputFilename)).using { writer: OutputStreamWriter =>
IO(writer.write(actions.asJson.spaces2))
}.catchLeft.map(_.leftMap(FileWriteError(_)))
}
}
| coltfred/jsongenerator | src/main/scala/coltfred/jsongenerator/Main.scala | Scala | apache-2.0 | 2,520 |
package basics
object PartialApplication {
val add = (x: Int, y: Int) => x + y
val addPartial = add(2, _: Int)
def doAddPartial(x:Int) = addPartial(x)
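  // Illustration: addPartial fixes the first argument of `add` to 2, so
  // doAddPartial(3) evaluates add(2, 3) and returns 5.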
} | szaqal/KitchenSink | Scala/01/src/main/scala/basics/PartialApplication.scala | Scala | gpl-3.0 | 167 |
package team16.euler
/**
* 10001st prime
* Problem 7
* By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
*
* What is the 10,001st prime number?
*/
import Stream._
object Problem007 extends App {
def isPrime(primes: List[Long], x: Long) = primes.find(p => x % p == 0L) == None
def addIfPrime(primes: List[Long], x: Long) = {
if(isPrime(primes, x)) primes :+ x
else primes
}
def addNextPrime(primes: List[Long], from: Long) = {
primes :+ ( iterate(from) { i => i + 1 } find(i => isPrime(primes, i)) get)
}
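  // Worked example: with primes = List(2, 3, 5, 7, 11, 13), addNextPrime(primes, 13)
  // scans 13, 14, 15, 16, 17 and appends 17, the first candidate not divisible by any
  // prime found so far.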
val n = 10001
var primes = List[Long](2)
while(primes.length < n) {
primes = addNextPrime(primes, primes.last)
}
println(primes last)
} | gatesy/euler-scala | src/team16/euler/Problem007.scala | Scala | gpl-2.0 | 789 |
/**
* Copyright 2013-2015 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.cascade.http.tests.matchers
import akka.actor.Actor
import org.specs2.matcher.{MatchResult, Expectable, Matcher}
import com.paypal.cascade.http.tests.actor.RefAndProbe
import scala.util.Try
/**
* this trait has specs2 matcher functionality for the [[com.paypal.cascade.http.tests.actor.RefAndProbe]] class. Mix it into your [[org.specs2.Specification]] (or trait/class inside your specification).
*
* Example usage:
*
* {{{
* class MySpec extends Specification { override def is = s2"""
   *   MySpec1 ${mySpec1}
* """
*
   *   implicit val system = ActorSystem("hello-world")
*
* def mySpec1 = {
* val r = RefAndProbe(TestActorRef(new MyActor))
* //...
* system.stop(r.ref)
* r must beStopped
* }
* }
* }}}
*
*/
trait RefAndProbeMatchers {
/**
* the matcher for testing whether the [[akka.testkit.TestActorRef]] inside the [[com.paypal.cascade.http.tests.actor.RefAndProbe]] is stopped
* @tparam T the [[akka.actor.Actor]] that the [[akka.testkit.TestActorRef]] contains
*/
class RefAndProbeIsStopped[T <: Actor]() extends Matcher[RefAndProbe[T]] {
override def apply[S <: RefAndProbe[T]](r: Expectable[S]): MatchResult[S] = {
val refAndProbe = r.value
val res = Try(refAndProbe.probe.expectTerminated(refAndProbe.ref))
result(res.isSuccess, s"${refAndProbe.ref} has stopped", s"${refAndProbe.ref} is not stopped", r)
}
}
/**
* the matcher function to test whether the [[akka.testkit.TestActorRef]] inside a [[com.paypal.cascade.http.tests.actor.RefAndProbe]] is stopped
*
* Example usage:
*
* {{{
* val refAndProbe = RefAndProbe(TestActorRef(new MyActor))
* //do stuff with refAndProbe.ref
* ...
* //shut down refAndProbe.ref
* refAndProbe must beStopped
* }}}
* @tparam T the [[akka.actor.Actor]] that the [[akka.testkit.TestActorRef]] contains
* @return the new matcher.
*/
def beStopped[T <: Actor] = {
new RefAndProbeIsStopped[T]
}
}
| 2rs2ts/cascade | http/src/test/scala/com/paypal/cascade/http/tests/matchers/RefAndProbeMatchers.scala | Scala | apache-2.0 | 2,601 |
package net.batyuk.akkahttp.examples.api
import akka.http.marshallers.xml.ScalaXmlSupport
import akka.http.server.directives.AuthenticationDirectives._
import com.typesafe.config.{ ConfigFactory, Config }
import akka.actor.ActorSystem
import akka.pattern._
import scala.concurrent.duration._
import akka.util.Timeout
import akka.stream.FlowMaterializer
import akka.http.Http
import akka.http.server._
import net.batyuk.akkahttp.examples.core.PongActor
import scala.concurrent.duration.Duration
object TestServer extends App {
val testConf: Config = ConfigFactory.parseString("""
akka.loglevel = INFO
akka.log-dead-letters = off""")
implicit val system = ActorSystem("ServerTest", testConf)
import system.dispatcher
implicit val materializer = FlowMaterializer()
implicit val timeout = Timeout(5 seconds)
import ScalaXmlSupport._
import Directives._
def auth =
HttpBasicAuthenticator.provideUserName {
case p @ UserCredentials.Provided(name) ⇒ p.verifySecret(name + "-password")
case _ ⇒ false
}
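  // Illustration: with this check, user "alice" authenticates with the password
  // "alice-password"; any other secret is rejected.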
val binding = Http().bind(interface = "localhost", port = 8080)
val materializedMap = binding startHandlingWith {
get {
path("") {
complete(index)
} ~ pathPrefix("static") {
getFromResourceDirectory("static/")
} ~
path("secure") {
HttpBasicAuthentication("My very secure site")(auth) { user ⇒
complete(<html><body>Hello <b>{ user }</b>. Access has been granted!</body></html>)
}
} ~
path("ping") {
complete("PONG!")
} ~
path("coreping") {
complete((system.actorOf(PongActor.props) ? "ping").mapTo[String])
} ~
path("crash") {
complete(sys.error("BOOM!"))
} ~
path("shutdown") {
shutdown
complete("SHUTDOWN")
}
}
}
def shutdown(): Unit = binding.unbind(materializedMap).onComplete(_ ⇒ system.shutdown())
lazy val index =
<html>
<body>
<h1>Say hello to <i>akka-http-core</i>!</h1>
<p>Defined resources:</p>
<ul>
<li><a href="/ping">/ping</a></li>
<li><a href="/coreping">/coreping</a> - ping from actor in another project</li>
<li><a href="/static/index.html">/static/index.html</a> - serving static content from the src/main/resources</li>
<li><a href="/secure">/secure</a> Use any username and '<username>-password' as credentials</li>
<li><a href="/crash">/crash</a></li>
<li><a href="/shutdown">/shutdown</a> - never do this in production :-)</li>
</ul>
</body>
</html>
}
| abatyuk/akka-http-examples | multiProject/api/src/main/scala/net/batyuk/akkahttp/examples/api/TestServer.scala | Scala | apache-2.0 | 2,699 |
/*
* Copyright 2006-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package mapper {
import _root_.java.sql.{Connection,PreparedStatement,ResultSet,Statement}
import _root_.net.liftweb.common._
/**
* JDBC Driver Abstraction base class. New driver types should extend this base
* class. New drivers should "register" in the companion object
* DriverType.calcDriver method.
*/
abstract class DriverType(val name : String) {
def binaryColumnType: String
def clobColumnType: String
def varcharColumnType(len : Int) : String = "VARCHAR(%d)".format(len)
def booleanColumnType: String
def dateTimeColumnType: String
def dateColumnType: String
def timeColumnType: String
def integerColumnType: String
def integerIndexColumnType: String
def enumColumnType: String
def longForeignKeyColumnType: String
def longIndexColumnType: String
def enumListColumnType: String
def longColumnType: String
def doubleColumnType: String
def supportsForeignKeys_? : Boolean = false
def createTablePostpend: String = ""
/**
* Whether this database supports LIMIT clause in SELECTs.
*/
def brokenLimit_? : Boolean = false
/**
* Whether the primary key has been defined by the index column.
*/
def pkDefinedByIndexColumn_? : Boolean = false
/**
* Maximum value of the LIMIT clause in SELECT.
*/
def maxSelectLimit : String = _root_.java.lang.Long.MAX_VALUE.toString
/**
* Performs an insert and optionally returns the ResultSet of the generated keys that were inserted. If no keys are
* specified, return the number of rows updated.
*
* @param conn A connection that the method can optionally use if it needs to execute ancillary statements
* @param query The prepared query string to use for the insert
* @param setter A function that will set the parameters on the prepared statement
* @param pkName Zero or more generated column names that need to be returned
*/
def performInsert [T](conn : SuperConnection, query : String, setter : PreparedStatement => Unit, tableName : String, genKeyNames : List[String])(handler : Either[ResultSet,Int] => T) : T =
genKeyNames match {
case Nil =>
DB.prepareStatement(query, conn) {
stmt =>
setter(stmt)
handler(Right(stmt.executeUpdate))
}
case pk =>
performInsertWithGenKeys(conn, query, setter, tableName, pk, handler)
}
/*
* Subclasses should override this method if they don't have proper getGeneratedKey support (JDBC3)
*/
protected def performInsertWithGenKeys [T](conn : SuperConnection, query : String, setter : PreparedStatement => Unit, tableName : String, genKeyNames : List[String], handler : Either[ResultSet,Int] => T) : T =
DB.prepareStatement(query, Statement.RETURN_GENERATED_KEYS, conn) {
stmt =>
setter(stmt)
stmt.executeUpdate
handler(Left(stmt.getGeneratedKeys))
}
/**
* Name of the default db schema. If not set, then the schema is assumed to
* equal the db user name.
*/
def defaultSchemaName : Box[String] = Empty
type TypeMapFunc = PartialFunction[Int,Int]
/**
* Allow the driver to do specific remapping of column types for cases
* where not all types are supported. Classes that want to do custom type
* mapping for columns should override the customColumnTypeMap method.
*/
def columnTypeMap : TypeMapFunc =
customColumnTypeMap orElse {
case x => x
}
/**
* Allows the Vendor-specific Driver to do custom type mapping for a particular
* column type.
*/
protected def customColumnTypeMap : TypeMapFunc = new TypeMapFunc {
def apply (in : Int) = -1
def isDefinedAt (in : Int) = false
}
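  // For reference, OracleDriver further down in this file shows the intended override
  // pattern, remapping java.sql.Types.BOOLEAN to Types.INTEGER since Oracle lacks a
  // native boolean column type:
  //
  //   override def customColumnTypeMap = {
  //     case Types.BOOLEAN => Types.INTEGER
  //   }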
/**
* This method can be overriden by DriverType impls to allow for custom setup
* of Primary Key Columns (creating sequeneces or special indices, for example).
* The List of commands will be executed in order.
*/
def primaryKeySetup(tableName : String, columnName : String) : List[String] = {
List("ALTER TABLE "+tableName+" ADD CONSTRAINT "+tableName+"_PK PRIMARY KEY("+columnName+")")
}
/** This defines the syntax for adding a column in an alter. This is
* used because some DBs (Oracle, for one) use slightly different syntax. */
def alterAddColumn = "ADD COLUMN"
}
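// A minimal sketch of a hypothetical vendor driver (illustrative only, not an actual
// supported driver): extend DriverType, implement the abstract column-type members, and
// wire it into DriverType.calcDriver below so it is selected from the JDBC metadata
// product name.
//
//   object MyDbDriver extends DriverType("MyDB") {
//     def binaryColumnType = "BLOB"
//     def clobColumnType = "CLOB"
//     def booleanColumnType = "BOOLEAN"
//     def dateTimeColumnType = "TIMESTAMP"
//     def dateColumnType = "DATE"
//     def timeColumnType = "TIME"
//     def integerColumnType = "INTEGER"
//     def integerIndexColumnType = "INTEGER NOT NULL"
//     def enumColumnType = "BIGINT"
//     def longForeignKeyColumnType = "BIGINT"
//     def longIndexColumnType = "BIGINT NOT NULL"
//     def enumListColumnType = "BIGINT"
//     def longColumnType = "BIGINT"
//     def doubleColumnType = "DOUBLE"
//   }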
object DriverType {
var calcDriver: Connection => DriverType = conn => {
val meta = conn.getMetaData
(meta.getDatabaseProductName,meta.getDatabaseMajorVersion,meta.getDatabaseMinorVersion) match {
case (DerbyDriver.name,_,_) => DerbyDriver
case (MySqlDriver.name,_,_) => MySqlDriver
case (PostgreSqlDriver.name, major, minor) if ((major == 8 && minor >= 2) || major > 8) => PostgreSqlDriver
case (PostgreSqlDriver.name, _, _) => PostgreSqlOldDriver
case (H2Driver.name,_,_) => H2Driver
case (SqlServerDriver.name,major,_) if major >= 9 => SqlServerDriver
case (SqlServerDriver.name,_,_) => SqlServerPre2005Driver
case (OracleDriver.name,_,_) => OracleDriver
case (MaxDbDriver.name,_,_) => MaxDbDriver
case x => throw new Exception(
"Lift mapper does not support JDBC driver %s.\n".format(x) +
"See http://wiki.liftweb.net/index.php/Category:Database for a list of supported databases.")
}
}
}
object DerbyDriver extends DriverType("Apache Derby") {
def binaryColumnType = "LONG VARCHAR FOR BIT DATA"
def booleanColumnType = "SMALLINT"
def clobColumnType = "LONG VARCHAR"
def dateTimeColumnType = "TIMESTAMP"
def dateColumnType = "DATE"
def timeColumnType = "TIME"
def integerColumnType = "INTEGER"
def integerIndexColumnType = "INTEGER NOT NULL GENERATED BY DEFAULT AS IDENTITY"
def enumColumnType = "BIGINT"
def longForeignKeyColumnType = "BIGINT"
def longIndexColumnType = "BIGINT NOT NULL GENERATED BY DEFAULT AS IDENTITY"
def enumListColumnType = "BIGINT"
def longColumnType = "BIGINT"
def doubleColumnType = "DOUBLE"
override def brokenLimit_? : Boolean = true
}
object MySqlDriver extends DriverType("MySQL") {
def binaryColumnType = "MEDIUMBLOB"
def clobColumnType = "LONGTEXT"
def booleanColumnType = "BOOLEAN"
def dateTimeColumnType = "DATETIME"
def dateColumnType = "DATE"
def timeColumnType = "TIME"
def integerColumnType = "INTEGER"
def integerIndexColumnType = "INTEGER NOT NULL AUTO_INCREMENT UNIQUE"
def enumColumnType = "BIGINT"
def longForeignKeyColumnType = "BIGINT UNSIGNED"
def longIndexColumnType = "BIGINT UNSIGNED NOT NULL AUTO_INCREMENT UNIQUE KEY"
def enumListColumnType = "BIGINT"
def longColumnType = "BIGINT"
def doubleColumnType = "DOUBLE"
override def createTablePostpend: String = " ENGINE = InnoDB "
}
object H2Driver extends DriverType("H2") {
def binaryColumnType = "BINARY"
def clobColumnType = "LONGVARCHAR"
def booleanColumnType = "BOOLEAN"
def dateTimeColumnType = "TIMESTAMP"
def dateColumnType = "DATE"
def timeColumnType = "TIME"
def integerColumnType = "INTEGER"
def integerIndexColumnType = "INTEGER NOT NULL AUTO_INCREMENT"
def enumColumnType = "BIGINT"
def longForeignKeyColumnType = "BIGINT"
def longIndexColumnType = "BIGINT NOT NULL AUTO_INCREMENT"
def enumListColumnType = "BIGINT"
def longColumnType = "BIGINT"
def doubleColumnType = "DOUBLE"
/**
* Whether the primary key has been defined by the index column.
* H2 creates primary key for a table, when AUTO_INCREMENT type
* is used. <--- NOT TRUE
* I went into the H2 console, created a table with auto_increment
* and was able to insert duplicate ids. Then I created it with
* AUTO_INCREMENT PRIMARY KEY and it did not allow it.
*/
override def pkDefinedByIndexColumn_? : Boolean = false //changed to false by nafg
override def maxSelectLimit = "0";
override def defaultSchemaName : Box[String] = Full("PUBLIC")
}
/**
* Provides some base definitions for PostgreSql databases.
*/
abstract class BasePostgreSQLDriver extends DriverType("PostgreSQL") {
def binaryColumnType = "BYTEA"
def clobColumnType = "TEXT"
def booleanColumnType = "BOOLEAN"
def dateTimeColumnType = "TIMESTAMP"
def dateColumnType = "DATE"
def timeColumnType = "TIME"
def integerColumnType = "INTEGER"
def integerIndexColumnType = "SERIAL"
def enumColumnType = "BIGINT"
def longForeignKeyColumnType = "BIGINT"
def longIndexColumnType = "BIGSERIAL"
def enumListColumnType = "BIGINT"
def longColumnType = "BIGINT"
def doubleColumnType = "DOUBLE PRECISION"
override def maxSelectLimit = "ALL"
/**
* "$user" schema is searched before "public", but it does not exist by default,
* so "public" is our default choice.
*/
override def defaultSchemaName : Box[String] = Full("public")
}
/**
* PostgreSql driver for versions 8.2 and up. Tested with:
*
* <ul>
* <li>8.3</li>
* </ul>
*/
object PostgreSqlDriver extends BasePostgreSQLDriver {
/* PostgreSQL doesn't support generated keys via the JDBC driver. Instead, we use the RETURNING clause on the insert.
* From: http://www.postgresql.org/docs/8.2/static/sql-insert.html
*/
override def performInsertWithGenKeys [T](conn : SuperConnection, query : String, setter : PreparedStatement => Unit, tableName : String, genKeyNames : List[String], handler : Either[ResultSet,Int] => T) : T =
DB.prepareStatement(query + " RETURNING " + genKeyNames.mkString(","), conn) {
stmt =>
setter(stmt)
handler(Left(stmt.executeQuery))
}
}
/**
* PostgreSql driver for versions 8.1 and earlier. Tested with
*
* <ul>
* <li>8.1</li>
* <li>8.0</li>
* </ul>
*
   * Successful use of earlier versions should be reported to [email protected].
*/
object PostgreSqlOldDriver extends BasePostgreSQLDriver {
/* PostgreSQL doesn't support generated keys via the JDBC driver.
   * Instead, we use the currval() function on the table's sequence to get the
   * last inserted key from the DB.
*/
override def performInsertWithGenKeys [T](conn : SuperConnection, query : String, setter : PreparedStatement => Unit, tableName : String, genKeyNames : List[String], handler : Either[ResultSet,Int] => T) : T = {
DB.prepareStatement(query, conn) {
stmt =>
setter(stmt)
stmt.executeUpdate
}
val pkValueQuery = genKeyNames.map(String.format("currval('%s_%s_seq')", tableName, _)).mkString(", ")
DB.statement(conn) {
stmt =>
handler(Left(stmt.executeQuery("SELECT " + pkValueQuery)))
}
}
}
abstract class SqlServerBaseDriver extends DriverType("Microsoft SQL Server") {
def binaryColumnType = "IMAGE"
def booleanColumnType = "BIT"
override def varcharColumnType(len : Int) : String = "NVARCHAR(%d)".format(len)
def clobColumnType = "NTEXT"
def dateTimeColumnType = "DATETIME"
def dateColumnType = "DATE"
def timeColumnType = "TIME"
def integerColumnType = "INT"
def integerIndexColumnType = "INT IDENTITY NOT NULL"
def enumColumnType = "BIGINT"
def longForeignKeyColumnType = "BIGINT"
def longIndexColumnType = "BIGINT IDENTITY NOT NULL"
def enumListColumnType = "BIGINT"
def longColumnType = "BIGINT"
def doubleColumnType = "FLOAT"
override def defaultSchemaName : Box[String] = Full("dbo")
// Microsoft doesn't use "COLUMN" syntax when adding a column to a table
override def alterAddColumn = "ADD"
}
/**
* Microsoft SQL Server driver for versions 2000 and below
*/
object SqlServerPre2005Driver extends SqlServerBaseDriver
object SqlServerDriver extends SqlServerBaseDriver {
override def binaryColumnType = "VARBINARY(MAX)"
override def clobColumnType = "NVARCHAR(MAX)"
}
/**
* Driver for Oracle databases. Tested with:
*
* <ul>
* <li>Oracle XE 10.2.0.1</li>
* </ul>
*
* Other working install versions should be reported to [email protected].
*/
object OracleDriver extends DriverType("Oracle") {
def binaryColumnType = "LONG RAW"
def booleanColumnType = "NUMBER"
def clobColumnType = "CLOB"
def dateTimeColumnType = "TIMESTAMP"
/*
* It's unclear whether DATE would suffice here. The PL/SQL ref at
* http://download.oracle.com/docs/cd/B19306_01/java.102/b14355/apxref.htm
* seems to indicate that DATE and TIMESTAMP can both be used
* for java.sql.Date and java.sql.Time representations.
*/
def dateColumnType = "TIMESTAMP"
def timeColumnType = "TIMESTAMP"
def integerColumnType = "NUMBER"
def integerIndexColumnType = "NUMBER NOT NULL"
def enumColumnType = "NUMBER"
def longForeignKeyColumnType = "NUMBER"
def longIndexColumnType = "NUMBER NOT NULL"
def enumListColumnType = "NUMBER"
def longColumnType = "NUMBER"
def doubleColumnType = "NUMBER"
/**
* Whether this database supports LIMIT clause in SELECTs.
*/
override def brokenLimit_? : Boolean = true
import _root_.java.sql.Types
override def customColumnTypeMap = {
case Types.BOOLEAN => Types.INTEGER
}
override def primaryKeySetup(tableName : String, columnName : String) : List[String] = {
/*
* This trigger and sequence setup is taken from http://www.databaseanswers.org/sql_scripts/ora_sequence.htm
*/
super.primaryKeySetup(tableName, columnName) :::
List("CREATE SEQUENCE " + tableName + "_sequence START WITH 1 INCREMENT BY 1",
"CREATE OR REPLACE TRIGGER " + tableName + "_trigger BEFORE INSERT ON " + tableName + " " +
"FOR EACH ROW " +
"WHEN (new." + columnName + " is null) " +
"BEGIN " +
"SELECT " + tableName + "_sequence.nextval INTO :new." + columnName + " FROM DUAL; " +
"END;")
}
// Oracle supports returning generated keys only if we specify the names of the column(s) to return.
override def performInsertWithGenKeys [T](conn : SuperConnection, query : String, setter : PreparedStatement => Unit, tableName : String , genKeyNames : List[String], handler : Either[ResultSet,Int] => T) : T =
DB.prepareStatement(query, genKeyNames.toArray, conn) {
stmt =>
setter(stmt)
stmt.executeUpdate
handler(Left(stmt.getGeneratedKeys))
}
// Oracle doesn't use "COLUMN" syntax when adding a column to a table
override def alterAddColumn = "ADD"
}
object MaxDbDriver extends DriverType("MaxDB") {
def binaryColumnType = "BLOB"
def booleanColumnType = "BOOLEAN"
def clobColumnType = "CLOB"
def dateTimeColumnType = "TIMESTAMP"
def dateColumnType = "DATE"
def timeColumnType = "TIME"
def integerColumnType = "INTEGER"
def integerIndexColumnType = "FIXED(10) DEFAULT SERIAL"
def enumColumnType = "FIXED(38)"
def longForeignKeyColumnType = "FIXED(38)"
def longIndexColumnType = "FIXED(38) DEFAULT SERIAL"
def enumListColumnType = "FIXED(38)"
def longColumnType = "FIXED(38)"
def doubleColumnType = "FLOAT(38)"
}
}
}
| jeppenejsum/liftweb | framework/lift-persistence/lift-mapper/src/main/scala/net/liftweb/mapper/Driver.scala | Scala | apache-2.0 | 15,343 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.web.components
import scala.xml.NodeSeq
import scala.xml.Text
import com.normation.inventory.domain.NodeId
import com.normation.plugins.SnippetExtensionKey
import com.normation.plugins.SpringExtendableSnippet
import com.normation.rudder.authorization.Edit
import com.normation.rudder.authorization.Read
import com.normation.rudder.domain.nodes.NodeGroupId
import com.normation.rudder.domain.policies.DirectiveId
import com.normation.rudder.domain.policies.FullRuleTargetInfo
import com.normation.rudder.domain.policies.GroupTarget
import com.normation.rudder.domain.policies.Rule
import com.normation.rudder.domain.policies.RuleTarget
import com.normation.rudder.domain.policies.TargetUnion
import com.normation.rudder.domain.policies.TargetExclusion
import com.normation.rudder.domain.reports._
import com.normation.rudder.domain.workflows.ChangeRequestId
import com.normation.rudder.repository.FullActiveTechniqueCategory
import com.normation.rudder.repository.FullNodeGroupCategory
import com.normation.rudder.web.components.popup.RuleModificationValidationPopup
import com.normation.rudder.web.model.CurrentUser
import com.normation.rudder.web.model.FormTracker
import com.normation.rudder.web.model.WBTextAreaField
import com.normation.rudder.web.model.WBTextField
import com.normation.rudder.web.services.DisplayDirectiveTree
import com.normation.rudder.web.services.DisplayNodeGroupTree
import bootstrap.liftweb.RudderConfig
import net.liftweb.common._
import net.liftweb.http.DispatchSnippet
import net.liftweb.http.S
import net.liftweb.http.SHtml
import net.liftweb.http.SHtml.BasicElemAttr
import net.liftweb.http.SHtml.ElemAttr.pairToBasic
import net.liftweb.http.Templates
import net.liftweb.http.js.JE._
import net.liftweb.http.js.JsCmd
import net.liftweb.http.js.JsCmds._
import net.liftweb.json._
import net.liftweb.util.ClearClearable
import net.liftweb.util.Helpers
import net.liftweb.util.Helpers._
import com.normation.rudder.domain.nodes.NodeInfo
import org.joda.time.DateTime
import com.normation.rudder.web.model.WBSelectField
import com.normation.rudder.rule.category.RuleCategoryId
import com.normation.rudder.domain.policies.TargetComposition
import com.normation.rudder.web.services.CategoryHierarchyDisplayer
import com.normation.rudder.rule.category.RoRuleCategoryRepository
import com.normation.rudder.web.model.JsInitContextLinkUtil
object RuleEditForm {
/**
* This is part of component static initialization.
   * Any page which contains (or may contain after an ajax request)
   * that component has to add the result of that method to it.
*/
def staticInit:NodeSeq =
(for {
xml <- Templates("templates-hidden" :: "components" :: "ComponentRuleEditForm" :: Nil)
} yield {
chooseTemplate("component", "staticInit", xml)
}) openOr Nil
private def body =
(for {
xml <- Templates("templates-hidden" :: "components" :: "ComponentRuleEditForm" :: Nil)
} yield {
chooseTemplate("component", "body", xml)
}) openOr Nil
private def crForm =
(for {
xml <- Templates("templates-hidden" :: "components" :: "ComponentRuleEditForm" :: Nil)
} yield {
chooseTemplate("component", "form", xml)
}) openOr Nil
val htmlId_groupTree = "groupTree"
val htmlId_activeTechniquesTree = "userPiTree"
}
/**
* The form that handles Rule edition
* (not creation)
* - update name, description, etc
* - update parameters
*
 * It handles save itself (TODO: see how to interact with the component parent)
*
* Parameters can not be null.
*
* Injection should not be used in components
* ( WHY ? I will try to see...)
*
*/
class RuleEditForm(
htmlId_rule : String //HTML id for the div around the form
, var rule : Rule //the Rule to edit
, workflowEnabled : Boolean
, changeMsgEnabled : Boolean
, onSuccessCallback : (Rule) => JsCmd = { (r : Rule) => Noop } //JS to execute on form success (update UI parts)
//there are call by name to have the context matching their execution when called,
, onFailureCallback : () => JsCmd = { () => Noop }
, onCloneCallback : (Rule) => JsCmd = { (r:Rule) => Noop }
) extends DispatchSnippet with SpringExtendableSnippet[RuleEditForm] with Loggable {
import RuleEditForm._
private[this] val htmlId_save = htmlId_rule + "Save"
private[this] val htmlId_EditZone = "editRuleZone"
private[this] val roRuleRepository = RudderConfig.roRuleRepository
private[this] val roCategoryRepository = RudderConfig.roRuleCategoryRepository
private[this] val userPropertyService = RudderConfig.userPropertyService
private[this] val categoryService = RudderConfig.ruleCategoryService
private[this] val roChangeRequestRepo = RudderConfig.roChangeRequestRepository
private[this] val categoryHierarchyDisplayer = RudderConfig.categoryHierarchyDisplayer
private[this] var ruleTarget = RuleTarget.merge(rule.targets)
private[this] var selectedDirectiveIds = rule.directiveIds
private[this] val getFullNodeGroupLib = RudderConfig.roNodeGroupRepository.getFullGroupLibrary _
private[this] val getFullDirectiveLib = RudderConfig.roDirectiveRepository.getFullDirectiveLibrary _
private[this] val getAllNodeInfos = RudderConfig.nodeInfoService.getAll _
private[this] val usedDirectiveIds = roRuleRepository.getAll().getOrElse(Seq()).flatMap { case r =>
r.directiveIds.map( id => (id -> r.id))
}.groupBy( _._1 ).mapValues( _.size).toSeq
//////////////////////////// public methods ////////////////////////////
val extendsAt = SnippetExtensionKey(classOf[RuleEditForm].getSimpleName)
def mainDispatch = Map(
"showForm" -> { _:NodeSeq =>
showForm() },
"showEditForm" -> { _:NodeSeq =>
showForm(1)}
)
private[this] def showForm(tab :Int = 0) : NodeSeq = {
(getFullNodeGroupLib(), getFullDirectiveLib(), getAllNodeInfos()) match {
case (Full(groupLib), Full(directiveLib), Full(nodeInfos)) =>
val form = {
if(CurrentUser.checkRights(Read("rule"))) {
val formContent = if (CurrentUser.checkRights(Edit("rule"))) {
showCrForm(groupLib, directiveLib)
} else {
<div>You have no rights to see rules details, please contact your administrator</div>
}
(
s"#${htmlId_EditZone} *" #> { (n:NodeSeq) => SHtml.ajaxForm(n) } andThen
ClearClearable &
"#ruleForm" #> formContent &
"#details" #> new RuleCompliance(rule,directiveLib, nodeInfos).display &
actionButtons()
).apply(body)
} else {
<div>You have no rights to see rules details, please contact your administrator</div>
}
}
def updateCompliance() = {
roRuleRepository.get(rule.id) match {
case Full(updatedrule) =>
new RuleCompliance(updatedrule,directiveLib, nodeInfos).display
case eb:EmptyBox =>
logger.error("could not get updated version of the Rule")
<div>Could not get updated version of the Rule, please </div>
}
}
val ruleComplianceTabAjax = SHtml.ajaxCall(JsRaw("'"+rule.id.value+"'"), (v:String) => Replace("details",updateCompliance()))._2.toJsCmd
form ++
Script(
OnLoad(JsRaw(
s"""$$( "#editRuleZone" ).tabs(); $$( "#editRuleZone" ).tabs('select', ${tab});"""
)) &
JsRaw(s"""
| $$("#editRuleZone").bind( "show", function(event, ui) {
| if(ui.panel.id== 'ruleComplianceTab') { ${ruleComplianceTabAjax}; }
| });
""".stripMargin('|')
)
)
case (a, b, c) =>
List(a,b,c).collect{ case eb: EmptyBox =>
val e = eb ?~! "An error happens when trying to get the node group library"
logger.error(e.messageChain)
<div class="error">{e.msg}</div>
}
}
}
private[this] def actionButtons () = {
"#removeAction *" #> {
SHtml.ajaxButton("Delete", () => onSubmitDelete(),("class","dangerButton"))
} &
"#desactivateAction *" #> {
val status = rule.isEnabledStatus ? "disable" | "enable"
SHtml.ajaxButton(status.capitalize, () => onSubmitDisable(status))
} &
"#clone" #> SHtml.ajaxButton(
{ Text("Clone") }
, { () => onCloneCallback(rule) }
, ("type", "button")
) &
"#save" #> saveButton
}
private[this] def showCrForm(groupLib: FullNodeGroupCategory, directiveLib: FullActiveTechniqueCategory) : NodeSeq = {
val maptarget = groupLib.allTargets.map{
case (gt,fg) => s" ${encJs(gt.target)} : ${encJs(fg.name)}"
}.toList.mkString("{",",","}")
val included = ruleTarget.includedTarget.targets
val excluded = ruleTarget.excludedTarget.targets
(
"#pendingChangeRequestNotification" #> { xml:NodeSeq =>
PendingChangeRequestDisplayer.checkByRule(xml, rule.id, workflowEnabled)
} &
      //activation button: show "disable" if the rule is currently enabled
"#disactivateButtonLabel" #> { if(rule.isEnabledStatus) "Disable" else "Enable" } &
"#nameField" #> crName.toForm_! &
"#categoryField" #> category.toForm_! &
"#shortDescriptionField" #> crShortDescription.toForm_! &
"#longDescriptionField" #> crLongDescription.toForm_! &
"#selectPiField" #> {
<div id={htmlId_activeTechniquesTree}>{
<ul>{
DisplayDirectiveTree.displayTree(
directiveLib = directiveLib
, usedDirectiveIds = usedDirectiveIds
, onClickCategory = None
, onClickTechnique = None
, onClickDirective = None
//filter techniques without directives, and categories without technique
, keepCategory = category => category.allDirectives.nonEmpty
, keepTechnique = technique => technique.directives.nonEmpty
, addEditLink = true
)
}</ul>
}</div> } &
"#selectGroupField" #> {
<div id={htmlId_groupTree}>
<ul>{DisplayNodeGroupTree.displayTree(
groupLib
, None
, Some( ( (_,target) => targetClick (target)))
, Map(
"include" -> includeRuleTarget _
, "exclude" -> excludeRuleTarget _
)
, included
, excluded
)}</ul>
</div> } &
"#notifications" #> updateAndDisplayNotifications
)(crForm) ++
Script(OnLoad(JsRaw("""
correctButtons();
""")))++ Script(
//a function to update the list of currently selected Directives in the tree
//and put the json string of ids in the hidden field.
JsCrVar("updateSelectedPis", AnonFunc(JsRaw("""
$('#selectedPis').val(JSON.stringify(
$.jstree._reference('#%s').get_selected().map(function(){
return this.id;
}).get()));""".format(htmlId_activeTechniquesTree)
))) &
OnLoad(
// Initialize angular part of page and group tree
JsRaw(s"""
angular.bootstrap('#groupManagement', ['groupManagement']);
var scope = angular.element($$("#GroupCtrl")).scope();
scope.$$apply(function(){
scope.init(${ruleTarget.toString()},${maptarget});
} );
buildGroupTree('#${htmlId_groupTree}','${S.contextPath}', [], 'on');"""
) &
        //function to update the list of PIs before submitting the form
JsRaw(s"buildDirectiveTree('#${htmlId_activeTechniquesTree}', ${serializedirectiveIds(selectedDirectiveIds.toSeq)},'${S.contextPath}', -1);") &
After(TimeSpan(50), JsRaw("""createTooltip();"""))
)
)
}
/*
   * From a list of directive (PI) ids, build a string.
   * The format is a JSON array: [ "jsTree-id1", "jsTree-id2", ... ]
*/
private[this] def serializedirectiveIds(ids:Seq[DirectiveId]) : String = {
implicit val formats = Serialization.formats(NoTypeHints)
Serialization.write(ids.map( "jsTree-" + _.value ))
}
private[this] def serializeTargets(targets:Seq[RuleTarget]) : String = {
implicit val formats = Serialization.formats(NoTypeHints)
Serialization.write(
targets.map { target =>
target match {
case GroupTarget(g) => "jsTree-" + g.value
case _ => "jsTree-" + target.target
}
}
)
}
private[this] def serializeTarget(target:RuleTarget) : String = {
target.toString()
}
/*
   * From a JSON array: [ "jsTree-id1", "jsTree-id2", ... ], get the list of
   * Directive Ids.
   * Never fails; returns an empty list instead.
*/
private[this] def unserializedirectiveIds(ids:String) : Seq[DirectiveId] = {
implicit val formats = DefaultFormats
parse(ids).extract[List[String]].map( x => DirectiveId(x.replace("jsTree-","")) )
}
private[this] def unserializeTarget(target:String) = {
RuleTarget.unser(target).map {
case exclusionTarget : TargetExclusion => exclusionTarget
case t => RuleTarget.merge(Set(t))}.getOrElse(RuleTarget.merge(Set()))
}
////////////// Callbacks //////////////
private[this] def updateFormClientSide() : JsCmd = {
Replace(htmlId_EditZone, this.showForm(1) )
}
private[this] def onSuccess() : JsCmd = {
//MUST BE THIS WAY, because the parent may change some reference to JsNode
//and so, our AJAX could be broken
onSuccessCallback(rule) & updateFormClientSide() &
//show success popup
successPopup
}
private[this] def onFailure() : JsCmd = {
onFailureCallback() &
updateFormClientSide() &
JsRaw("""scrollToElement("notifications");""")
}
private[this] def onNothingToDo() : JsCmd = {
formTracker.addFormError(error("There are no modifications to save."))
onFailure()
}
/*
* Create the ajax save button
*/
private[this] def saveButton : NodeSeq = {
    // add a hidden field to hold the list of selected directives
val save = SHtml.ajaxSubmit("Save", onSubmit _) % ("id" -> htmlId_save)
// update onclick to get the list of directives and groups in the hidden
// fields before submitting
val newOnclick = "updateSelectedPis(); " +
save.attributes.asAttrMap("onclick")
SHtml.hidden( { ids =>
selectedDirectiveIds = unserializedirectiveIds(ids).toSet
}, serializedirectiveIds(selectedDirectiveIds.toSeq)
) % ( "id" -> "selectedPis") ++
SHtml.hidden( { target =>
ruleTarget = unserializeTarget(target)
}, ruleTarget.target
) % ( "id" -> "selectedTargets") ++
save % ( "onclick" -> newOnclick)
}
private[this] def targetClick(targetInfo: FullRuleTargetInfo) : JsCmd = {
val target = targetInfo.target.target.target
JsRaw(s"""onClickTarget("${target}");""")
}
private[this] def includeRuleTarget(targetInfo: FullRuleTargetInfo) : JsCmd = {
val target = targetInfo.target.target.target
JsRaw(s"""includeTarget("${target}");""")
}
private[this] def excludeRuleTarget(targetInfo: FullRuleTargetInfo) : JsCmd = {
val target = targetInfo.target.target.target
JsRaw(s"""excludeTarget("${target}");""")
}
/////////////////////////////////////////////////////////////////////////
/////////////////////////////// Edit form ///////////////////////////////
/////////////////////////////////////////////////////////////////////////
///////////// fields for Rule settings ///////////////////
private[this] val crName = new WBTextField("Name", rule.name) {
override def setFilter = notNull _ :: trim _ :: Nil
override def className = "twoCol"
override def validations =
valMinLen(3, "The name must have at least 3 characters") _ :: Nil
}
private[this] val crShortDescription = {
new WBTextField("Short description", rule.shortDescription) {
override def className = "twoCol"
override def setFilter = notNull _ :: trim _ :: Nil
override val maxLen = 255
override def validations = Nil
}
}
private[this] val crLongDescription = {
new WBTextAreaField("Description", rule.longDescription.toString) {
override def setFilter = notNull _ :: trim _ :: Nil
override def className = "twoCol"
}
}
private[this] val categories = categoryHierarchyDisplayer.getRuleCategoryHierarchy(roCategoryRepository.getRootCategory.get, None)
private[this] val category =
new WBSelectField(
"Rule category"
, categories.map { case (id, name) => (id.value -> name)}
, rule.categoryId.value
) {
override def className = "twoCol"
}
private[this] val formTracker = new FormTracker(List(crName, crShortDescription, crLongDescription))
private[this] def error(msg:String) = <span class="error">{msg}</span>
private[this] def onSubmit() : JsCmd = {
if(formTracker.hasErrors) {
onFailure
} else { //try to save the rule
val newCr = rule.copy(
name = crName.is
, shortDescription = crShortDescription.is
, longDescription = crLongDescription.is
, targets = Set(ruleTarget)
, directiveIds = selectedDirectiveIds
, isEnabledStatus = rule.isEnabledStatus
, categoryId = RuleCategoryId(category.is)
)
if (newCr == rule) {
onNothingToDo()
} else {
displayConfirmationPopup("save", newCr)
}
}
}
//action must be 'enable' or 'disable'
private[this] def onSubmitDisable(action:String): JsCmd = {
displayConfirmationPopup(
action
, rule.copy(isEnabledStatus = action == "enable")
)
}
private[this] def onSubmitDelete(): JsCmd = {
displayConfirmationPopup(
"delete"
, rule
)
}
// Create the popup for workflow
private[this] def displayConfirmationPopup(
action : String
, newRule : Rule
) : JsCmd = {
// for the moment, we don't have creation from here
val optOriginal = Some(rule)
val popup = new RuleModificationValidationPopup(
newRule
, optOriginal
, action
, workflowEnabled
, cr => workflowCallBack(action)(cr)
, () => JsRaw("$.modal.close();") & onFailure
, parentFormTracker = Some(formTracker)
)
if((!changeMsgEnabled) && (!workflowEnabled)) {
popup.onSubmit
} else {
SetHtml("confirmUpdateActionDialog", popup.popupContent) &
JsRaw("""createPopup("confirmUpdateActionDialog")""")
}
}
private[this] def workflowCallBack(action:String)(returns : Either[Rule,ChangeRequestId]) : JsCmd = {
if ((!workflowEnabled) & (action == "delete")) {
JsRaw("$.modal.close();") & onSuccessCallback(rule) & SetHtml("editRuleZone",
<div id={htmlId_rule}>Rule '{rule.name}' successfully deleted</div>
)
} else {
returns match {
case Left(rule) => // ok, we've received a rule, do as before
this.rule = rule
JsRaw("$.modal.close();") & onSuccess
case Right(changeRequestId) => // oh, we have a change request, go to it
JsInitContextLinkUtil.redirectToChangeRequestLink(changeRequestId)
}
}
}
private[this] def updateAndDisplayNotifications : NodeSeq = {
val notifications = formTracker.formErrors
formTracker.cleanErrors
if(notifications.isEmpty) {
<div id="notifications" />
}
else {
val html =
<div id="notifications" class="notify">
<ul class="field_errors">{notifications.map( n => <li>{n}</li>) }</ul>
</div>
html
}
}
///////////// success pop-up ///////////////
private[this] def successPopup : JsCmd = {
def warning(warn : String) : NodeSeq = {
<div style="padding-top: 15px; clear:both">
<img src="/images/icWarn.png" alt="Warning!" height="25" width="25" class="warnicon"/>
<h4 style="float:left">{warn} No configuration policy will be deployed.</h4>
</div>
}
val content =
if ( ruleTarget.excludedTarget.targets.size + ruleTarget.includedTarget.targets.size== 0 ) {
if ( selectedDirectiveIds.size == 0 ) {
warning("This Rule is not applied to any Groups and does not have any Directives to apply.")
} else {
warning("This Rule is not applied to any Groups.")
}
} else {
if ( selectedDirectiveIds.size == 0 ) {
warning("This Rule does not have any Directives to apply.")
} else {
NodeSeq.Empty
} }
SetHtml("successDialogContent",content) &
JsRaw(""" callPopupWithTimeout(200, "successConfirmationDialog")""")
}
}
| Kegeruneku/rudder | rudder-web/src/main/scala/com/normation/rudder/web/components/RuleEditForm.scala | Scala | agpl-3.0 | 22,572 |
package org.scalafmt.rewrite
import org.scalafmt.rewrite.TokenPatch.{Add, Remove}
import scala.meta._
import scala.meta.tokens.Token
sealed abstract class Patch
abstract class TreePatch extends Patch
abstract class TokenPatch(val tok: Token, val newTok: String) extends TreePatch
object TokenPatch {
case class Remove(override val tok: Token) extends TokenPatch(tok, "")
def AddRight(tok: Token,
toAdd: String,
keepTok: Boolean = false): TokenPatch =
Add(tok, "", toAdd, keepTok)
def AddLeft(tok: Token,
toAdd: String,
keepTok: Boolean = false): TokenPatch =
Add(tok, toAdd, "", keepTok)
case class Add(override val tok: Token,
addLeft: String,
addRight: String,
keepTok: Boolean)
extends TokenPatch(tok,
s"""$addLeft${if (keepTok) tok else ""}$addRight""")
}
object Patch {
def merge(a: TokenPatch, b: TokenPatch): TokenPatch = (a, b) match {
case (add1: Add, add2: Add) =>
Add(add1.tok,
add1.addLeft + add2.addLeft,
add1.addRight + add2.addRight,
add1.keepTok && add2.keepTok)
case (_: Remove, add: Add) => add.copy(keepTok = false)
case (add: Add, _: Remove) => add.copy(keepTok = false)
case (rem: Remove, _: Remove) => rem
case _ =>
sys.error(s"""Can't merge token patches:
|1. $a
|2. $b""".stripMargin)
}
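  // Illustration of the merge rules above (token `tok` is whatever token both patches target):
  // merging Add(tok, "a", "", keepTok = true) with Add(tok, "", "b", keepTok = true) yields
  // Add(tok, "a", "b", keepTok = true); merging an Add with a Remove keeps the added text but
  // drops the original token (keepTok = false).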
def apply(ast: Tree, patches: Seq[Patch])(implicit ctx: RewriteCtx): String = {
val input = ast.tokens
val tokenPatches = patches.collect { case e: TokenPatch => e }
val patchMap: Map[(Int, Int), String] =
(tokenPatches)
.groupBy(t => t.tok.start -> t.tok.end)
.mapValues(_.reduce(merge).newTok)
input.toIterator
.map(x => patchMap.getOrElse(x.start -> x.end, x.syntax))
.mkString
}
}
| Daxten/scalafmt | core/src/main/scala/org/scalafmt/rewrite/Patch.scala | Scala | apache-2.0 | 1,917 |
package com.microsoft.awt.components
import com.microsoft.awt.models.Group
import org.scalajs.angularjs.Service
import org.scalajs.angularjs.http.Http
import scala.concurrent.ExecutionContext
import scala.scalajs.js
/**
* Groups Service
* @author [email protected]
*/
class GroupService($http: Http) extends Service {
/**
* Creates a new group
* @param group the given [[Group group]]
* @return the newly created group
*/
def createGroup(group: Group) = $http.post[Group]("/api/group")
/**
* Retrieves a group by ID
* @param groupID the given group ID
* @return a promise of a [[com.microsoft.awt.models.Group group]]
*/
def getGroupByID(groupID: String) = $http.get[Group](s"/api/group/$groupID")
/**
* Retrieves all groups
* @param maxResults the maximum number of results to return
* @return a promise of an array of [[com.microsoft.awt.models.Group groups]]
*/
def getGroups(maxResults: Int = 20) = $http.get[js.Array[Group]](s"/api/groups?maxResults=$maxResults")
/**
* Retrieves all groups that do not contain a specific user
* @param userID the given member (user) ID
* @param maxResults the maximum number of results to return
* @return a promise of an array of [[com.microsoft.awt.models.Group groups]]
*/
def getGroupsExcludingUser(userID: String, maxResults: Int = 20) = {
$http.get[js.Array[Group]](s"/api/groups/user/$userID/nin?maxResults=$maxResults")
}
/**
* Retrieves all groups that are owned by or include a specific user
* @param userID the given member (user) ID
* @param maxResults the maximum number of results to return
* @return a promise of an array of [[com.microsoft.awt.models.Group groups]]
*/
def getGroupsIncludingOrOwnedByUser(userID: String, maxResults: Int = 20)(implicit ec: ExecutionContext) = {
$http.get[js.Array[Group]](s"/api/groups/user/$userID/all?maxResults=$maxResults")
}
/**
* Retrieves all groups that include a specific user
* @param userID the given member (user) ID
* @param maxResults the maximum number of results to return
* @return a promise of an array of [[com.microsoft.awt.models.Group groups]]
*/
def getGroupsIncludingUser(userID: String, maxResults: Int = 20) = {
$http.get[js.Array[Group]](s"/api/groups/user/$userID/in?maxResults=$maxResults")
}
/**
* Updates an existing group
* @param group the given [[Group group]]
* @return the updated group
*/
def updateGroup(group: Group) = $http.put[Group](s"/api/group/${group._id}")
}
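// Illustrative call sites (hedged: assumes the service is injected into an Angular controller;
// the id and arguments below are made up, not part of this file):
// groupService.getGroupByID("someGroupId")                      // fetch one group
// groupService.getGroupsIncludingUser(userId, maxResults = 50)  // groups a user belongs to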
| ldaniels528/awt | app-angularjs/src/main/scala/com/microsoft/awt/components/GroupService.scala | Scala | apache-2.0 | 2,601 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.cluster.sdv.generated
import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util._
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonV3DataFormatConstants}
import org.apache.carbondata.core.util.CarbonProperties
/**
* Test Class for V3offheapvectorTestCase to verify all scenerios
*/
class V3offheapvectorTestCase extends QueryTest with BeforeAndAfterAll {
  //Check query response for select * query with no filters
test("V3_01_Query_01_033", Include) {
dropTable("3lakh_uniqdata")
sql(s"""CREATE TABLE 3lakh_uniqdata (CUST_ID int,CUST_NAME String,ACTIVE_EMUI_VERSION string, DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10), DECIMAL_COLUMN2 decimal(36,10),Double_COLUMN1 double, Double_COLUMN2 double,INTEGER_COLUMN1 int) STORED BY 'carbondata' TBLPROPERTIES('table_blocksize'='128','include_dictionary'='BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,CUST_ID')""").collect
sql(s"""LOAD DATA INPATH '$resourcesPath/Data/3Lakh.csv' into table 3lakh_uniqdata OPTIONS('DELIMITER'=',' , 'QUOTECHAR'='"','BAD_RECORDS_ACTION'='FORCE','FILEHEADER'='CUST_ID,CUST_NAME,ACTIVE_EMUI_VERSION,DOB,DOJ,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1')""").collect
checkAnswer(s"""select count(*) from 3lakh_uniqdata""",
Seq(Row(300635)), "V3offheapvectorTestCase_V3_01_Query_01_033")
}
  //Check query response where the table has > 10 columns as dimensions and all the columns are selected in the query
test("V3_01_Query_01_034", Include) {
checkAnswer(s"""select count(*) from (select CUST_ID,CUST_NAME,ACTIVE_EMUI_VERSION,DOB,DOJ,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1 from 3lakh_uniqdata)c""",
Seq(Row(300635)), "V3offheapvectorTestCase_V3_01_Query_01_034")
}
  //Check query response when the filter has an eq condition on the 1st column and data is selected within a page
test("V3_01_Query_01_035", Include) {
checkAnswer(s"""select CUST_ID from 3lakh_uniqdata where cust_id = 35000""",
Seq(Row(35000)), "V3offheapvectorTestCase_V3_01_Query_01_035")
}
  //Check query response when the filter has an in condition on the 1st column and data is selected within a page
test("V3_01_Query_01_036", Include) {
checkAnswer(s"""select CUST_ID from 3lakh_uniqdata where cust_id in (30000, 35000 ,37000)""",
Seq(Row(30000),Row(35000),Row(37000)), "V3offheapvectorTestCase_V3_01_Query_01_036")
}
  //Check query response when the filter has a range condition on the 1st column and data is selected within a page
test("V3_01_Query_01_037", Include) {
checkAnswer(s"""select count(*) from (select CUST_ID from 3lakh_uniqdata where cust_id between 59000 and 60000)c""",
Seq(Row(1001)), "V3offheapvectorTestCase_V3_01_Query_01_037")
}
  //Check query response when the filter has a range condition on the 1st column and data is selected within pages - values just at the boundary of the page upper limit - with offheap sort and vector reader
test("V3_01_Query_01_041", Include) {
checkAnswer(s"""select count(*) from (select CUST_ID from 3lakh_uniqdata where cust_id between 59000 and 61000)c""",
Seq(Row(2001)), "V3offheapvectorTestCase_V3_01_Query_01_041")
}
  //Check query response when the filter has an in condition on the 1st column and data is selected across multiple pages - with no offheap sort and vector reader
test("V3_01_Query_01_042", Include) {
checkAnswer(s"""select CUST_ID from 3lakh_uniqdata where cust_id in (30000, 35000 ,37000, 69000,101000,133000,165000,197000,229000,261000,293000, 329622)""",
Seq(Row(133000),Row(165000),Row(197000),Row(30000),Row(229000),Row(261000),Row(35000),Row(37000),Row(293000),Row(329622),Row(69000),Row(101000)), "V3offheapvectorTestCase_V3_01_Query_01_042")
}
  //Check query response when the filter has a not-between condition on the 1st column and data is selected across all pages - with offheap sort and vector reader
test("V3_01_Query_01_043", Include) {
checkAnswer(s"""select count(*) from (select CUST_ID from 3lakh_uniqdata where cust_id not between 29001 and 329621)c""",
Seq(Row(3)), "V3offheapvectorTestCase_V3_01_Query_01_043")
}
  //Check query response when the filter is applied on the 2nd column and data is selected across all pages - with no offheap sort and vector reader
test("V3_01_Query_01_044", Include) {
checkAnswer(s"""select count(*) from (select CUST_ID from 3lakh_uniqdata where cust_name like 'CUST_NAME_2%')c""",
Seq(Row(110000)), "V3offheapvectorTestCase_V3_01_Query_01_044")
}
  //Check query response when the filter has a not-like condition set on the 2nd column and data is selected across all pages
test("V3_01_Query_01_045", Include) {
checkAnswer(s"""select count(*) from (select CUST_ID from 3lakh_uniqdata where cust_name not like 'CUST_NAME_2%')c""",
Seq(Row(190635)), "V3offheapvectorTestCase_V3_01_Query_01_045")
}
  //Check query response when the filter has the > operator set on the 10th column and data is selected within a page
test("V3_01_Query_01_046", Include) {
checkAnswer(s"""select count(*) from (select CUST_ID from 3lakh_uniqdata where Double_COLUMN1 > 42000)b""",
Seq(Row(300624)), "V3offheapvectorTestCase_V3_01_Query_01_046")
}
  //Check query response when the filter has the like operator set on the 3rd column and data is selected across all pages - with no offheap sort and vector reader
test("V3_01_Query_01_047", Include) {
checkAnswer(s"""select count(*) from (select ACTIVE_EMUI_VERSION from 3lakh_uniqdata where ACTIVE_EMUI_VERSION like 'ACTIVE_EMUI_VERSION_20%')c""",
Seq(Row(11000)), "V3offheapvectorTestCase_V3_01_Query_01_047")
}
  //Check query response when a filter condition is put on all columns connected through the AND operator and data is selected from 1 page
test("V3_01_Query_01_048", Include) {
checkAnswer(s"""select count(*) from (select * from 3lakh_uniqdata where CUST_ID = 29000 and CUST_NAME = 'CUST_NAME_20000' and ACTIVE_EMUI_VERSION = 'ACTIVE_EMUI_VERSION_20000' and DOB = '04-10-2010 01:00' and DOJ = '04-10-2012 02:00' and BIGINT_COLUMN1 = 1.23372E+11 and BIGINT_COLUMN2 = -2.23E+11 and DECIMAL_COLUMN1 = 12345698901 and DECIMAL_COLUMN2 = 22345698901 and Double_COLUMN1 = 11234567490 and Double_COLUMN2 = -11234567490 and INTEGER_COLUMN1 = 20001)c""",
Seq(Row(0)), "V3offheapvectorTestCase_V3_01_Query_01_048")
}
  //Check query response when a filter condition is put on all columns connected through AND and grouping operators and data is selected from 1 page
test("V3_01_Query_01_050", Include) {
checkAnswer(s"""select count(*) from (select * from 3lakh_uniqdata where CUST_ID = 29000 and CUST_NAME = 'CUST_NAME_20000' and (ACTIVE_EMUI_VERSION = 'ACTIVE_EMUI_VERSION_20001' or DOB = '04-10-2010 01:00') and DOJ = '04-10-2012 02:00' and BIGINT_COLUMN1 = 1.23372E+11 and BIGINT_COLUMN2 = -2.23E+11 and DECIMAL_COLUMN1 = 12345698901 and DECIMAL_COLUMN2 = 22345698901 or Double_COLUMN1 = 11234567490 and ( Double_COLUMN2 = -11234567490 or INTEGER_COLUMN1 = 20003))c""",
Seq(Row(300623)), "V3offheapvectorTestCase_V3_01_Query_01_050")
}
  //Check query response when the filter condition is on the 1st column connected through the OR condition and data is selected across multiple pages
test("V3_01_Query_01_051", Include) {
checkAnswer(s"""select CUST_NAME from 3lakh_uniqdata where CUST_ID = 29000 or CUST_ID = 60000 or CUST_ID = 100000 or CUST_ID = 130000""",
Seq(Row("CUST_NAME_121000"),Row("CUST_NAME_20000"),Row("CUST_NAME_51000"),Row("CUST_NAME_91000")), "V3offheapvectorTestCase_V3_01_Query_01_051")
}
  //Check query response when a filter condition is put on all columns connected through AND/OR operators and a range is used and data is selected across multiple pages
test("V3_01_Query_01_052", Include) {
checkAnswer(s"""select count(*) from (select * from 3lakh_uniqdata where (CUST_ID >= 29000 and CUST_ID <= 60000) and CUST_NAME like 'CUST_NAME_20%' and ACTIVE_EMUI_VERSION = 'ACTIVE_EMUI_VERSION_20000' and DOB = '04-10-2010 01:00' and DOJ = '04-10-2012 02:00' and BIGINT_COLUMN1 = 1.23372E+11 and BIGINT_COLUMN2 = -2.23E+11 and DECIMAL_COLUMN1 = 12345698901 or DECIMAL_COLUMN2 = 22345698901 and Double_COLUMN1 = 11234567490 and (Double_COLUMN2 = -11234567490 or INTEGER_COLUMN1 = 20001))c""",
Seq(Row(1)), "V3offheapvectorTestCase_V3_01_Query_01_052")
}
  //Check query response when the 1st column is selected and a filter is applied and data is selected from 1 page
test("V3_01_Query_01_054", Include) {
checkAnswer(s"""select CUST_ID from 3lakh_uniqdata limit 10""",
Seq(Row(8999),Row(null),Row(null),Row(null),Row(null),Row(null),Row(null),Row(null),Row(null),Row(null)), "V3offheapvectorTestCase_V3_01_Query_01_054")
}
  //Check query response when the 2nd column is selected and a filter is applied and data is selected from 1 page
test("V3_01_Query_01_055", Include) {
checkAnswer(s"""select count(*) from (select CUST_NAME from 3lakh_uniqdata limit 30000)c""",
Seq(Row(30000)), "V3offheapvectorTestCase_V3_01_Query_01_055")
}
  //Check query response when the 4th column is selected and a filter is applied and data is selected from 1 page
test("V3_01_Query_01_056", Include) {
checkAnswer(s"""select count(*) from (select DOB from 3lakh_uniqdata limit 30000)c""",
Seq(Row(30000)), "V3offheapvectorTestCase_V3_01_Query_01_056")
}
  //Check query response when the 1st column is selected and a filter is applied and data is selected from 2 pages
test("V3_01_Query_01_057", Include) {
checkAnswer(s"""select count(*) from (select CUST_ID from 3lakh_uniqdata limit 60000)c""",
Seq(Row(60000)), "V3offheapvectorTestCase_V3_01_Query_01_057")
}
  //Check query response when the 2nd column is selected and a filter is applied and data is selected from 2 pages
test("V3_01_Query_01_058", Include) {
checkAnswer(s"""select count(*) from (select CUST_NAME from 3lakh_uniqdata limit 60000)c""",
Seq(Row(60000)), "V3offheapvectorTestCase_V3_01_Query_01_058")
}
  //Check query response when the 4th column is selected and a filter is applied and data is selected from 2 pages
test("V3_01_Query_01_059", Include) {
checkAnswer(s"""select count(*) from (select DOB from 3lakh_uniqdata limit 60000)c""",
Seq(Row(60000)), "V3offheapvectorTestCase_V3_01_Query_01_059")
}
  //Check query response when the 2nd column is selected with order by and data is selected from 1 page
test("V3_01_Query_01_060", Include) {
checkAnswer(s"""select cust_id from 3lakh_uniqdata order by CUST_NAME desc limit 10""",
Seq(Row(108999),Row(108998),Row(108997),Row(108996),Row(108995),Row(108994),Row(108993),Row(108992),Row(108991),Row(108990)), "V3offheapvectorTestCase_V3_01_Query_01_060")
}
  //Check query response when a temp table is used and multiple pages are scanned
test("V3_01_Query_01_061", Include) {
checkAnswer(s"""select count(*) from ( select a.cust_id from 3lakh_uniqdata a where a.cust_id in (select c.cust_id from 3lakh_uniqdata c where c.cust_name like 'CUST_NAME_2000%') and a.cust_id between 29000 and 60000)d""",
Seq(Row(10)), "V3offheapvectorTestCase_V3_01_Query_01_061")
}
  //Check query response when an aggregate table is used and multiple pages are scanned
test("V3_01_Query_01_062", Include) {
checkAnswer(s"""select substring(CUST_NAME,1,11),count(*) from 3lakh_uniqdata group by substring(CUST_NAME,1,11) having count(*) > 1""",
Seq(Row("CUST_NAME_4",10000),Row("CUST_NAME_1",100000),Row("CUST_NAME_8",10000),Row("CUST_NAME_6",10000),Row("CUST_NAME_2",110000),Row("CUST_NAME_5",10000),Row("CUST_NAME_7",10000),Row("CUST_NAME_9",10000),Row("",11),Row("CUST_NAME_3",30623)), "V3offheapvectorTestCase_V3_01_Query_01_062")
}
  //Check query response when an aggregate table is used along with a filter condition and multiple pages are scanned
test("V3_01_Query_01_063", Include) {
checkAnswer(s"""select substring(CUST_NAME,1,11),count(*) from 3lakh_uniqdata where cust_id between 59000 and 160000 group by substring(CUST_NAME,1,11) having count(*) > 1""",
Seq(Row("CUST_NAME_1",51001),Row("CUST_NAME_8",10000),Row("CUST_NAME_6",10000),Row("CUST_NAME_5",10000),Row("CUST_NAME_7",10000),Row("CUST_NAME_9",10000)), "V3offheapvectorTestCase_V3_01_Query_01_063")
}
  //Check query when the table has a single column so that the record count per blocklet is > 120000, where the query scan is done on a single page
test("V3_01_Param_01_007", Include) {
sql(s"""CREATE TABLE 3lakh_uniqdata1 (CUST_NAME String) STORED BY 'carbondata' TBLPROPERTIES('table_blocksize'='128')""").collect
sql(s"""insert into 3lakh_uniqdata1 select cust_name from 3lakh_uniqdata""").collect
checkAnswer(s"""select count(*) from (select CUST_NAME from 3lakh_uniqdata where cust_name like 'CUST_NAME_2000%')c""",
Seq(Row(110)), "V3offheapvectorTestCase_V3_01_Param_01_007")
}
  //Check query when the table has a single column so that the record count per blocklet is > 120000, where the query scan is done across the pages in the blocklet
test("V3_01_Param_01_008", Include) {
checkAnswer(s"""select count(*) from (select CUST_NAME from 3lakh_uniqdata where cust_name like 'CUST_NAME_20%')c""",
Seq(Row(11000)), "V3offheapvectorTestCase_V3_01_Param_01_008")
}
  //Check impact on load and query reading when a large value (1 lakh characters long) is present in the column
ignore("V3_01_Stress_01_008", Include) {
sql(s"""create table t_carbn1c (name string) stored by 'carbondata' TBLPROPERTIES('table_blocksize'='128','include_dictionary'='name')""").collect
sql(s"""LOAD DATA INPATH '$resourcesPath/Data/1lakh.csv' into table t_carbn1c OPTIONS('DELIMITER'=',' , 'QUOTECHAR'='"','BAD_RECORDS_ACTION'='FORCE','FILEHEADER'='name')""").collect
checkAnswer(s"""select count(*) from t_carbn1c""",
Seq(Row(1)), "V3offheapvectorTestCase_V3_01_Stress_01_008")
}
  //Check impact on load and query reading when a large value (1 lakh characters long) is present in the column and the column is a measure
ignore("V3_01_Stress_01_009", Include) {
checkAnswer(s"""select substring(name,1,10) from t_carbn1c""",
Seq(Row("hellohowar")), "V3offheapvectorTestCase_V3_01_Stress_01_009")
}
  //Check join query when the table is in V3 format
test("V3_01_Query_01_064", Include) {
dropTable("3lakh_uniqdata2")
sql(s"""CREATE TABLE 3lakh_uniqdata2 (CUST_ID int,CUST_NAME String,ACTIVE_EMUI_VERSION string, DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10), DECIMAL_COLUMN2 decimal(36,10),Double_COLUMN1 double, Double_COLUMN2 double,INTEGER_COLUMN1 int) STORED BY 'carbondata' TBLPROPERTIES('table_blocksize'='128','include_dictionary'='BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,CUST_ID')""").collect
sql(s"""LOAD DATA INPATH '$resourcesPath/Data/3Lakh.csv' into table 3lakh_uniqdata2 OPTIONS('DELIMITER'=',' , 'QUOTECHAR'='"','BAD_RECORDS_ACTION'='FORCE','FILEHEADER'='CUST_ID,CUST_NAME,ACTIVE_EMUI_VERSION,DOB,DOJ,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1')""").collect
checkAnswer(s"""select a.cust_id, b.cust_name from 3lakh_uniqdata a, 3lakh_uniqdata2 b where a.cust_id = b.cust_id and a.cust_name = b.cust_name and a.cust_id in (29000, 59000, 69000,15000,250000, 310000)""",
Seq(Row(29000,"CUST_NAME_20000"),Row(250000,"CUST_NAME_241000"),Row(310000,"CUST_NAME_301000"),Row(59000,"CUST_NAME_50000"),Row(69000,"CUST_NAME_60000")), "V3offheapvectorTestCase_V3_01_Query_01_064")
sql(s"""drop table 3lakh_uniqdata""").collect
sql(s"""drop table if exists 3lakh_uniqdata2""").collect
sql(s"""drop table if exists t_carbn1c""").collect
sql(s"""drop table if exists 3lakh_uniqdata1""").collect
}
val prop = CarbonProperties.getInstance()
val p1 = prop.getProperty("carbon.blockletgroup.size.in.mb", CarbonV3DataFormatConstants.BLOCKLET_SIZE_IN_MB_DEFAULT_VALUE)
val p2 = prop.getProperty("enable.offheap.sort", CarbonCommonConstants.ENABLE_OFFHEAP_SORT_DEFAULT)
val p3 = prop.getProperty("carbon.enable.vector.reader", CarbonCommonConstants.ENABLE_VECTOR_READER_DEFAULT)
val p4 = prop.getProperty("carbon.data.file.version", CarbonCommonConstants.CARBON_DATA_FILE_DEFAULT_VERSION)
val p5 = prop.getProperty("carbon.enable.auto.load.merge", CarbonCommonConstants.DEFAULT_ENABLE_AUTO_LOAD_MERGE)
val p6 = prop.getProperty("carbon.compaction.level.threshold", CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD)
override protected def beforeAll() {
// Adding new properties
prop.addProperty("carbon.blockletgroup.size.in.mb", "16")
prop.addProperty("enable.offheap.sort", "true")
prop.addProperty("carbon.enable.vector.reader", "true")
prop.addProperty("carbon.data.file.version", "V3")
prop.addProperty("carbon.enable.auto.load.merge", "false")
prop.addProperty("carbon.compaction.level.threshold", "(2,2)")
}
override def afterAll: Unit = {
//Reverting to old
prop.addProperty("carbon.blockletgroup.size.in.mb", p1)
prop.addProperty("enable.offheap.sort", p2)
prop.addProperty("carbon.enable.vector.reader", p3)
prop.addProperty("carbon.data.file.version", p4)
prop.addProperty("carbon.enable.auto.load.merge", p5)
prop.addProperty("carbon.compaction.level.threshold", p6)
}
} | aniketadnaik/carbondataStreamIngest | integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/V3offheapvectorTestCase.scala | Scala | apache-2.0 | 18,580 |
package io.koff.lenses
import io.koff.model.{GeneralInfo, ProblemExample, User}
/**
* Example of usage of quicklens
*/
object QuickLensExample {
def main(args: Array[String]) {
/**
     * Using scalaz.Lens is quite involved,
     * but if we are not afraid of using macros in a project we can use `quicklens` instead.
     *
     * You have already seen a simple example of `quicklens`, so let's go deeper and see what else `quicklens` can do.
*/
/**
     * `Quicklens` supports chained modifications, which is helpful if you want to change several fields at the same time.
*/
{
import com.softwaremill.quicklens._
val user = ProblemExample.user
val updatedUser = user
.modify(_.generalInfo.siteInfo.userRating).using(_ + 1)
.modify(_.billInfo.addresses.each.isConfirmed).using(_ => true)
println(updatedUser.generalInfo.siteInfo.userRating)
println(updatedUser.billInfo.addresses)
}
/**
     * It is also possible to create reusable lenses, just as with scalaz.Lens.
*/
{
import com.softwaremill.quicklens._
val userRatingLens = modify(_:User)(_.generalInfo.siteInfo.userRating).using _
val user = ProblemExample.user
val updatedUser1 = userRatingLens(user)(_ + 10)
val updatedUser2 = userRatingLens(user)(_ + 12)
println(updatedUser1.generalInfo.siteInfo.userRating)
println(updatedUser2.generalInfo.siteInfo.userRating)
}
/**
* Of course lens composition is also possible.
*/
{
import com.softwaremill.quicklens._
//create lens
val generalInfoLens = modify(_:User)(_.generalInfo)
val emailConfirmedLens = modify(_:GeneralInfo)(_.isEmailConfirmed)
val phoneConfirmedLens = modify(_:GeneralInfo)(_.isPhoneConfirmed)
//compose the lens
val confirmEmail = generalInfoLens.andThenModify(emailConfirmedLens)(_:User).using(_ => true)
val confirmPhone = generalInfoLens.andThenModify(phoneConfirmedLens)(_:User).using(_ => true)
val user = ProblemExample.user
//compose the functions in order to make both changes at once
val updatedUser = confirmEmail.andThen(confirmPhone)(user)
println(updatedUser.generalInfo.isEmailConfirmed)
println(updatedUser.generalInfo.isPhoneConfirmed)
}
}
}
| coffius/koffio-lenses | src/main/scala/io/koff/lenses/QuickLensExample.scala | Scala | mit | 2,335 |
package som
trait CustomHash {
def customHash(): Int
}
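// A minimal illustrative implementer (hypothetical; not part of the benchmark suite):
// final class IntPair(a: Int, b: Int) extends CustomHash {
//   // combine both fields with the usual 31-multiplier hashing scheme
//   def customHash(): Int = 31 * a + b
// }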
| cedricviaccoz/scala-native | benchmarks/src/main/scala/som/CustomHash.scala | Scala | bsd-3-clause | 58 |
package com.bigchange.mllib
import breeze.linalg.SparseVector
import org.apache.log4j.{Logger, Level}
import org.apache.spark.mllib.evaluation.{RankingMetrics, RegressionMetrics}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.mllib.linalg.{SparseVector => SV}
import breeze.linalg._
import org.apache.spark.mllib.recommendation.{MatrixFactorizationModel, ALS, Rating}
import org.jblas.DoubleMatrix
/**
* Created by C.J.YOU on 2016/3/21.
*/
object MoviesLensALS {
case class Params(
ratingsData: String = "J:\\\\github\\\\dataSet\\\\ml-1m\\\\ml-1m\\\\ratings.dat",
moviesData:String = "J:\\\\github\\\\dataSet\\\\ml-1m\\\\ml-1m\\\\movies.dat",
kryo: Boolean = false,
numIterations: Int = 10,
var lambda: Double = 0.1,
rank: Int = 20,
numUserBlocks: Int = -1,
numProductBlocks: Int = -1,
implicitPrefs: Boolean = false) extends AbstractParams[Params]
/** Compute RMSE (Root Mean Squared Error). */
def computeRmse(model: MatrixFactorizationModel, data: RDD[Rating], implicitPrefs: Boolean): Double = {
def mapPredictedRating(r: Double): Double = {
if (implicitPrefs) math.max(math.min(r, 1.0), 0.0) else r
}
val predictions: RDD[Rating] = model.predict(data.map(x => (x.user, x.product)))
val predictionsAndRatings = predictions.map{ x =>
((x.user, x.product), mapPredictedRating(x.rating))
}.join(data.map(x => ((x.user, x.product), x.rating))).values
math.sqrt(predictionsAndRatings.map(x => (x._1 - x._2) * (x._1 - x._2)).mean())
}
def main(args: Array[String]) {
val conf = new SparkConf ()
.setAppName (s"MovieLensALS with")
.setMaster("local")
val params = Params()
if (params.kryo) {
conf.registerKryoClasses (Array (classOf [scala.collection.mutable.BitSet], classOf [Rating]))
.set ("spark.kryoserializer.buffer", "8m")
}
val sc = new SparkContext (conf)
Logger.getRootLogger.setLevel (Level.WARN)
val implicitPrefs = params.implicitPrefs
/*
* MovieLens ratings are on a scale of 1-5:
* 5: Must see
* 4: Will enjoy
* 3: It's okay
* 2: Fairly bad
* 1: Awful
* So we should not recommend a movie if the predicted rating is less than 3.
* To map ratings to confidence scores, we use
* 5 -> 2.5, 4 -> 1.5, 3 -> 0.5, 2 -> -0.5, 1 -> -1.5. This mappings means unobserved
* entries are generally between It's okay and Fairly bad.
* The semantics of 0 in this expanded world of non-positive weights
* are "the same as never having interacted at all".
*/
val ratings = sc.textFile (params.ratingsData).map { line =>
val fields = line.split ("::")
if (implicitPrefs) {
Rating (fields (0).toInt, fields (1).toInt, fields (2).toDouble - 2.5)
} else {
Rating (fields (0).toInt, fields (1).toInt, fields (2).toDouble)
}
}.cache ()
val moviesMap = sc.textFile(params.moviesData).map{ line =>
val fields = line.split("::")
(fields(0).toInt,fields(1))
}.collectAsMap()
val numRatings = ratings.count ()
val numUsers = ratings.map (_.user).distinct ().count ()
val numMovies = ratings.map (_.product).distinct ().count ()
println (s"Got $numRatings ratings from $numUsers users on $numMovies movies.")
val splits = ratings.randomSplit (Array (0.8, 0.2))
val training = splits (0).cache ()
val test = if (params.implicitPrefs) {
/*
* 0 means "don't know" and positive values mean "confident that the prediction should be 1".
* Negative values means "confident that the prediction should be 0".
* We have in this case used some kind of weighted RMSE. The weight is the absolute value of
* the confidence. The error is the difference between prediction and either 1 or 0,
* depending on whether r is positive or negative.
*/
splits (1).map (x => Rating (x.user, x.product, if (x.rating > 0) 1.0 else 0.0))
} else {
splits (1)
}.cache ()
val numTraining = training.count ()
val numTest = test.count ()
println (s"Training: $numTraining, test: $numTest.")
ratings.unpersist (blocking = false)
var minRmse = 100.0
var bestLamda = 0.01
// for(i <- 1 to 10){
params.lambda = 0.1 // * i
// ALS model
val model = new ALS()
      .setRank (params.rank) // number of factors, i.e. latent features of the low-rank approximation; reasonable values are 10-200
      .setIterations (params.numIterations) // around 10 iterations is usually enough to converge
      .setLambda (params.lambda) // regularization parameter controlling overfitting; tune it by cross-validation on held-out data
.setImplicitPrefs (params.implicitPrefs)
.setUserBlocks (params.numUserBlocks) //
.setProductBlocks (params.numProductBlocks) //
.run (training)
//.save(sc,"F:\\\\datatest\\\\ai\\\\ALSModel")
    // recommend items for a user (both items-for-user and users-for-item recommendations are supported)
val userId = 22
val topK = 10
val topKItems = model.recommendProducts(userId,topK)
val topProduct = model.recommendProductsForUsers(10)
val topUser = model.recommendUsersForProducts(10)
    // the user and item factor vectors
val userFeatures = model.userFeatures
val productFeatures = model.productFeatures
    // check the recommendations for the user
    val userRatingMoviesSize = ratings.keyBy(_.user).lookup(userId).size // number of movies user 22 has rated
    // list the recommended movies for userId
println(topK+" movies for user:"+userId)
topKItems.map(ratings => (moviesMap(ratings.product),ratings.rating)).foreach(println)
// evaluation RMSE
val rmse = computeRmse (model, test, params.implicitPrefs)
// println (s"$i -> Test RMSE = $rmse.")
if(rmse < minRmse) {
minRmse = rmse
// bestLamda = i
}
// }
println(s"the best model of lamda:$bestLamda,RMSE:$minRmse")
    // compute the average precision at K
    // item factor matrix: broadcast it for the subsequent computation
val itemFactors = model.productFeatures.map{ case (id,factor) => factor }.collect()
val itemMatrix = new DoubleMatrix(itemFactors)
val imBroadcast = sc.broadcast(itemMatrix)
    // compute the recommendations for every user
val allRecs = model.userFeatures.map{ case(userID,array) =>
val userVector = new DoubleMatrix(array)
val scores = imBroadcast.value.mmul(userVector)
val sortedWithId = scores.data.zipWithIndex.sortBy(-_._1)
      val recommendedIds = sortedWithId.map(_._2 + 1).toSeq // item id + 1 because the item factor matrix is indexed from 0
(userID,recommendedIds)
}
val userMovies = ratings.map{ case Rating(user,product,rating) => (user,product)}.groupBy(_._1)
val K = 10
val MAPK = allRecs.join(userMovies).map{ case(userID,(predicted,actualWithIds)) =>
val actual = actualWithIds.map(_._2).toSeq
avgPrecisionK(actual,predicted,K)
    }.reduce(_+_) / allRecs.count // MAPK: mean average precision at K over the whole dataset
println(s"Mean Average Precision at K =" + MAPK)
    // MLlib built-in evaluation metrics (RegressionMetrics, RankingMetrics)
val predictionAndActual = training.map{ x =>
val predicted = model.predict(x.user,x.product)
(predicted,x.rating)
}
val regressionMetrics = new RegressionMetrics(predictionAndActual)
println("Mean Squared Error = " + regressionMetrics.meanSquaredError)
println("Root Mean Squared Error = "+ regressionMetrics.rootMeanSquaredError)
    // MAP (Mean Average Precision) corresponds to using a relatively large K
val predictionAndActualForRanking = allRecs.join(userMovies).map{ case(userID,(predicted,actualWithIds)) =>
val actual = actualWithIds.map(_._2)
(predicted.toArray,actual.toArray)
}
val rankingMetrics = new RankingMetrics(predictionAndActualForRanking)
println(s"Mean Average Precision =" + rankingMetrics.meanAveragePrecision) // almost same while K = 实际物品的总数
// item to item
    // create a vector object using jblas.DoubleMatrix
val itemId = 567
val itemFactor = model.productFeatures.lookup(itemId).head
val itemVector = new DoubleMatrix(itemFactor)
    cosineSimilarity(itemVector,itemVector) // 1.0: an item's similarity with itself is exactly 1.0
    // compute cosine similarity
// using DoubleMatrix
val sims = productFeatures.map{ case(id,factor) =>
val factorVector = new DoubleMatrix(factor)
val sim = cosineSimilarity(itemVector,factorVector)
(id,sim)
}
    // using SparseVector to compute cosine similarity
val itemVector2 = Vector.apply(itemFactor)
val sv1 = itemVector2.asInstanceOf[SV]
val itemSparseVector = new SparseVector[Double](sv1.indices,sv1.values,sv1.size)
    // cosine similarity computed from the dot product and norms
val sims2 = productFeatures.map{ case (id,factor) =>
val factorVector = Vector.apply(factor)
val sv1 = factorVector.asInstanceOf[SV]
val factorSparseVector = new SparseVector[Double](sv1.indices,sv1.values,sv1.size)
val sim = itemSparseVector.dot(factorSparseVector) / (norm(itemSparseVector) * norm(factorSparseVector))
(id,sim)
}
val sortedSims = sims.top(topK)(Ordering.by[(Int,Double),Double]{ case (id,similarity) => similarity })
println(s"$itemId -> $topK simlarity movies:")
sortedSims.take(topK).map{ case (id,similarity) =>(moviesMap(id),similarity) }
.foreach(println)
sc.stop ()
}
  // Similarity measures include the Pearson correlation coefficient, cosine similarity for real-valued vectors, and the Jaccard coefficient for binary vectors; cosine similarity is used here.
  // Cosine similarity ranges from -1 to 1: the dot product of the vectors divided by the product of their norms (L2 lengths).
def cosineSimilarity(vector1:DoubleMatrix,vector2:DoubleMatrix):Double = {
vector1.dot(vector2) / (vector1.norm2 * vector2.norm2)
}
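  // Quick sanity check of the formula above: for (1,0) and (0,1) the dot product is 0, so the
  // similarity is 0; for the parallel vectors (1,2) and (2,4) it is (2+8)/(sqrt(5)*sqrt(20)) = 1.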
  // Average precision at K: measures the average relevance of the top-k documents returned, comparing actual against predicted.
def avgPrecisionK(actual:Seq[Int],predicted:Seq[Int],k:Int):Double={
val predK = predicted.take(k)
var score = 0.0
var numHits = 0.0
for((p,i) <- predK.zipWithIndex){
if(actual.contains(p)){
numHits += 1.0
score += numHits / (i.toDouble + 1.0)
}
}
if(actual.isEmpty){
1.0
}else {
score / scala.math.min(actual.size,k).toDouble
}
}
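  // Worked example (made-up ids): with actual = Seq(1, 2, 3), predicted = Seq(1, 4, 2) and k = 3,
  // hits occur at ranks 1 and 3, so the result is (1/1 + 2/3) / 3 ≈ 0.56.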
}
| bigchange/AI | src/main/scala/com/bigchange/mllib/MoviesLensALS.scala | Scala | apache-2.0 | 10,652 |
package skinny.http
/**
* Request body from a text value.
*/
case class TextInput(textBody: String, charset: String = HTTP.DEFAULT_CHARSET)
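// Illustrative only (the JSON payload below is made up): a text body for a request
// val body = TextInput("""{"name": "Alice"}""", "UTF-8")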
/**
* No text input
*/
object NoTextInput extends TextInput(null, null)
| BlackPrincess/skinny-framework | http-client/src/main/scala/skinny/http/TextInput.scala | Scala | mit | 218 |
/**
* Copyright (c) 2014, MoonGene. All rights reserved.
*
* This source code is licensed under the GPL license found in the
* LICENSE_GPL file in the root directory of this source tree. An alternative
* commercial license is also available upon request.
*/
package com.moongene.models.track
import org.joda.time.DateTime
object ExitObj {
case class Exit( deviceId: String, //User's device ID as string
deviceBinId: Option[Array[Byte]], //User's device ID as byte array
version: String, //User defined application version
sessionLength: Long, //Session length in seconds
timestamp: DateTime, //User's timestamp in UTC
auth: Common.SysAuth, //App authorization information
ip: Option[String], //User's IP address
geoData: Option[Common.IPGeoData]) //User's geo data based on IP
}
| MoonGene/Analytics | src/gene/src/main/scala/com/moongene/models/track/ExitObj.scala | Scala | gpl-3.0 | 1,015 |
package org.apache.spark.ml.classification
import org.apache.spark.SparkException
import org.apache.spark.ml.knn.KNN.{RowWithVector, VectorWithNorm}
import org.apache.spark.ml.knn.{DistanceMetric, EuclideanDistanceMetric, KNNModel, KNNParams}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.util.{Identifiable, SchemaUtils}
import org.apache.spark.ml.{Model, Predictor}
import org.apache.spark.ml.linalg._
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{ArrayType, DoubleType, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.mllib.rdd.MLPairRDDFunctions._
import scala.collection.mutable.ArrayBuffer
/**
* Brute-force kNN with k = 1
*/
class NaiveKNNClassifier(override val uid: String, val distanceMetric: DistanceMetric)
extends Predictor[Vector, NaiveKNNClassifier, NaiveKNNClassifierModel] {
def this() = this(Identifiable.randomUID("naiveknnc"), EuclideanDistanceMetric)
override def copy(extra: ParamMap): NaiveKNNClassifier = defaultCopy(extra)
override protected def train(dataset: Dataset[_]): NaiveKNNClassifierModel = {
// Extract columns from data. If dataset is persisted, do not persist oldDataset.
val instances = extractLabeledPoints(dataset).map {
case LabeledPoint(label: Double, features: Vector) => (label, features)
}
val handlePersistence = dataset.rdd.getStorageLevel == StorageLevel.NONE
if (handlePersistence) instances.persist(StorageLevel.MEMORY_AND_DISK)
val labelSummarizer = instances.treeAggregate(new MultiClassSummarizer)(
seqOp = (c, v) => (c, v) match {
case (labelSummarizer: MultiClassSummarizer, (label: Double, features: Vector)) =>
labelSummarizer.add(label)
},
combOp = (c1, c2) => (c1, c2) match {
case (classSummarizer1: MultiClassSummarizer, classSummarizer2: MultiClassSummarizer) =>
classSummarizer1.merge(classSummarizer2)
})
val histogram = labelSummarizer.histogram
val numInvalid = labelSummarizer.countInvalid
val numClasses = histogram.length
if (numInvalid != 0) {
val msg = s"Classification labels should be in {0 to ${numClasses - 1} " +
s"Found $numInvalid invalid labels."
logError(msg)
throw new SparkException(msg)
}
val points = instances.map{
case (label, features) => (label, new VectorWithNorm(features))
}
new NaiveKNNClassifierModel(uid, points, numClasses, distanceMetric)
}
}
class NaiveKNNClassifierModel(
override val uid: String,
val points: RDD[(Double, VectorWithNorm)],
val _numClasses: Int,
val distanceMetric: DistanceMetric) extends ProbabilisticClassificationModel[Vector, NaiveKNNClassifierModel] {
override def numClasses: Int = _numClasses
override def transform(dataset: Dataset[_]): DataFrame = {
import dataset.sparkSession.implicits._
val features = dataset.select($(featuresCol))
.map(r => new VectorWithNorm(r.getAs[Vector](0)))
val merged = features.rdd.zipWithUniqueId()
.cartesian(points)
.map {
case ((u, i), (label, v)) =>
val dist = distanceMetric.fastSquaredDistance(u, v)
(i, (dist, label))
}
.topByKey(1)(Ordering.by(e => -e._1))
.map{
case (id, labels) =>
val vector = new Array[Double](numClasses)
var i = 0
while (i < labels.length) {
vector(labels(i)._2.toInt) += 1
i += 1
}
val rawPrediction = Vectors.dense(vector)
lazy val probability = raw2probability(rawPrediction)
lazy val prediction = probability2prediction(probability)
val values = new ArrayBuffer[Any]
if ($(rawPredictionCol).nonEmpty) {
values.append(rawPrediction)
}
if ($(probabilityCol).nonEmpty) {
values.append(probability)
}
if ($(predictionCol).nonEmpty) {
values.append(prediction)
}
(id, values.toSeq)
}
dataset.sqlContext.createDataFrame(
dataset.toDF().rdd.zipWithUniqueId().map { case (row, i) => (i, row) }
.leftOuterJoin(merged) //make sure we don't lose any observations
.map {
case (i, (row, values)) => Row.fromSeq(row.toSeq ++ values.get)
},
transformSchema(dataset.schema)
)
}
override def transformSchema(schema: StructType): StructType = {
var transformed = schema
if ($(rawPredictionCol).nonEmpty) {
transformed = SchemaUtils.appendColumn(transformed, $(rawPredictionCol), new VectorUDT)
}
if ($(probabilityCol).nonEmpty) {
transformed = SchemaUtils.appendColumn(transformed, $(probabilityCol), new VectorUDT)
}
if ($(predictionCol).nonEmpty) {
transformed = SchemaUtils.appendColumn(transformed, $(predictionCol), DoubleType)
}
transformed
}
override def copy(extra: ParamMap): NaiveKNNClassifierModel = {
val copied = new NaiveKNNClassifierModel(uid, points, numClasses, distanceMetric)
copyValues(copied, extra).setParent(parent)
}
override protected def raw2probabilityInPlace(rawPrediction: Vector): Vector = {
rawPrediction match {
case dv: DenseVector =>
var i = 0
val size = dv.size
var sum = 0.0
while (i < size) {
sum += dv.values(i)
i += 1
}
i = 0
while (i < size) {
dv.values(i) /= sum
i += 1
}
dv
case sv: SparseVector =>
throw new RuntimeException("Unexpected error in KNNClassificationModel:" +
" raw2probabilitiesInPlace encountered SparseVector")
}
}
override def predictRaw(features: Vector): Vector = {
throw new SparkException("predictRaw function should not be called directly since kNN prediction is done in distributed fashion. Use transform instead.")
}
}
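// Usage sketch (illustrative; the DataFrames and column names are assumed, relying only on the
// standard spark.ml Predictor/Model API):
// val knn = new NaiveKNNClassifier()
// val model = knn.fit(trainingData)           // trainingData has "label" and "features" columns
// val predictions = model.transform(testData)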
| saurfang/spark-knn | spark-knn-examples/src/main/scala/org/apache/spark/ml/classification/NaiveKNN.scala | Scala | apache-2.0 | 6,144 |
package linter.linters
import java.io.File
import linter.{Language, OutputGenerator}
import json_parser.Error
import scala.collection.mutable
import scala.io.Source
/**
* A linter that checks if a line goes over 80 characters
* <p> Inherits everything from BaseLinter </p>
*/
class LengthCheckerLinter(path: File, language: Language.Value) extends BaseLinter(path, language) {
private val mistakes = new mutable.MutableList[Error]
/**
    * For every file given, checks whether any line is 80 characters or longer
*
* @return The list of mistakes found in the file(s)
*/
override def parseFiles: Seq[Error] = {
fileList.filterNot(_.isDirectory).foreach(scanFile)
mistakes
}
private def scanFile(f: File): Unit = {
for ((line, index) <- Source.fromFile(f).getLines().zipWithIndex) {
if (line.length >= 80) {
OutputGenerator.addScore(0.1d)
mistakes += new Error("Line is over 80 characters", f.toString, index + 1, 0, "style")
}
}
}
}
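// Usage sketch (hypothetical path and Language value, shown only to illustrate the API above):
// val linter = new LengthCheckerLinter(new File("submission/src"), Language.Java)
// val longLines: Seq[Error] = linter.parseFiles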
| ke00n/alabno | backend/linter/src/main/scala/linter/linters/LengthCheckerLinter.scala | Scala | mit | 1,014 |
package org.apache.spark.examples.mllib
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.mllib.linalg.Matrices
import org.apache.spark.mllib.linalg.Matrix
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.distributed.IndexedRow
import org.apache.spark.mllib.linalg.distributed.IndexedRowMatrix
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.linalg.distributed.CoordinateMatrix
import org.apache.spark.mllib.linalg.distributed.MatrixEntry
import org.apache.spark.mllib.linalg.distributed.BlockMatrix
/**
 * Data type examples
*/
object DataTypes {
/**
   * Sparse matrix: a matrix in which the number of zero elements far exceeds the number of non-zero elements.
   * Dense matrix: a matrix in which non-zero elements make up the majority.
*/
def main(args: Array[String]) {
val sparkConf = new SparkConf().setMaster("local[2]").setAppName("SparkHdfsLR")
val sc = new SparkContext(sparkConf)
    /** Create local vectors **/
    // A local vector is stored on a single machine; indices are 0-based integers and values are Doubles
// Create a dense vector (1.0, 0.0, 3.0).
    // dense vector: zero values are stored as well
val dv: Vector = Vectors.dense(1.0, 0.0, 3.0)
// Create a sparse vector (1.0, 0.0, 3.0) by specifying its indices and values corresponding to nonzero entries.
    // create a sparse vector by specifying its size, indices and non-zero values, using arrays
val sv1: Vector = Vectors.sparse(3, Array(0, 2), Array(1.0, 3.0))
// Create a sparse vector (1.0, 0.0, 3.0) by specifying its nonzero entries.
    // create a sparse vector by specifying its size, indices and non-zero values, using a sequence
val sv2: Vector = Vectors.sparse(3, Seq((0, 1.0), (2, 3.0)))
    /** Labeled points **/
// Create a labeled point with a positive label and a dense feature vector.
val pos = LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0))
// Create a labeled point with a negative label and a sparse feature vector.
    // A LabeledPoint is a local vector, either dense or sparse, associated with a label
val neg = LabeledPoint(0.0, Vectors.sparse(3, Array(0, 2), Array(1.0, 3.0)))
    // Sparse data: MLlib can read training instances stored in LibSVM format; each line is a sparse feature vector with a class label
    // indices are 1-based and increasing; they are converted to 0-based on load
/**
     * The libSVM data format:
     * <label> <index1>:<value1> <index2>:<value2> ...
     * where <label> is the target value of the training set: for classification an integer identifying the class (multiple classes are supported), for regression any real number;
     * <index> is a 1-based integer and need not be contiguous;
     * <value> is a real number, i.e. the feature (independent variable) value.
*/
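    // For example (made-up values), a libSVM line with label 1 and non-zero features at indices 1 and 3:
    //   1 1:0.5 3:1.2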
val examples: RDD[LabeledPoint] = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
    /** Local dense matrix **/
// Create a dense matrix ((1.0, 2.0), (3.0, 4.0), (5.0, 6.0))
val dm: Matrix = Matrices.dense(3, 2, Array(1.0, 3.0, 5.0, 2.0, 4.0, 6.0))
    /** Local sparse matrix **/
    /** The matrix
    1.0 0.0 4.0
    0.0 3.0 5.0
    2.0 0.0 6.0
    stored as a sparse matrix (column-major, CSC) consists of:
    the stored non-zero values: `values=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]`,
    the row index of each value: `rowIndices=[0, 2, 1, 0, 1, 2]`,
    and the column start offsets: `colPointers=[0, 2, 3, 6]` **/
// Create a sparse matrix ((9.0, 0.0), (0.0, 8.0), (0.0, 6.0))
val sm: Matrix = Matrices.sparse(3, 2, Array(0, 1, 3), Array(0, 2, 1), Array(9, 6, 8))
    /** Distributed matrices **/
val rows: RDD[Vector] = null // an RDD of local vectors
// Create a RowMatrix from an RDD[Vector].
    // A RowMatrix is distributed row-wise without row indices; it is backed by an RDD whose elements are the rows, each a local vector
val mat: RowMatrix = new RowMatrix(rows)
// Get its size.
val m = mat.numRows()
val n = mat.numCols()
// QR decomposition
val qrResult = mat.tallSkinnyQR(true)
    /** Indexed-row distributed matrix **/
    // carries row index information for the dataset
    val rowsIndex: RDD[IndexedRow] = null // an RDD of indexed rows
// Create an IndexedRowMatrix from an RDD[IndexedRow].
    // An IndexedRowMatrix is distributed row-wise with row indices; it is backed by an RDD of indexed rows, so each row is represented by a Long index and a local vector
val matIndex: IndexedRowMatrix = new IndexedRowMatrix(rowsIndex)
    // Get its size.
val mIndex = matIndex.numRows()
val nIndex = matIndex.numCols()
// Drop its row indices. 下降行索引
val rowMat: RowMatrix = matIndex.toRowMatrix()
/***三元组矩阵*/
val entries: RDD[MatrixEntry] = null // an RDD of matrix entries 矩阵元素的RDD
// Create a CoordinateMatrix from an RDD[MatrixEntry].
//CoordinateMatrix常用于稀疏性比较高的计算中,MatrixEntry是一个 Tuple类型的元素,其中包含行、列和元素值
val matCoordinate: CoordinateMatrix = new CoordinateMatrix(entries)
// Get its size.
//得到它的大小
val mCoordinate = mat.numRows()
val nCoordinate = mat.numCols()
// Convert it to an IndexRowMatrix whose rows are sparse vectors.
//索引行矩阵(IndexedRowMatrix)按行分布式存储,有行索引,其底层支撑结构是索引的行组成的RDD,所以每行可以通过索引(long)和局部向量表示
val indexedRowMatrix = matCoordinate.toIndexedRowMatrix()
    /** BlockMatrix **/
    val coordMat: CoordinateMatrix = new CoordinateMatrix(entries)
    val matA: BlockMatrix = coordMat.toBlockMatrix().cache()
    // Validate whether the BlockMatrix is set up properly. Throws an Exception when it is not valid.
    // Nothing happens if it is valid.
matA.validate()
// Calculate A^T A.
val ata = matA.transpose.multiply(matA)
}
}
| tophua/spark1.52 | examples/src/main/scala/org/apache/spark/examples/mllib/DataTypes.scala | Scala | apache-2.0 | 6,365 |
package dk.gp.cogp.testutils
import breeze.linalg.DenseMatrix
import breeze.linalg.DenseVector
import breeze.linalg._
import dk.gp.cogp.model.Task
object loadToyModelDataIncomplete {
def apply(): Array[Task] = {
val y0Filter = (x: Double) => (x < -7 || x > -3)
val y1Filter = (x: Double) => (x < 4 || x > 8)
val data = loadToyModelData(n = Int.MaxValue, y0Filter, y1Filter)
data
}
} | danielkorzekwa/bayes-scala-gp | src/test/scala/dk/gp/cogp/testutils/loadToyModelDataIncomplete.scala | Scala | bsd-2-clause | 407 |
package endpoints.testsuite
import endpoints.algebra
trait JsonFromCodecTestApi
extends algebra.Endpoints
with algebra.JsonEntitiesFromCodec {
implicit def userCodec: JsonCodec[User]
implicit def addressCodec: JsonCodec[Address]
val jsonCodecEndpoint = endpoint(
post(path / "user-json-codec", jsonRequest[User]),
jsonResponse[Address]
)
}
trait JsonFromCirceCodecTestApi
extends JsonFromCodecTestApi
with algebra.circe.JsonEntitiesFromCodec {
def userCodec = implicitly[JsonCodec[User]]
def addressCodec = implicitly[JsonCodec[Address]]
}
trait JsonFromPlayJsonCodecTestApi
extends JsonFromCodecTestApi
with algebra.playjson.JsonEntitiesFromCodec {
def userCodec = implicitly[JsonCodec[User]]
def addressCodec = implicitly[JsonCodec[Address]]
}
| Krever/endpoints | testsuite/testsuite/src/main/scala/endpoints/testsuite/JsonFromCodecTestApi.scala | Scala | mit | 799 |
/*
* Copyright (c) 2015-16 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import scala.language.existentials
import scala.language.experimental.macros
import scala.annotation.{ StaticAnnotation, tailrec }
import scala.reflect.api.Universe
import scala.reflect.macros.{ blackbox, whitebox }
import ops.{ hlist, coproduct }
trait Generic1[F[_], FR[_[_]]] extends Serializable {
type R[t]
lazy val fr: FR[R] = mkFrr
def to[T](ft: F[T]): R[T]
def from[T](rt: R[T]): F[T]
def mkFrr: FR[R]
}
object Generic1 extends Generic10 {
type Aux[F[_], FR[_[_]], R0[_]] = Generic1[F, FR] { type R[t] = R0[t] }
implicit def apply[T[_], FR[_[_]]]: Generic1[T, FR] = macro Generic1Macros.mkGeneric1Impl[T, FR]
}
trait Generic10 {
implicit def mkGeneric10[T[_], U[_], FR[_[_], _[_]]]: Generic1[T, ({ type λ[t[_]] = FR[t, U] })#λ] =
macro Generic1Macros.mkGeneric1Impl[T, ({ type λ[t[_]] = FR[t, U] })#λ]
implicit def mkGeneric11[T[_], U[_], FR[_[_], _[_]]]: Generic1[T, ({ type λ[t[_]] = FR[U, t] })#λ] =
macro Generic1Macros.mkGeneric1Impl[T, ({ type λ[t[_]] = FR[U, t] })#λ]
}
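// Illustrative sketch (not part of the original sources): Generic1 is typically used to derive
// type classes for type constructors. Given some `Functor` type class with instances for the
// generic representation, a derivation looks roughly like:
//
//   def deriveFunctor[F[_], R[_]](implicit gen: Generic1.Aux[F, Functor, R]): Functor[F] =
//     new Functor[F] {
//       def map[A, B](fa: F[A])(f: A => B): F[B] = gen.from(gen.fr.map(gen.to(fa))(f))
//     }
//
// where `Functor` and `deriveFunctor` are assumed names used only for this example.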
trait IsHCons1[L[_], FH[_[_]], FT[_[_]]] extends Serializable {
type H[_]
type T[_] <: HList
lazy val fh: FH[H] = mkFhh
lazy val ft: FT[T] = mkFtt
def pack[A](u: (H[A], T[A])): L[A]
def unpack[A](p: L[A]): (H[A], T[A])
def mkFhh: FH[H]
def mkFtt: FT[T]
}
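// Illustrative note (not part of the original sources): for a representation such as
//   type L[t] = t :: List[t] :: HNil
// IsHCons1[L, FH, FT] exposes H[t] = t and T[t] = List[t] :: HNil, together with the
// corresponding FH[H] and FT[T] instances via `fh` and `ft`.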
object IsHCons1 extends IsHCons10 {
type Aux[L[_], FH[_[_]], FT[_[_]], H0[_], T0[_] <: HList] = IsHCons1[L, FH, FT] { type H[t] = H0[t] ; type T[t] = T0[t] }
implicit def apply[L[_], FH[_[_]], FT[_[_]]]: IsHCons1[L, FH, FT] = macro IsHCons1Macros.mkIsHCons1Impl[L, FH, FT]
}
trait IsHCons10 {
implicit def mkIsHCons10[L[_], FH[_[_], _[_]], U[_], FT[_[_]]]: IsHCons1[L, ({ type λ[t[_]] = FH[t, U] })#λ, FT] =
macro IsHCons1Macros.mkIsHCons1Impl[L, ({ type λ[t[_]] = FH[t, U] })#λ, FT]
implicit def mkIsHCons11[L[_], FH[_[_], _[_]], U[_], FT[_[_]]]: IsHCons1[L, ({ type λ[t[_]] = FH[U, t] })#λ, FT] =
macro IsHCons1Macros.mkIsHCons1Impl[L, ({ type λ[t[_]] = FH[U, t] })#λ, FT]
implicit def mkIsHCons12[L[_], FH[_[_]], FT[_[_], _[_]], U[_]]: IsHCons1[L, FH, ({ type λ[t[_]] = FT[t, U] })#λ] =
macro IsHCons1Macros.mkIsHCons1Impl[L, FH, ({ type λ[t[_]] = FT[t, U] })#λ]
implicit def mkIsHCons13[L[_], FH[_[_]], FT[_[_], _[_]], U[_]]: IsHCons1[L, FH, ({ type λ[t[_]] = FT[U, t] })#λ] =
macro IsHCons1Macros.mkIsHCons1Impl[L, FH, ({ type λ[t[_]] = FT[U, t] })#λ]
}
trait IsCCons1[L[_], FH[_[_]], FT[_[_]]] extends Serializable {
type H[_]
type T[_] <: Coproduct
lazy val fh: FH[H] = mkFhh
lazy val ft: FT[T] = mkFtt
def pack[A](u: Either[H[A], T[A]]): L[A]
def unpack[A](p: L[A]): Either[H[A], T[A]]
def mkFhh: FH[H]
def mkFtt: FT[T]
}
object IsCCons1 extends IsCCons10 {
type Aux[L[_], FH[_[_]], FT[_[_]], H0[_], T0[_] <: Coproduct] = IsCCons1[L, FH, FT] { type H[t] = H0[t] ; type T[t] = T0[t] }
implicit def apply[L[_], FH[_[_]], FT[_[_]]]: IsCCons1[L, FH, FT] = macro IsCCons1Macros.mkIsCCons1Impl[L, FH, FT]
}
trait IsCCons10 {
implicit def mkIsCCons10[L[_], FH[_[_], _[_]], U[_], FT[_[_]]]: IsCCons1[L, ({ type λ[t[_]] = FH[t, U] })#λ, FT] =
macro IsCCons1Macros.mkIsCCons1Impl[L, ({ type λ[t[_]] = FH[t, U] })#λ, FT]
implicit def mkIsCCons11[L[_], FH[_[_], _[_]], U[_], FT[_[_]]]: IsCCons1[L, ({ type λ[t[_]] = FH[U, t] })#λ, FT] =
macro IsCCons1Macros.mkIsCCons1Impl[L, ({ type λ[t[_]] = FH[U, t] })#λ, FT]
implicit def mkIsCCons12[L[_], FH[_[_]], FT[_[_], _[_]], U[_]]: IsCCons1[L, FH, ({ type λ[t[_]] = FT[t, U] })#λ] =
macro IsCCons1Macros.mkIsCCons1Impl[L, FH, ({ type λ[t[_]] = FT[t, U] })#λ]
implicit def mkIsCCons13[L[_], FH[_[_]], FT[_[_], _[_]], U[_]]: IsCCons1[L, FH, ({ type λ[t[_]] = FT[U, t] })#λ] =
macro IsCCons1Macros.mkIsCCons1Impl[L, FH, ({ type λ[t[_]] = FT[U, t] })#λ]
}
trait Split1[L[_], FO[_[_]], FI[_[_]]] extends Serializable {
type O[_]
type I[_]
lazy val fo: FO[O] = mkFoo
lazy val fi: FI[I] = mkFii
def pack[T](u: O[I[T]]): L[T]
def unpack[T](p: L[T]): O[I[T]]
def mkFoo: FO[O]
def mkFii: FI[I]
}
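// Illustrative note (not part of the original sources): for a composed type constructor such as
//   type LO[t] = List[Option[t]]
// Split1[LO, Functor, Functor] decomposes it into an outer O[t] = List[t] and an inner
// I[t] = Option[t], exposing a type class instance for each layer via `fo` and `fi`
// (here `Functor` stands for any suitable type class of kind (* -> *) -> *).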
object Split1 extends Split10 {
type Aux[L[_], FO[_[_]], FI[_[_]], O0[_], I0[_]] = Split1[L, FO, FI] { type O[T] = O0[T] ; type I[T] = I0[T] }
implicit def apply[L[_], FO[_[_]], FI[_[_]]]: Split1[L, FO, FI] = macro Split1Macros.mkSplit1Impl[L, FO, FI]
}
trait Split10 {
implicit def mkSplit10[L[_], FO[_[_], _[_]], U[_], FI[_[_]]]: Split1[L, ({ type λ[t[_]] = FO[t, U] })#λ, FI] =
macro Split1Macros.mkSplit1Impl[L, ({ type λ[t[_]] = FO[t, U] })#λ, FI]
implicit def mkSplit11[L[_], FO[_[_], _[_]], U[_], FI[_[_]]]: Split1[L, ({ type λ[t[_]] = FO[U, t] })#λ, FI] =
macro Split1Macros.mkSplit1Impl[L, ({ type λ[t[_]] = FO[U, t] })#λ, FI]
implicit def mkSplit12[L[_], FO[_[_]], FI[_[_], _[_]], U[_]]: Split1[L, FO, ({ type λ[t[_]] = FI[t, U] })#λ] =
macro Split1Macros.mkSplit1Impl[L, FO, ({ type λ[t[_]] = FI[t, U] })#λ]
implicit def mkSplit13[L[_], FO[_[_]], FI[_[_], _[_]], U[_]]: Split1[L, FO, ({ type λ[t[_]] = FI[U, t] })#λ] =
macro Split1Macros.mkSplit1Impl[L, FO, ({ type λ[t[_]] = FI[U, t] })#λ]
}
@macrocompat.bundle
class Generic1Macros(val c: whitebox.Context) extends CaseClassMacros {
import c.ImplicitCandidate
import c.universe._
import internal.constantType
import Flag._
def mkGeneric1Impl[T[_], FR[_[_]]](implicit tTag: WeakTypeTag[T[_]], frTag: WeakTypeTag[FR[Any]]): Tree = {
val tpe = tTag.tpe
val frTpe =
c.openImplicits.headOption match {
case Some(ImplicitCandidate(_, _, TypeRef(_, _, List(_, tpe)), _)) => tpe
case other => frTag.tpe.typeConstructor
}
if(isReprType1(tpe))
abort("No Generic1 instance available for HList or Coproduct")
if(isProduct1(tpe))
mkProductGeneric1(tpe, frTpe)
else
mkCoproductGeneric1(tpe, frTpe)
}
def mkProductGeneric1(tpe: Type, frTpe: Type): Tree = {
val ctorDtor = CtorDtor(tpe)
val (p, ts) = ctorDtor.binding
val to = cq""" $p => ${mkHListValue(ts)} """
val (rp, rts) = ctorDtor.reprBinding
val from = cq""" $rp => ${ctorDtor.construct(rts)} """
val nme = TypeName(c.freshName)
val reprTpt = reprTypTree1(tpe, nme)
val rnme = TypeName(c.freshName)
val clsName = TypeName(c.freshName("anon$"))
q"""
type Apply0[F[_], T] = F[T]
type Apply1[F[_[_]], T[_]] = F[T]
final class $clsName extends _root_.shapeless.Generic1[$tpe, $frTpe] {
type R[$nme] = $reprTpt
def mkFrr: Apply1[$frTpe, R] = _root_.shapeless.lazily[Apply1[$frTpe, R]]
def to[$nme](ft: Apply0[$tpe, $nme]): R[$nme] = (ft match { case $to }).asInstanceOf[R[$nme]]
def from[$nme](rt: R[$nme]): Apply0[$tpe, $nme] = rt match { case $from }
}
type $rnme[$nme] = $reprTpt
new $clsName(): _root_.shapeless.Generic1.Aux[$tpe, $frTpe, $rnme]
"""
}
def mkCoproductGeneric1(tpe: Type, frTpe: Type): Tree = {
def mkCoproductCases(tpe: Type, index: Int): CaseDef = {
val name = TermName(c.freshName("pat"))
val tc = tpe.typeConstructor
val params = tc.typeParams.map { _ => Bind(typeNames.WILDCARD, EmptyTree) }
val tpeTpt = AppliedTypeTree(mkAttributedRef(tc), params)
cq"$name: $tpeTpt => $index"
}
val nme = TypeName(c.freshName)
val reprTpt = reprTypTree1(tpe, nme)
val rnme = TypeName(c.freshName)
val to = {
val toCases = ctorsOf1(tpe) zip (Stream from 0) map (mkCoproductCases _).tupled
q"""_root_.shapeless.Coproduct.unsafeMkCoproduct((ft: Any) match { case ..$toCases }, ft).asInstanceOf[R[$nme]]"""
}
val clsName = TypeName(c.freshName("anon$"))
q"""
type Apply0[F[_], T] = F[T]
type Apply1[F[_[_]], T[_]] = F[T]
final class $clsName extends _root_.shapeless.Generic1[$tpe, $frTpe] {
type R[$nme] = $reprTpt
def mkFrr: Apply1[$frTpe, R] = _root_.shapeless.lazily[Apply1[$frTpe, R]]
def to[$nme](ft: Apply0[$tpe, $nme]): R[$nme] = $to
def from[$nme](rt: R[$nme]): Apply0[$tpe, $nme] = _root_.shapeless.Coproduct.unsafeGet(rt).asInstanceOf[Apply0[$tpe, $nme]]
}
type $rnme[$nme] = $reprTpt
new $clsName(): _root_.shapeless.Generic1.Aux[$tpe, $frTpe, $rnme]
"""
}
}
@macrocompat.bundle
class IsHCons1Macros(val c: whitebox.Context) extends IsCons1Macros {
import c.universe._
def mkIsHCons1Impl[L[_], FH[_[_]], FT[_[_]]]
(implicit lTag: WeakTypeTag[L[_]], fhTag: WeakTypeTag[FH[Any]], ftTag: WeakTypeTag[FT[Any]]): Tree =
mkIsCons1(lTag.tpe, fhTag.tpe.typeConstructor, ftTag.tpe.typeConstructor)
val isCons1TC: Tree = tq"_root_.shapeless.IsHCons1"
val consTpe: Type = hconsTpe
def mkPackUnpack(nme: TypeName, lTpt: Tree, hdTpt: Tree, tlTpt: Tree): (Tree, Tree) =
(
q"""
def pack[$nme](u: ($hdTpt, $tlTpt)): $lTpt = _root_.shapeless.::(u._1, u._2)
""",
q"""
def unpack[$nme](p: $lTpt): ($hdTpt, $tlTpt) = (p.head, p.tail)
"""
)
}
@macrocompat.bundle
class IsCCons1Macros(val c: whitebox.Context) extends IsCons1Macros {
import c.universe._
def mkIsCCons1Impl[L[_], FH[_[_]], FT[_[_]]]
(implicit lTag: WeakTypeTag[L[_]], fhTag: WeakTypeTag[FH[Any]], ftTag: WeakTypeTag[FT[Any]]): Tree =
mkIsCons1(lTag.tpe, fhTag.tpe.typeConstructor, ftTag.tpe.typeConstructor)
val isCons1TC: Tree = tq"_root_.shapeless.IsCCons1"
val consTpe: Type = cconsTpe
def mkPackUnpack(nme: TypeName, lTpt: Tree, hdTpt: Tree, tlTpt: Tree): (Tree, Tree) =
(
q"""
def pack[$nme](u: _root_.scala.Either[$hdTpt, $tlTpt]): $lTpt = u match {
case _root_.scala.Left(hd) => _root_.shapeless.Inl[$hdTpt, $tlTpt](hd)
case _root_.scala.Right(tl) => _root_.shapeless.Inr[$hdTpt, $tlTpt](tl)
}
""",
q"""
def unpack[$nme](p: $lTpt): _root_.scala.Either[$hdTpt, $tlTpt] = p match {
case _root_.shapeless.Inl(hd) => _root_.scala.Left[$hdTpt, $tlTpt](hd)
case _root_.shapeless.Inr(tl) => _root_.scala.Right[$hdTpt, $tlTpt](tl)
}
"""
)
}
@macrocompat.bundle
trait IsCons1Macros extends CaseClassMacros {
import c.ImplicitCandidate
import c.universe._
val isCons1TC: Tree
val consTpe: Type
def mkPackUnpack(nme: TypeName, lTpt: Tree, hdTpt: Tree, tlTpt: Tree): (Tree, Tree)
def mkIsCons1(lTpe: Type, fhTpe0: Type, ftTpe0: Type): Tree = {
val lParam = lTpe.typeParams.head
val lParamTpe = lParam.asType.toType
val lDealiasedTpe = appliedType(lTpe, lParamTpe).dealias
val (fhTpe, ftTpe) =
c.openImplicits.headOption match {
case Some(ImplicitCandidate(_, _, TypeRef(_, _, List(_, fh, ft)), _)) => (fh, ft)
case other => (fhTpe0, ftTpe0)
}
if(!(lDealiasedTpe.typeConstructor =:= consTpe))
abort("Not H/CCons")
val TypeRef(_, _, List(hd, tl)) = lDealiasedTpe
val lPoly = c.internal.polyType(List(lParam), lDealiasedTpe)
val hdPoly = c.internal.polyType(List(lParam), hd)
val tlPoly = c.internal.polyType(List(lParam), tl)
val nme = TypeName(c.freshName)
val lTpt = appliedTypTree1(lPoly, lParamTpe, nme)
val hdTpt = appliedTypTree1(hdPoly, lParamTpe, nme)
val tlTpt = appliedTypTree1(tlPoly, lParamTpe, nme)
val (pack, unpack) = mkPackUnpack(nme, lTpt, hdTpt, tlTpt)
q"""
type Apply0[F[_], T] = F[T]
type Apply1[F[_[_]], T[_]] = F[T]
new $isCons1TC[$lTpe, $fhTpe, $ftTpe] {
type H[$nme] = $hdTpt
type T[$nme] = $tlTpt
def mkFhh: Apply1[$fhTpe, H] = _root_.shapeless.lazily[Apply1[$fhTpe, H]]
def mkFtt: Apply1[$ftTpe, T] = _root_.shapeless.lazily[Apply1[$ftTpe, T]]
$pack
$unpack
}
"""
}
}
@macrocompat.bundle
class Split1Macros(val c: whitebox.Context) extends CaseClassMacros {
import c.ImplicitCandidate
import c.universe._
def mkSplit1Impl[L[_], FO[_[_]], FI[_[_]]]
(implicit lTag: WeakTypeTag[L[_]], foTag: WeakTypeTag[FO[Any]], fiTag: WeakTypeTag[FI[Any]]): Tree = {
val lTpe = lTag.tpe
val (foTpe, fiTpe) =
c.openImplicits.headOption match {
case Some(ImplicitCandidate(_, _, TypeRef(_, _, List(_, fo, fi)), _)) => (fo, fi)
case other => (foTag.tpe.typeConstructor, fiTag.tpe.typeConstructor)
}
if(isReprType1(lTpe))
abort("No Split1 instance available for HList or Coproduct")
val lParam = lTpe.typeParams.head
val lParamTpe = lParam.asType.toType
val lDealiasedTpe = appliedType(lTpe, lParamTpe).dealias
val nme = TypeName(c.freshName)
def balanced(args: List[Type]): Boolean =
args.find(_.contains(lParam)).map { pivot =>
!(pivot =:= lParamTpe) &&
args.forall { arg =>
arg =:= pivot || !arg.contains(lParam)
}
}.getOrElse(false)
val (oTpt, iTpt) =
lDealiasedTpe match {
case tpe @ TypeRef(pre, sym, args) if balanced(args) =>
val Some(pivot) = args.find(_.contains(lParam))
val oPoly = c.internal.polyType(List(lParam), appliedType(tpe.typeConstructor, args.map { arg => if(arg =:= pivot) lParamTpe else arg }))
val oTpt = appliedTypTree1(oPoly, lParamTpe, nme)
val iPoly = c.internal.polyType(List(lParam), pivot)
val iTpt = appliedTypTree1(iPoly, lParamTpe, nme)
(oTpt, iTpt)
case other =>
c.abort(c.enclosingPosition, s"Can't split $other into a non-trivial outer and inner type constructor")
}
val lPoly = c.internal.polyType(List(lParam), lDealiasedTpe)
val lTpt = appliedTypTree1(lPoly, lParamTpe, nme)
q"""
type Apply0[F[_], T] = F[T]
type Apply1[F[_[_]], T[_]] = F[T]
new _root_.shapeless.Split1[$lTpe, $foTpe, $fiTpe] {
type O[$nme] = $oTpt
type I[$nme] = $iTpt
def mkFoo: Apply1[$foTpe, O] = _root_.shapeless.lazily[Apply1[$foTpe, O]]
def mkFii: Apply1[$fiTpe, I] = _root_.shapeless.lazily[Apply1[$fiTpe, I]]
def pack[$nme](u: O[I[$nme]]): $lTpt = u
def unpack[$nme](p: $lTpt): O[I[$nme]] = p
}
"""
}
}
| rorygraves/perf_tester | corpus/shapeless/src/main/scala/shapeless/generic1.scala | Scala | apache-2.0 | 14,718 |
package me.archdev.restapi.utils.db
import com.zaxxer.hikari.{ HikariConfig, HikariDataSource }
class DatabaseConnector(jdbcUrl: String, dbUser: String, dbPassword: String) {
private val hikariDataSource = {
val hikariConfig = new HikariConfig()
hikariConfig.setJdbcUrl(jdbcUrl)
hikariConfig.setUsername(dbUser)
hikariConfig.setPassword(dbPassword)
new HikariDataSource(hikariConfig)
}
val profile = slick.jdbc.PostgresProfile
import profile.api._
val db = Database.forDataSource(hikariDataSource, None)
db.createSession()
}
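/**
 * Illustrative usage sketch, not part of the original sources: the table name and query below
 * are placeholders chosen for this example only.
 */
object DatabaseConnectorUsageExample {
  def countUsers(connector: DatabaseConnector): scala.concurrent.Future[Int] = {
    import connector.profile.api._
    // Runs a plain-SQL query through the HikariCP-backed Slick database held by the connector.
    connector.db.run(sql"SELECT count(*) FROM users".as[Int].head)
  }
}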
| ArchDev/akka-http-rest | src/main/scala/me/archdev/restapi/utils/db/DatabaseConnector.scala | Scala | mit | 564 |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package cqrs.command
/**
* A marker trait for commands handled by the aggregates on the write side.
*/
trait DomainCommand | cqrs-endeavour/cqrs-endeavour | cqrs-framework/src/main/scala/cqrs/command/DomainCommand.scala | Scala | mpl-2.0 | 331 |
/**
* Copyright (C) 2016 Verizon. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.verizon.bda.trapezium.dal.core.cassandra
import com.datastax.driver.core.{ResultSet, Cluster}
import com.datastax.driver.core.querybuilder.QueryBuilder
import com.verizon.bda.trapezium.dal.core.cassandra.utils.CassandraDAOUtils
import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.sql.SQLContext
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import org.slf4j.LoggerFactory
import scala.collection.mutable.ListBuffer
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import scala.collection.JavaConverters._
/**
* Created by Faraz on 2/29/16.
 * These are unit tests which use an embedded dummy Cassandra.
 * The tables used here, such as ipreputation, are just for testing
 * and should not be confused with tables in the netintel keyspace.
* extends SparkFunSuite with LocalSparkContext
* @deprecated
*/
class CassandraDAOUnitTest extends CassandraTestSuiteBase {
}
| Verizon/trapezium | cassandra-dao/src/test/scala/com/verizon/bda/trapezium/dal/core/cassandra/CassandraDAOUnitTest.scala | Scala | apache-2.0 | 1,540 |
package core.shapes.dim1
import core.main._, core.pieces._, core.shapes.dim0._, core.shapes.dim1._, core.shapes.dim2._, core.shapes.forces._
class LineSegmentImpl(p_start: Point, val p_end: Point, override val rawDir: Vector2) extends RayImpl(p_start,rawDir) with LineSegment {
override lazy val dir = rawDir.unit
lazy val length = p.distTo(p_end)
} | radiotech/FlatLand | src/core/shapes/dim1/LineSegmentImpl.scala | Scala | mit | 355 |
package lila.app
package templating
import lila.bookmark.Env.{ current => bookmarkEnv }
import lila.game.Game
import lila.user.User
trait BookmarkHelper {
def isBookmarked(game: Game, user: User): Boolean =
bookmarkEnv.api.bookmarked(game, user)
}
| r0k3/lila | app/templating/BookmarkHelper.scala | Scala | mit | 259 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import org.apache.spark.streaming.dstream.{InputDStream, ForEachDStream}
import org.apache.spark.streaming.util.ManualClock
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.SynchronizedBuffer
import scala.reflect.ClassTag
import java.io.{ObjectInputStream, IOException}
import org.scalatest.{BeforeAndAfter, FunSuite}
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
/**
 * This is an input stream just for the testsuites. It is equivalent to a checkpointable,
 * replayable, reliable message queue like Kafka. It requires a sequence as input, and
 * returns the i_th element at the i_th batch under the manual clock.
*/
class TestInputStream[T: ClassTag](ssc_ : StreamingContext, input: Seq[Seq[T]], numPartitions: Int)
extends InputDStream[T](ssc_) {
def start() {}
def stop() {}
def compute(validTime: Time): Option[RDD[T]] = {
logInfo("Computing RDD for time " + validTime)
val index = ((validTime - zeroTime) / slideDuration - 1).toInt
val selectedInput = if (index < input.size) input(index) else Seq[T]()
// lets us test cases where RDDs are not created
if (selectedInput == null)
return None
val rdd = ssc.sc.makeRDD(selectedInput, numPartitions)
logInfo("Created RDD " + rdd.id + " with " + selectedInput)
Some(rdd)
}
}
/**
 * This is an output stream just for the testsuites. All the output is collected into an
 * ArrayBuffer. This buffer is wiped clean on being restored from a checkpoint.
*
* The buffer contains a sequence of RDD's, each containing a sequence of items
*/
class TestOutputStream[T: ClassTag](parent: DStream[T],
val output: ArrayBuffer[Seq[T]] = ArrayBuffer[Seq[T]]())
extends ForEachDStream[T](parent, (rdd: RDD[T], t: Time) => {
val collected = rdd.collect()
output += collected
}) {
  // This is to clear the output buffer every time it is read from a checkpoint
@throws(classOf[IOException])
private def readObject(ois: ObjectInputStream) {
ois.defaultReadObject()
output.clear()
}
}
/**
 * This is an output stream just for the testsuites. All the output is collected into an
 * ArrayBuffer. This buffer is wiped clean on being restored from a checkpoint.
*
* The buffer contains a sequence of RDD's, each containing a sequence of partitions, each
* containing a sequence of items.
*/
class TestOutputStreamWithPartitions[T: ClassTag](parent: DStream[T],
val output: ArrayBuffer[Seq[Seq[T]]] = ArrayBuffer[Seq[Seq[T]]]())
extends ForEachDStream[T](parent, (rdd: RDD[T], t: Time) => {
val collected = rdd.glom().collect().map(_.toSeq)
output += collected
}) {
  // This is to clear the output buffer every time it is read from a checkpoint
@throws(classOf[IOException])
private def readObject(ois: ObjectInputStream) {
ois.defaultReadObject()
output.clear()
}
def toTestOutputStream = new TestOutputStream[T](this.parent, this.output.map(_.flatten))
}
/**
* This is the base trait for Spark Streaming testsuites. This provides basic functionality
* to run user-defined set of input on user-defined stream operations, and verify the output.
*/
trait TestSuiteBase extends FunSuite with BeforeAndAfter with Logging {
// Name of the framework for Spark context
def framework = "TestSuiteBase"
// Master for Spark context
def master = "local[2]"
// Batch duration
def batchDuration = Seconds(1)
// Directory where the checkpoint data will be saved
def checkpointDir = "checkpoint"
// Number of partitions of the input parallel collections created for testing
def numInputPartitions = 2
// Maximum time to wait before the test times out
def maxWaitTimeMillis = 10000
// Whether to actually wait in real time before changing manual clock
def actuallyWait = false
/**
* Set up required DStreams to test the DStream operation using the two sequences
* of input collections.
*/
def setupStreams[U: ClassTag, V: ClassTag](
input: Seq[Seq[U]],
operation: DStream[U] => DStream[V],
numPartitions: Int = numInputPartitions
): StreamingContext = {
// Create StreamingContext
val ssc = new StreamingContext(master, framework, batchDuration)
if (checkpointDir != null) {
ssc.checkpoint(checkpointDir)
}
// Setup the stream computation
val inputStream = new TestInputStream(ssc, input, numPartitions)
val operatedStream = operation(inputStream)
val outputStream = new TestOutputStreamWithPartitions(operatedStream,
new ArrayBuffer[Seq[Seq[V]]] with SynchronizedBuffer[Seq[Seq[V]]])
ssc.registerInputStream(inputStream)
ssc.registerOutputStream(outputStream)
ssc
}
/**
* Set up required DStreams to test the binary operation using the sequence
* of input collections.
*/
def setupStreams[U: ClassTag, V: ClassTag, W: ClassTag](
input1: Seq[Seq[U]],
input2: Seq[Seq[V]],
operation: (DStream[U], DStream[V]) => DStream[W]
): StreamingContext = {
// Create StreamingContext
val ssc = new StreamingContext(master, framework, batchDuration)
if (checkpointDir != null) {
ssc.checkpoint(checkpointDir)
}
// Setup the stream computation
val inputStream1 = new TestInputStream(ssc, input1, numInputPartitions)
val inputStream2 = new TestInputStream(ssc, input2, numInputPartitions)
val operatedStream = operation(inputStream1, inputStream2)
val outputStream = new TestOutputStreamWithPartitions(operatedStream,
new ArrayBuffer[Seq[Seq[W]]] with SynchronizedBuffer[Seq[Seq[W]]])
ssc.registerInputStream(inputStream1)
ssc.registerInputStream(inputStream2)
ssc.registerOutputStream(outputStream)
ssc
}
/**
   * Runs the streams set up in `ssc` on the manual clock for `numBatches` batches and
   * returns the collected output. It will wait until `numExpectedOutput` pieces of
   * output data have been collected or the timeout (set by `maxWaitTimeMillis`) is reached.
*
* Returns a sequence of items for each RDD.
*/
def runStreams[V: ClassTag](
ssc: StreamingContext,
numBatches: Int,
numExpectedOutput: Int
): Seq[Seq[V]] = {
// Flatten each RDD into a single Seq
runStreamsWithPartitions(ssc, numBatches, numExpectedOutput).map(_.flatten.toSeq)
}
/**
   * Runs the streams set up in `ssc` on the manual clock for `numBatches` batches and
   * returns the collected output. It will wait until `numExpectedOutput` pieces of
   * output data have been collected or the timeout (set by `maxWaitTimeMillis`) is reached.
*
* Returns a sequence of RDD's. Each RDD is represented as several sequences of items, each
* representing one partition.
*/
def runStreamsWithPartitions[V: ClassTag](
ssc: StreamingContext,
numBatches: Int,
numExpectedOutput: Int
): Seq[Seq[Seq[V]]] = {
assert(numBatches > 0, "Number of batches to run stream computation is zero")
assert(numExpectedOutput > 0, "Number of expected outputs after " + numBatches + " is zero")
logInfo("numBatches = " + numBatches + ", numExpectedOutput = " + numExpectedOutput)
// Get the output buffer
val outputStream = ssc.graph.getOutputStreams.head.asInstanceOf[TestOutputStreamWithPartitions[V]]
val output = outputStream.output
try {
// Start computation
ssc.start()
// Advance manual clock
val clock = ssc.scheduler.clock.asInstanceOf[ManualClock]
logInfo("Manual clock before advancing = " + clock.time)
if (actuallyWait) {
for (i <- 1 to numBatches) {
logInfo("Actually waiting for " + batchDuration)
clock.addToTime(batchDuration.milliseconds)
Thread.sleep(batchDuration.milliseconds)
}
} else {
clock.addToTime(numBatches * batchDuration.milliseconds)
}
logInfo("Manual clock after advancing = " + clock.time)
// Wait until expected number of output items have been generated
val startTime = System.currentTimeMillis()
while (output.size < numExpectedOutput && System.currentTimeMillis() - startTime < maxWaitTimeMillis) {
logInfo("output.size = " + output.size + ", numExpectedOutput = " + numExpectedOutput)
Thread.sleep(100)
}
val timeTaken = System.currentTimeMillis() - startTime
assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms")
assert(output.size === numExpectedOutput, "Unexpected number of outputs generated")
      Thread.sleep(500) // Give some time for the forgetting of old RDDs to complete
} catch {
case e: Exception => {e.printStackTrace(); throw e}
} finally {
ssc.stop()
}
output
}
/**
* Verify whether the output values after running a DStream operation
* is same as the expected output values, by comparing the output
* collections either as lists (order matters) or sets (order does not matter)
*/
def verifyOutput[V: ClassTag](
output: Seq[Seq[V]],
expectedOutput: Seq[Seq[V]],
useSet: Boolean
) {
logInfo("--------------------------------")
logInfo("output.size = " + output.size)
logInfo("output")
output.foreach(x => logInfo("[" + x.mkString(",") + "]"))
logInfo("expected output.size = " + expectedOutput.size)
logInfo("expected output")
expectedOutput.foreach(x => logInfo("[" + x.mkString(",") + "]"))
logInfo("--------------------------------")
// Match the output with the expected output
assert(output.size === expectedOutput.size, "Number of outputs do not match")
for (i <- 0 until output.size) {
if (useSet) {
assert(output(i).toSet === expectedOutput(i).toSet)
} else {
assert(output(i).toList === expectedOutput(i).toList)
}
}
logInfo("Output verified successfully")
}
/**
* Test unary DStream operation with a list of inputs, with number of
* batches to run same as the number of expected output values
*/
def testOperation[U: ClassTag, V: ClassTag](
input: Seq[Seq[U]],
operation: DStream[U] => DStream[V],
expectedOutput: Seq[Seq[V]],
useSet: Boolean = false
) {
testOperation[U, V](input, operation, expectedOutput, -1, useSet)
}
/**
* Test unary DStream operation with a list of inputs
* @param input Sequence of input collections
   * @param operation Unary DStream operation to be applied to the input
   * @param expectedOutput Sequence of expected output collections
   * @param numBatches Number of batches to run the operation for
   * @param useSet Compare the output values with the expected output values
   * as sets (order does not matter) or as lists (order matters)
*/
def testOperation[U: ClassTag, V: ClassTag](
input: Seq[Seq[U]],
operation: DStream[U] => DStream[V],
expectedOutput: Seq[Seq[V]],
numBatches: Int,
useSet: Boolean
) {
val numBatches_ = if (numBatches > 0) numBatches else expectedOutput.size
val ssc = setupStreams[U, V](input, operation)
val output = runStreams[V](ssc, numBatches_, expectedOutput.size)
verifyOutput[V](output, expectedOutput, useSet)
}
/**
* Test binary DStream operation with two lists of inputs, with number of
* batches to run same as the number of expected output values
*/
def testOperation[U: ClassTag, V: ClassTag, W: ClassTag](
input1: Seq[Seq[U]],
input2: Seq[Seq[V]],
operation: (DStream[U], DStream[V]) => DStream[W],
expectedOutput: Seq[Seq[W]],
useSet: Boolean
) {
testOperation[U, V, W](input1, input2, operation, expectedOutput, -1, useSet)
}
/**
* Test binary DStream operation with two lists of inputs
* @param input1 First sequence of input collections
* @param input2 Second sequence of input collections
* @param operation Binary DStream operation to be applied to the 2 inputs
* @param expectedOutput Sequence of expected output collections
* @param numBatches Number of batches to run the operation for
   * @param useSet Compare the output values with the expected output values
   * as sets (order does not matter) or as lists (order matters)
*/
def testOperation[U: ClassTag, V: ClassTag, W: ClassTag](
input1: Seq[Seq[U]],
input2: Seq[Seq[V]],
operation: (DStream[U], DStream[V]) => DStream[W],
expectedOutput: Seq[Seq[W]],
numBatches: Int,
useSet: Boolean
) {
val numBatches_ = if (numBatches > 0) numBatches else expectedOutput.size
val ssc = setupStreams[U, V, W](input1, input2, operation)
val output = runStreams[W](ssc, numBatches_, expectedOutput.size)
verifyOutput[W](output, expectedOutput, useSet)
}
}
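/**
 * Illustrative usage sketch, not part of the original file: shows how a concrete suite would
 * exercise `testOperation` with a simple transformation. The suite name and the sample values
 * below are assumptions made for this example.
 */
class ExampleMapOperationSuite extends TestSuiteBase {
  test("map doubles every element of each batch") {
    val input = Seq(Seq(1, 2, 3), Seq(4, 5, 6))
    val expectedOutput = Seq(Seq(2, 4, 6), Seq(8, 10, 12))
    // Runs two batches under the manual clock and compares each batch's output as a set.
    testOperation(input, (s: DStream[Int]) => s.map(_ * 2), expectedOutput, useSet = true)
  }
}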
| mkolod/incubator-spark | streaming/src/test/scala/org/apache/spark/streaming/TestSuiteBase.scala | Scala | apache-2.0 | 13,605 |
/*
* ____ ____ _____ ____ ___ ____
* | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog
package ragnarok
package test
object ArrayObjectSuite extends PerfTestSuite {
"array joining" := {
query("""
medals' := //summer_games/london_medals
medals'' := new medals'
medals'' ~ medals'
[medals', medals'', medals'] where medals'.Total = medals''.Total""")
}
"object joining" := {
query("""
medals' := //summer_games/london_medals
medals'' := new medals'
medals'' ~ medals'
{ a: medals', b: medals'', c: medals' } where medals'.Total = medals''.Total""")
}
}
| precog/platform | ragnarok/src/main/scala/com/precog/ragnarok/test/ArrayObjectSuite.scala | Scala | agpl-3.0 | 1,631 |
package org.lanyard.dist.disc
import org.scalatest.FunSpec
import org.scalatest.Matchers
import org.scalatest.prop.GeneratorDrivenPropertyChecks
class DiscreteTest extends FunSpec with Matchers with GeneratorDrivenPropertyChecks {
describe( "The class for discrete types" ) {
sealed trait DNA
case object A extends DNA
case object C extends DNA
case object G extends DNA
case object T extends DNA
val dna = Discrete( A, C, G, T )
it( "can be constructed with variable size." ) {
dna.size should be( 4 )
}
it( "can be used to convert to integer." ) {
dna.asInt( A ) should be( 0 )
dna.asInt( C ) should be( 1 )
dna.asInt( G ) should be( 2 )
dna.asInt( T ) should be( 3 )
}
it( "can be used to convert from integer." ) {
dna.fromInt( 0 ) should be ( A )
dna.fromInt( 1 ) should be ( C )
dna.fromInt( 2 ) should be ( G )
dna.fromInt( 3 ) should be ( T )
}
}
}
| perian/Lanyard | src/test/scala/org/lanyard/dist/disc/DiscreteTest.scala | Scala | gpl-2.0 | 970 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.catalog
import java.net.URI
import java.util.Locale
import java.util.concurrent.Callable
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable
import scala.util.{Failure, Success, Try}
import com.google.common.cache.{Cache, CacheBuilder}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry.FunctionBuilder
import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionInfo, ImplicitCastInputTypes}
import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, ParserInterface}
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, SubqueryAlias, View}
import org.apache.spark.sql.catalyst.util.StringUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf.GLOBAL_TEMP_DATABASE
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.Utils
object SessionCatalog {
val DEFAULT_DATABASE = "default"
}
/**
* An internal catalog that is used by a Spark Session. This internal catalog serves as a
* proxy to the underlying metastore (e.g. Hive Metastore) and it also manages temporary
* views and functions of the Spark Session that it belongs to.
*
* This class must be thread-safe.
*/
class SessionCatalog(
externalCatalogBuilder: () => ExternalCatalog,
globalTempViewManagerBuilder: () => GlobalTempViewManager,
functionRegistry: FunctionRegistry,
conf: SQLConf,
hadoopConf: Configuration,
parser: ParserInterface,
functionResourceLoader: FunctionResourceLoader) extends Logging {
import SessionCatalog._
import CatalogTypes.TablePartitionSpec
// For testing only.
def this(
externalCatalog: ExternalCatalog,
functionRegistry: FunctionRegistry,
conf: SQLConf) {
this(
() => externalCatalog,
() => new GlobalTempViewManager(conf.getConf(GLOBAL_TEMP_DATABASE)),
functionRegistry,
conf,
new Configuration(),
new CatalystSqlParser(conf),
DummyFunctionResourceLoader)
}
// For testing only.
def this(externalCatalog: ExternalCatalog) {
this(
externalCatalog,
new SimpleFunctionRegistry,
new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))
}
lazy val externalCatalog = externalCatalogBuilder()
lazy val globalTempViewManager = globalTempViewManagerBuilder()
/** List of temporary views, mapping from table name to their logical plan. */
@GuardedBy("this")
protected val tempViews = new mutable.HashMap[String, LogicalPlan]
// Note: we track current database here because certain operations do not explicitly
// specify the database (e.g. DROP TABLE my_table). In these cases we must first
// check whether the temporary view or function exists, then, if not, operate on
// the corresponding item in the current database.
@GuardedBy("this")
protected var currentDb: String = formatDatabaseName(DEFAULT_DATABASE)
  private val validNameFormat = "([\\w_]+)".r
/**
* Checks if the given name conforms the Hive standard ("[a-zA-Z_0-9]+"),
* i.e. if this name only contains characters, numbers, and _.
*
* This method is intended to have the same behavior of
* org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName.
*/
private def validateName(name: String): Unit = {
if (!validNameFormat.pattern.matcher(name).matches()) {
throw new AnalysisException(s"`$name` is not a valid name for tables/databases. " +
"Valid names only contain alphabet characters, numbers and _.")
}
}
/**
* Format table name, taking into account case sensitivity.
*/
protected[this] def formatTableName(name: String): String = {
if (conf.caseSensitiveAnalysis) name else name.toLowerCase(Locale.ROOT)
}
/**
* Format database name, taking into account case sensitivity.
*/
protected[this] def formatDatabaseName(name: String): String = {
if (conf.caseSensitiveAnalysis) name else name.toLowerCase(Locale.ROOT)
}
private val tableRelationCache: Cache[QualifiedTableName, LogicalPlan] = {
val cacheSize = conf.tableRelationCacheSize
CacheBuilder.newBuilder().maximumSize(cacheSize).build[QualifiedTableName, LogicalPlan]()
}
/** This method provides a way to get a cached plan. */
def getCachedPlan(t: QualifiedTableName, c: Callable[LogicalPlan]): LogicalPlan = {
tableRelationCache.get(t, c)
}
/** This method provides a way to get a cached plan if the key exists. */
def getCachedTable(key: QualifiedTableName): LogicalPlan = {
tableRelationCache.getIfPresent(key)
}
/** This method provides a way to cache a plan. */
def cacheTable(t: QualifiedTableName, l: LogicalPlan): Unit = {
tableRelationCache.put(t, l)
}
/** This method provides a way to invalidate a cached plan. */
def invalidateCachedTable(key: QualifiedTableName): Unit = {
tableRelationCache.invalidate(key)
}
/** This method provides a way to invalidate all the cached plans. */
def invalidateAllCachedTables(): Unit = {
tableRelationCache.invalidateAll()
}
/**
* This method is used to make the given path qualified before we
* store this path in the underlying external catalog. So, when a path
* does not contain a scheme, this path will not be changed after the default
* FileSystem is changed.
*/
private def makeQualifiedPath(path: URI): URI = {
val hadoopPath = new Path(path)
val fs = hadoopPath.getFileSystem(hadoopConf)
fs.makeQualified(hadoopPath).toUri
}
private def requireDbExists(db: String): Unit = {
if (!databaseExists(db)) {
throw new NoSuchDatabaseException(db)
}
}
private def requireTableExists(name: TableIdentifier): Unit = {
if (!tableExists(name)) {
val db = name.database.getOrElse(currentDb)
throw new NoSuchTableException(db = db, table = name.table)
}
}
private def requireTableNotExists(name: TableIdentifier): Unit = {
if (tableExists(name)) {
val db = name.database.getOrElse(currentDb)
throw new TableAlreadyExistsException(db = db, table = name.table)
}
}
// ----------------------------------------------------------------------------
// Databases
// ----------------------------------------------------------------------------
// All methods in this category interact directly with the underlying catalog.
// ----------------------------------------------------------------------------
def createDatabase(dbDefinition: CatalogDatabase, ignoreIfExists: Boolean): Unit = {
val dbName = formatDatabaseName(dbDefinition.name)
if (dbName == globalTempViewManager.database) {
throw new AnalysisException(
s"${globalTempViewManager.database} is a system preserved database, " +
"you cannot create a database with this name.")
}
validateName(dbName)
val qualifiedPath = makeQualifiedPath(dbDefinition.locationUri)
externalCatalog.createDatabase(
dbDefinition.copy(name = dbName, locationUri = qualifiedPath),
ignoreIfExists)
}
def dropDatabase(db: String, ignoreIfNotExists: Boolean, cascade: Boolean): Unit = {
val dbName = formatDatabaseName(db)
if (dbName == DEFAULT_DATABASE) {
throw new AnalysisException(s"Can not drop default database")
}
if (cascade && databaseExists(dbName)) {
listTables(dbName).foreach { t =>
invalidateCachedTable(QualifiedTableName(dbName, t.table))
}
}
externalCatalog.dropDatabase(dbName, ignoreIfNotExists, cascade)
}
def alterDatabase(dbDefinition: CatalogDatabase): Unit = {
val dbName = formatDatabaseName(dbDefinition.name)
requireDbExists(dbName)
externalCatalog.alterDatabase(dbDefinition.copy(name = dbName))
}
def getDatabaseMetadata(db: String): CatalogDatabase = {
val dbName = formatDatabaseName(db)
requireDbExists(dbName)
externalCatalog.getDatabase(dbName)
}
def databaseExists(db: String): Boolean = {
val dbName = formatDatabaseName(db)
externalCatalog.databaseExists(dbName)
}
def listDatabases(): Seq[String] = {
externalCatalog.listDatabases()
}
def listDatabases(pattern: String): Seq[String] = {
externalCatalog.listDatabases(pattern)
}
def getCurrentDatabase: String = synchronized { currentDb }
def setCurrentDatabase(db: String): Unit = {
val dbName = formatDatabaseName(db)
if (dbName == globalTempViewManager.database) {
throw new AnalysisException(
s"${globalTempViewManager.database} is a system preserved database, " +
"you cannot use it as current database. To access global temporary views, you should " +
"use qualified name with the GLOBAL_TEMP_DATABASE, e.g. SELECT * FROM " +
s"${globalTempViewManager.database}.viewName.")
}
requireDbExists(dbName)
synchronized { currentDb = dbName }
}
/**
* Get the path for creating a non-default database when database location is not provided
* by users.
*/
def getDefaultDBPath(db: String): URI = {
val database = formatDatabaseName(db)
new Path(new Path(conf.warehousePath), database + ".db").toUri
}
// ----------------------------------------------------------------------------
// Tables
// ----------------------------------------------------------------------------
// There are two kinds of tables, temporary views and metastore tables.
// Temporary views are isolated across sessions and do not belong to any
// particular database. Metastore tables can be used across multiple
// sessions as their metadata is persisted in the underlying catalog.
// ----------------------------------------------------------------------------
// ----------------------------------------------------
// | Methods that interact with metastore tables only |
// ----------------------------------------------------
/**
* Create a metastore table in the database specified in `tableDefinition`.
* If no such database is specified, create it in the current database.
*/
def createTable(
tableDefinition: CatalogTable,
ignoreIfExists: Boolean,
validateLocation: Boolean = true): Unit = {
val db = formatDatabaseName(tableDefinition.identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableDefinition.identifier.table)
val tableIdentifier = TableIdentifier(table, Some(db))
validateName(table)
val newTableDefinition = if (tableDefinition.storage.locationUri.isDefined
&& !tableDefinition.storage.locationUri.get.isAbsolute) {
// make the location of the table qualified.
val qualifiedTableLocation =
makeQualifiedPath(tableDefinition.storage.locationUri.get)
tableDefinition.copy(
storage = tableDefinition.storage.copy(locationUri = Some(qualifiedTableLocation)),
identifier = tableIdentifier)
} else {
tableDefinition.copy(identifier = tableIdentifier)
}
requireDbExists(db)
if (tableExists(newTableDefinition.identifier)) {
if (!ignoreIfExists) {
throw new TableAlreadyExistsException(db = db, table = table)
}
} else if (validateLocation) {
validateTableLocation(newTableDefinition)
}
externalCatalog.createTable(newTableDefinition, ignoreIfExists)
}
def validateTableLocation(table: CatalogTable): Unit = {
// SPARK-19724: the default location of a managed table should be non-existent or empty.
if (table.tableType == CatalogTableType.MANAGED &&
!conf.allowCreatingManagedTableUsingNonemptyLocation) {
val tableLocation =
new Path(table.storage.locationUri.getOrElse(defaultTablePath(table.identifier)))
val fs = tableLocation.getFileSystem(hadoopConf)
if (fs.exists(tableLocation) && fs.listStatus(tableLocation).nonEmpty) {
throw new AnalysisException(s"Can not create the managed table('${table.identifier}')" +
s". The associated location('${tableLocation.toString}') already exists.")
}
}
}
/**
* Alter the metadata of an existing metastore table identified by `tableDefinition`.
*
* If no database is specified in `tableDefinition`, assume the table is in the
* current database.
*
* Note: If the underlying implementation does not support altering a certain field,
* this becomes a no-op.
*/
def alterTable(tableDefinition: CatalogTable): Unit = {
val db = formatDatabaseName(tableDefinition.identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableDefinition.identifier.table)
val tableIdentifier = TableIdentifier(table, Some(db))
val newTableDefinition = tableDefinition.copy(identifier = tableIdentifier)
requireDbExists(db)
requireTableExists(tableIdentifier)
externalCatalog.alterTable(newTableDefinition)
}
/**
* Alter the data schema of a table identified by the provided table identifier. The new data
* schema should not have conflict column names with the existing partition columns, and should
* still contain all the existing data columns.
*
* @param identifier TableIdentifier
* @param newDataSchema Updated data schema to be used for the table
*/
def alterTableDataSchema(
identifier: TableIdentifier,
newDataSchema: StructType): Unit = {
val db = formatDatabaseName(identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(identifier.table)
val tableIdentifier = TableIdentifier(table, Some(db))
requireDbExists(db)
requireTableExists(tableIdentifier)
val catalogTable = externalCatalog.getTable(db, table)
val oldDataSchema = catalogTable.dataSchema
// not supporting dropping columns yet
val nonExistentColumnNames =
oldDataSchema.map(_.name).filterNot(columnNameResolved(newDataSchema, _))
if (nonExistentColumnNames.nonEmpty) {
throw new AnalysisException(
s"""
|Some existing schema fields (${nonExistentColumnNames.mkString("[", ",", "]")}) are
|not present in the new schema. We don't support dropping columns yet.
""".stripMargin)
}
externalCatalog.alterTableDataSchema(db, table, newDataSchema)
}
private def columnNameResolved(schema: StructType, colName: String): Boolean = {
schema.fields.map(_.name).exists(conf.resolver(_, colName))
}
/**
* Alter Spark's statistics of an existing metastore table identified by the provided table
* identifier.
*/
def alterTableStats(identifier: TableIdentifier, newStats: Option[CatalogStatistics]): Unit = {
val db = formatDatabaseName(identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(identifier.table)
val tableIdentifier = TableIdentifier(table, Some(db))
requireDbExists(db)
requireTableExists(tableIdentifier)
externalCatalog.alterTableStats(db, table, newStats)
// Invalidate the table relation cache
refreshTable(identifier)
}
/**
* Return whether a table/view with the specified name exists. If no database is specified, check
* with current database.
*/
def tableExists(name: TableIdentifier): Boolean = synchronized {
val db = formatDatabaseName(name.database.getOrElse(currentDb))
val table = formatTableName(name.table)
externalCatalog.tableExists(db, table)
}
/**
* Retrieve the metadata of an existing permanent table/view. If no database is specified,
* assume the table/view is in the current database.
*/
@throws[NoSuchDatabaseException]
@throws[NoSuchTableException]
def getTableMetadata(name: TableIdentifier): CatalogTable = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val table = formatTableName(name.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Some(db)))
externalCatalog.getTable(db, table)
}
/**
* Retrieve all metadata of existing permanent tables/views. If no database is specified,
* assume the table/view is in the current database.
* Only the tables/views belong to the same database that can be retrieved are returned.
* For example, if none of the requested tables could be retrieved, an empty list is returned.
* There is no guarantee of ordering of the returned tables.
*/
@throws[NoSuchDatabaseException]
def getTablesByName(names: Seq[TableIdentifier]): Seq[CatalogTable] = {
if (names.nonEmpty) {
val dbs = names.map(_.database.getOrElse(getCurrentDatabase))
if (dbs.distinct.size != 1) {
val tables = names.map(name => formatTableName(name.table))
val qualifiedTableNames = dbs.zip(tables).map { case (d, t) => QualifiedTableName(d, t)}
throw new AnalysisException(
s"Only the tables/views belong to the same database can be retrieved. Querying " +
s"tables/views are $qualifiedTableNames"
)
}
val db = formatDatabaseName(dbs.head)
requireDbExists(db)
val tables = names.map(name => formatTableName(name.table))
externalCatalog.getTablesByName(db, tables)
} else {
Seq.empty
}
}
/**
* Load files stored in given path into an existing metastore table.
* If no database is specified, assume the table is in the current database.
* If the specified table is not found in the database then a [[NoSuchTableException]] is thrown.
*/
def loadTable(
name: TableIdentifier,
loadPath: String,
isOverwrite: Boolean,
isSrcLocal: Boolean): Unit = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val table = formatTableName(name.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Some(db)))
externalCatalog.loadTable(db, table, loadPath, isOverwrite, isSrcLocal)
}
/**
* Load files stored in given path into the partition of an existing metastore table.
* If no database is specified, assume the table is in the current database.
* If the specified table is not found in the database then a [[NoSuchTableException]] is thrown.
*/
def loadPartition(
name: TableIdentifier,
loadPath: String,
spec: TablePartitionSpec,
isOverwrite: Boolean,
inheritTableSpecs: Boolean,
isSrcLocal: Boolean): Unit = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val table = formatTableName(name.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Some(db)))
requireNonEmptyValueInPartitionSpec(Seq(spec))
externalCatalog.loadPartition(
db, table, loadPath, spec, isOverwrite, inheritTableSpecs, isSrcLocal)
}
def defaultTablePath(tableIdent: TableIdentifier): URI = {
val dbName = formatDatabaseName(tableIdent.database.getOrElse(getCurrentDatabase))
val dbLocation = getDatabaseMetadata(dbName).locationUri
new Path(new Path(dbLocation), formatTableName(tableIdent.table)).toUri
}
// ----------------------------------------------
// | Methods that interact with temp views only |
// ----------------------------------------------
/**
* Create a local temporary view.
*/
def createTempView(
name: String,
tableDefinition: LogicalPlan,
overrideIfExists: Boolean): Unit = synchronized {
val table = formatTableName(name)
if (tempViews.contains(table) && !overrideIfExists) {
throw new TempTableAlreadyExistsException(name)
}
tempViews.put(table, tableDefinition)
}
/**
* Create a global temporary view.
*/
def createGlobalTempView(
name: String,
viewDefinition: LogicalPlan,
overrideIfExists: Boolean): Unit = {
globalTempViewManager.create(formatTableName(name), viewDefinition, overrideIfExists)
}
/**
* Alter the definition of a local/global temp view matching the given name, returns true if a
* temp view is matched and altered, false otherwise.
*/
def alterTempViewDefinition(
name: TableIdentifier,
viewDefinition: LogicalPlan): Boolean = synchronized {
val viewName = formatTableName(name.table)
if (name.database.isEmpty) {
if (tempViews.contains(viewName)) {
createTempView(viewName, viewDefinition, overrideIfExists = true)
true
} else {
false
}
} else if (formatDatabaseName(name.database.get) == globalTempViewManager.database) {
globalTempViewManager.update(viewName, viewDefinition)
} else {
false
}
}
/**
* Return a local temporary view exactly as it was stored.
*/
def getTempView(name: String): Option[LogicalPlan] = synchronized {
tempViews.get(formatTableName(name))
}
/**
* Return a global temporary view exactly as it was stored.
*/
def getGlobalTempView(name: String): Option[LogicalPlan] = {
globalTempViewManager.get(formatTableName(name))
}
/**
* Drop a local temporary view.
*
* Returns true if this view is dropped successfully, false otherwise.
*/
def dropTempView(name: String): Boolean = synchronized {
tempViews.remove(formatTableName(name)).isDefined
}
/**
* Drop a global temporary view.
*
* Returns true if this view is dropped successfully, false otherwise.
*/
def dropGlobalTempView(name: String): Boolean = {
globalTempViewManager.remove(formatTableName(name))
}
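  // Illustrative sketch (not part of the original file): the testing-only constructor defined near
  // the top of this class makes it easy to exercise the temp view lifecycle against an in-memory
  // catalog, e.g.
  //
  //   val catalog = new SessionCatalog(new InMemoryCatalog)
  //   catalog.createTempView("tv", somePlan, overrideIfExists = false)   // somePlan: any LogicalPlan
  //   assert(catalog.getTempView("tv").isDefined)
  //   catalog.dropTempView("tv")
  //
  // where "tv" and somePlan are placeholder names used only for this example.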
// -------------------------------------------------------------
// | Methods that interact with temporary and metastore tables |
// -------------------------------------------------------------
/**
* Retrieve the metadata of an existing temporary view or permanent table/view.
*
* If a database is specified in `name`, this will return the metadata of table/view in that
* database.
* If no database is specified, this will first attempt to get the metadata of a temporary view
* with the same name, then, if that does not exist, return the metadata of table/view in the
* current database.
*/
def getTempViewOrPermanentTableMetadata(name: TableIdentifier): CatalogTable = synchronized {
val table = formatTableName(name.table)
if (name.database.isEmpty) {
getTempView(table).map { plan =>
CatalogTable(
identifier = TableIdentifier(table),
tableType = CatalogTableType.VIEW,
storage = CatalogStorageFormat.empty,
schema = plan.output.toStructType)
}.getOrElse(getTableMetadata(name))
} else if (formatDatabaseName(name.database.get) == globalTempViewManager.database) {
globalTempViewManager.get(table).map { plan =>
CatalogTable(
identifier = TableIdentifier(table, Some(globalTempViewManager.database)),
tableType = CatalogTableType.VIEW,
storage = CatalogStorageFormat.empty,
schema = plan.output.toStructType)
}.getOrElse(throw new NoSuchTableException(globalTempViewManager.database, table))
} else {
getTableMetadata(name)
}
}
/**
* Rename a table.
*
* If a database is specified in `oldName`, this will rename the table in that database.
* If no database is specified, this will first attempt to rename a temporary view with
* the same name, then, if that does not exist, rename the table in the current database.
*
* This assumes the database specified in `newName` matches the one in `oldName`.
*/
def renameTable(oldName: TableIdentifier, newName: TableIdentifier): Unit = synchronized {
val db = formatDatabaseName(oldName.database.getOrElse(currentDb))
newName.database.map(formatDatabaseName).foreach { newDb =>
if (db != newDb) {
throw new AnalysisException(
s"RENAME TABLE source and destination databases do not match: '$db' != '$newDb'")
}
}
val oldTableName = formatTableName(oldName.table)
val newTableName = formatTableName(newName.table)
if (db == globalTempViewManager.database) {
globalTempViewManager.rename(oldTableName, newTableName)
} else {
requireDbExists(db)
if (oldName.database.isDefined || !tempViews.contains(oldTableName)) {
requireTableExists(TableIdentifier(oldTableName, Some(db)))
requireTableNotExists(TableIdentifier(newTableName, Some(db)))
validateName(newTableName)
validateNewLocationOfRename(oldName, newName)
externalCatalog.renameTable(db, oldTableName, newTableName)
} else {
if (newName.database.isDefined) {
throw new AnalysisException(
s"RENAME TEMPORARY VIEW from '$oldName' to '$newName': cannot specify database " +
s"name '${newName.database.get}' in the destination table")
}
if (tempViews.contains(newTableName)) {
throw new AnalysisException(s"RENAME TEMPORARY VIEW from '$oldName' to '$newName': " +
"destination table already exists")
}
val table = tempViews(oldTableName)
tempViews.remove(oldTableName)
tempViews.put(newTableName, table)
}
}
}
/**
* Drop a table.
*
* If a database is specified in `name`, this will drop the table from that database.
* If no database is specified, this will first attempt to drop a temporary view with
* the same name, then, if that does not exist, drop the table from the current database.
*/
def dropTable(
name: TableIdentifier,
ignoreIfNotExists: Boolean,
purge: Boolean): Unit = synchronized {
val db = formatDatabaseName(name.database.getOrElse(currentDb))
val table = formatTableName(name.table)
if (db == globalTempViewManager.database) {
val viewExists = globalTempViewManager.remove(table)
if (!viewExists && !ignoreIfNotExists) {
throw new NoSuchTableException(globalTempViewManager.database, table)
}
} else {
if (name.database.isDefined || !tempViews.contains(table)) {
requireDbExists(db)
        // Drop the table if it exists. If it does not exist and ignoreIfNotExists is false,
        // throw a NoSuchTableException.
if (tableExists(TableIdentifier(table, Option(db)))) {
externalCatalog.dropTable(db, table, ignoreIfNotExists = true, purge = purge)
} else if (!ignoreIfNotExists) {
throw new NoSuchTableException(db = db, table = table)
}
} else {
tempViews.remove(table)
}
}
}
/**
* Return a [[LogicalPlan]] that represents the given table or view.
*
* If a database is specified in `name`, this will return the table/view from that database.
* If no database is specified, this will first attempt to return a temporary view with
* the same name, then, if that does not exist, return the table/view from the current database.
*
   * Note that the global temp view database is also valid here; in that case this will return
   * the global temp view matching the given name.
*
* If the relation is a view, we generate a [[View]] operator from the view description, and
* wrap the logical plan in a [[SubqueryAlias]] which will track the name of the view.
   * [[SubqueryAlias]] will also keep track of the name and the (optional) database of the table/view.
*
* @param name The name of the table/view that we look up.
*/
def lookupRelation(name: TableIdentifier): LogicalPlan = {
synchronized {
val db = formatDatabaseName(name.database.getOrElse(currentDb))
val table = formatTableName(name.table)
if (db == globalTempViewManager.database) {
globalTempViewManager.get(table).map { viewDef =>
SubqueryAlias(table, db, viewDef)
}.getOrElse(throw new NoSuchTableException(db, table))
} else if (name.database.isDefined || !tempViews.contains(table)) {
val metadata = externalCatalog.getTable(db, table)
if (metadata.tableType == CatalogTableType.VIEW) {
val viewText = metadata.viewText.getOrElse(sys.error("Invalid view without text."))
logDebug(s"'$viewText' will be used for the view($table).")
// The relation is a view, so we wrap the relation by:
// 1. Add a [[View]] operator over the relation to keep track of the view desc;
// 2. Wrap the logical plan in a [[SubqueryAlias]] which tracks the name of the view.
val child = View(
desc = metadata,
output = metadata.schema.toAttributes,
child = parser.parsePlan(viewText))
SubqueryAlias(table, db, child)
} else {
SubqueryAlias(table, db, UnresolvedCatalogRelation(metadata))
}
} else {
SubqueryAlias(table, tempViews(table))
}
}
}
/**
* Return whether a table with the specified name is a temporary view.
*
* Note: The temporary view cache is checked only when database is not
* explicitly specified.
*/
def isTemporaryTable(name: TableIdentifier): Boolean = synchronized {
val table = formatTableName(name.table)
if (name.database.isEmpty) {
tempViews.contains(table)
} else if (formatDatabaseName(name.database.get) == globalTempViewManager.database) {
globalTempViewManager.get(table).isDefined
} else {
false
}
}
/**
* List all tables in the specified database, including local temporary views.
*
   * Note that, if the specified database is the global temporary view database, we will list
   * global temporary views.
*/
def listTables(db: String): Seq[TableIdentifier] = listTables(db, "*")
/**
* List all matching tables in the specified database, including local temporary views.
*
   * Note that, if the specified database is the global temporary view database, we will list
   * global temporary views.
*/
def listTables(db: String, pattern: String): Seq[TableIdentifier] = listTables(db, pattern, true)
/**
* List all matching tables in the specified database, including local temporary views
* if includeLocalTempViews is enabled.
*
   * Note that, if the specified database is the global temporary view database, we will list
   * global temporary views.
*/
def listTables(
db: String,
pattern: String,
includeLocalTempViews: Boolean): Seq[TableIdentifier] = {
val dbName = formatDatabaseName(db)
val dbTables = if (dbName == globalTempViewManager.database) {
globalTempViewManager.listViewNames(pattern).map { name =>
TableIdentifier(name, Some(globalTempViewManager.database))
}
} else {
requireDbExists(dbName)
externalCatalog.listTables(dbName, pattern).map { name =>
TableIdentifier(name, Some(dbName))
}
}
if (includeLocalTempViews) {
dbTables ++ listLocalTempViews(pattern)
} else {
dbTables
}
}
/**
* List all matching local temporary views.
*/
def listLocalTempViews(pattern: String): Seq[TableIdentifier] = {
synchronized {
StringUtils.filterPattern(tempViews.keys.toSeq, pattern).map { name =>
TableIdentifier(name)
}
}
}
/**
* Refresh the cache entry for a metastore table, if any.
*/
def refreshTable(name: TableIdentifier): Unit = synchronized {
val dbName = formatDatabaseName(name.database.getOrElse(currentDb))
val tableName = formatTableName(name.table)
// Go through temporary views and invalidate them.
// If the database is defined, this may be a global temporary view.
// If the database is not defined, there is a good chance this is a temp view.
if (name.database.isEmpty) {
tempViews.get(tableName).foreach(_.refresh())
} else if (dbName == globalTempViewManager.database) {
globalTempViewManager.get(tableName).foreach(_.refresh())
}
// Also invalidate the table relation cache.
val qualifiedTableName = QualifiedTableName(dbName, tableName)
tableRelationCache.invalidate(qualifiedTableName)
}
/**
* Drop all existing temporary views.
* For testing only.
*/
def clearTempTables(): Unit = synchronized {
tempViews.clear()
}
// ----------------------------------------------------------------------------
// Partitions
// ----------------------------------------------------------------------------
// All methods in this category interact directly with the underlying catalog.
// These methods are concerned with only metastore tables.
// ----------------------------------------------------------------------------
// TODO: We need to figure out how these methods interact with our data source
// tables. For such tables, we do not store values of partitioning columns in
// the metastore. For now, partition values of a data source table will be
// automatically discovered when we load the table.
/**
* Create partitions in an existing table, assuming it exists.
* If no database is specified, assume the table is in the current database.
*/
def createPartitions(
tableName: TableIdentifier,
parts: Seq[CatalogTablePartition],
ignoreIfExists: Boolean): Unit = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(parts.map(_.spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(parts.map(_.spec))
externalCatalog.createPartitions(db, table, parts, ignoreIfExists)
}
/**
* Drop partitions from a table, assuming they exist.
* If no database is specified, assume the table is in the current database.
*/
def dropPartitions(
tableName: TableIdentifier,
specs: Seq[TablePartitionSpec],
ignoreIfNotExists: Boolean,
purge: Boolean,
retainData: Boolean): Unit = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requirePartialMatchedPartitionSpec(specs, getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(specs)
externalCatalog.dropPartitions(db, table, specs, ignoreIfNotExists, purge, retainData)
}
/**
* Override the specs of one or many existing table partitions, assuming they exist.
*
* This assumes index i of `specs` corresponds to index i of `newSpecs`.
* If no database is specified, assume the table is in the current database.
*/
def renamePartitions(
tableName: TableIdentifier,
specs: Seq[TablePartitionSpec],
newSpecs: Seq[TablePartitionSpec]): Unit = {
val tableMetadata = getTableMetadata(tableName)
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(specs, tableMetadata)
requireExactMatchedPartitionSpec(newSpecs, tableMetadata)
requireNonEmptyValueInPartitionSpec(specs)
requireNonEmptyValueInPartitionSpec(newSpecs)
externalCatalog.renamePartitions(db, table, specs, newSpecs)
}
/**
   * Alter one or many table partitions whose specs match those specified in `parts`,
* assuming the partitions exist.
*
* If no database is specified, assume the table is in the current database.
*
* Note: If the underlying implementation does not support altering a certain field,
* this becomes a no-op.
*/
def alterPartitions(tableName: TableIdentifier, parts: Seq[CatalogTablePartition]): Unit = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(parts.map(_.spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(parts.map(_.spec))
externalCatalog.alterPartitions(db, table, parts)
}
/**
* Retrieve the metadata of a table partition, assuming it exists.
* If no database is specified, assume the table is in the current database.
*/
def getPartition(tableName: TableIdentifier, spec: TablePartitionSpec): CatalogTablePartition = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(Seq(spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(Seq(spec))
externalCatalog.getPartition(db, table, spec)
}
/**
* List the names of all partitions that belong to the specified table, assuming it exists.
*
* A partial partition spec may optionally be provided to filter the partitions returned.
* For instance, if there exist partitions (a='1', b='2'), (a='1', b='3') and (a='2', b='4'),
* then a partial spec of (a='1') will return the first two only.
*/
def listPartitionNames(
tableName: TableIdentifier,
partialSpec: Option[TablePartitionSpec] = None): Seq[String] = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
partialSpec.foreach { spec =>
requirePartialMatchedPartitionSpec(Seq(spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(Seq(spec))
}
externalCatalog.listPartitionNames(db, table, partialSpec)
}
/**
* List the metadata of all partitions that belong to the specified table, assuming it exists.
*
* A partial partition spec may optionally be provided to filter the partitions returned.
* For instance, if there exist partitions (a='1', b='2'), (a='1', b='3') and (a='2', b='4'),
* then a partial spec of (a='1') will return the first two only.
*/
def listPartitions(
tableName: TableIdentifier,
partialSpec: Option[TablePartitionSpec] = None): Seq[CatalogTablePartition] = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
partialSpec.foreach { spec =>
requirePartialMatchedPartitionSpec(Seq(spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(Seq(spec))
}
externalCatalog.listPartitions(db, table, partialSpec)
}
/**
* List the metadata of partitions that belong to the specified table, assuming it exists, that
* satisfy the given partition-pruning predicate expressions.
*/
def listPartitionsByFilter(
tableName: TableIdentifier,
predicates: Seq[Expression]): Seq[CatalogTablePartition] = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
externalCatalog.listPartitionsByFilter(db, table, predicates, conf.sessionLocalTimeZone)
}
/**
* Verify if the input partition spec has any empty value.
*/
private def requireNonEmptyValueInPartitionSpec(specs: Seq[TablePartitionSpec]): Unit = {
specs.foreach { s =>
if (s.values.exists(_.isEmpty)) {
val spec = s.map(p => p._1 + "=" + p._2).mkString("[", ", ", "]")
throw new AnalysisException(
s"Partition spec is invalid. The spec ($spec) contains an empty partition column value")
}
}
}
/**
   * Verify if the input partition spec exactly matches the existing defined partition spec.
   * The columns must be the same, but their order can differ.
*/
private def requireExactMatchedPartitionSpec(
specs: Seq[TablePartitionSpec],
table: CatalogTable): Unit = {
val defined = table.partitionColumnNames.sorted
specs.foreach { s =>
if (s.keys.toSeq.sorted != defined) {
throw new AnalysisException(
s"Partition spec is invalid. The spec (${s.keys.mkString(", ")}) must match " +
s"the partition spec (${table.partitionColumnNames.mkString(", ")}) defined in " +
s"table '${table.identifier}'")
}
}
}
/**
   * Verify if the input partition spec partially matches the existing defined partition spec.
   * That is, the columns of the partition spec should be a subset of the defined partition spec.
*/
private def requirePartialMatchedPartitionSpec(
specs: Seq[TablePartitionSpec],
table: CatalogTable): Unit = {
val defined = table.partitionColumnNames
specs.foreach { s =>
if (!s.keys.forall(defined.contains)) {
throw new AnalysisException(
s"Partition spec is invalid. The spec (${s.keys.mkString(", ")}) must be contained " +
s"within the partition spec (${table.partitionColumnNames.mkString(", ")}) defined " +
s"in table '${table.identifier}'")
}
}
}
// ----------------------------------------------------------------------------
// Functions
// ----------------------------------------------------------------------------
// There are two kinds of functions, temporary functions and metastore
// functions (permanent UDFs). Temporary functions are isolated across
// sessions. Metastore functions can be used across multiple sessions as
// their metadata is persisted in the underlying catalog.
// ----------------------------------------------------------------------------
// -------------------------------------------------------
// | Methods that interact with metastore functions only |
// -------------------------------------------------------
/**
* Create a function in the database specified in `funcDefinition`.
* If no such database is specified, create it in the current database.
*
   * @param ignoreIfExists When true, do nothing if a function with the specified name already
   *                       exists in the specified database.
*/
def createFunction(funcDefinition: CatalogFunction, ignoreIfExists: Boolean): Unit = {
val db = formatDatabaseName(funcDefinition.identifier.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
val identifier = FunctionIdentifier(funcDefinition.identifier.funcName, Some(db))
val newFuncDefinition = funcDefinition.copy(identifier = identifier)
if (!functionExists(identifier)) {
externalCatalog.createFunction(db, newFuncDefinition)
} else if (!ignoreIfExists) {
throw new FunctionAlreadyExistsException(db = db, func = identifier.toString)
}
}
/**
* Drop a metastore function.
* If no database is specified, assume the function is in the current database.
*/
def dropFunction(name: FunctionIdentifier, ignoreIfNotExists: Boolean): Unit = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
val identifier = name.copy(database = Some(db))
if (functionExists(identifier)) {
if (functionRegistry.functionExists(identifier)) {
// If we have loaded this function into the FunctionRegistry,
// also drop it from there.
// For a permanent function, because we loaded it to the FunctionRegistry
// when it's first used, we also need to drop it from the FunctionRegistry.
functionRegistry.dropFunction(identifier)
}
externalCatalog.dropFunction(db, name.funcName)
} else if (!ignoreIfNotExists) {
throw new NoSuchPermanentFunctionException(db = db, func = identifier.toString)
}
}
/**
   * Overwrite a metastore function in the database specified in `funcDefinition`.
* If no database is specified, assume the function is in the current database.
*/
def alterFunction(funcDefinition: CatalogFunction): Unit = {
val db = formatDatabaseName(funcDefinition.identifier.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
val identifier = FunctionIdentifier(funcDefinition.identifier.funcName, Some(db))
val newFuncDefinition = funcDefinition.copy(identifier = identifier)
if (functionExists(identifier)) {
if (functionRegistry.functionExists(identifier)) {
// If we have loaded this function into the FunctionRegistry,
// also drop it from there.
// For a permanent function, because we loaded it to the FunctionRegistry
// when it's first used, we also need to drop it from the FunctionRegistry.
functionRegistry.dropFunction(identifier)
}
externalCatalog.alterFunction(db, newFuncDefinition)
} else {
throw new NoSuchPermanentFunctionException(db = db, func = identifier.toString)
}
}
/**
* Retrieve the metadata of a metastore function.
*
* If a database is specified in `name`, this will return the function in that database.
* If no database is specified, this will return the function in the current database.
*/
def getFunctionMetadata(name: FunctionIdentifier): CatalogFunction = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
externalCatalog.getFunction(db, name.funcName)
}
/**
* Check if the function with the specified name exists
*/
def functionExists(name: FunctionIdentifier): Boolean = {
functionRegistry.functionExists(name) || {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
externalCatalog.functionExists(db, name.funcName)
}
}
// ----------------------------------------------------------------
// | Methods that interact with temporary and metastore functions |
// ----------------------------------------------------------------
/**
* Constructs a [[FunctionBuilder]] based on the provided class that represents a function.
*/
private def makeFunctionBuilder(name: String, functionClassName: String): FunctionBuilder = {
val clazz = Utils.classForName(functionClassName)
(input: Seq[Expression]) => makeFunctionExpression(name, clazz, input)
}
/**
   * Constructs an [[Expression]] based on the provided class that represents a function.
*
* This performs reflection to decide what type of [[Expression]] to return in the builder.
*/
protected def makeFunctionExpression(
name: String,
clazz: Class[_],
input: Seq[Expression]): Expression = {
// Unfortunately we need to use reflection here because UserDefinedAggregateFunction
// and ScalaUDAF are defined in sql/core module.
val clsForUDAF =
Utils.classForName("org.apache.spark.sql.expressions.UserDefinedAggregateFunction")
if (clsForUDAF.isAssignableFrom(clazz)) {
val cls = Utils.classForName("org.apache.spark.sql.execution.aggregate.ScalaUDAF")
val e = cls.getConstructor(classOf[Seq[Expression]], clsForUDAF, classOf[Int], classOf[Int])
.newInstance(input,
clazz.getConstructor().newInstance().asInstanceOf[Object], Int.box(1), Int.box(1))
.asInstanceOf[ImplicitCastInputTypes]
// Check input argument size
if (e.inputTypes.size != input.size) {
throw new AnalysisException(s"Invalid number of arguments for function $name. " +
s"Expected: ${e.inputTypes.size}; Found: ${input.size}")
}
e
} else {
throw new AnalysisException(s"No handler for UDAF '${clazz.getCanonicalName}'. " +
s"Use sparkSession.udf.register(...) instead.")
}
}
/**
* Loads resources such as JARs and Files for a function. Every resource is represented
* by a tuple (resource type, resource uri).
*/
def loadFunctionResources(resources: Seq[FunctionResource]): Unit = {
resources.foreach(functionResourceLoader.loadResource)
}
/**
* Registers a temporary or permanent function into a session-specific [[FunctionRegistry]]
*/
def registerFunction(
funcDefinition: CatalogFunction,
overrideIfExists: Boolean,
functionBuilder: Option[FunctionBuilder] = None): Unit = {
val func = funcDefinition.identifier
if (functionRegistry.functionExists(func) && !overrideIfExists) {
throw new AnalysisException(s"Function $func already exists")
}
val info = new ExpressionInfo(funcDefinition.className, func.database.orNull, func.funcName)
val builder =
functionBuilder.getOrElse {
val className = funcDefinition.className
if (!Utils.classIsLoadable(className)) {
throw new AnalysisException(s"Can not load class '$className' when registering " +
s"the function '$func', please make sure it is on the classpath")
}
makeFunctionBuilder(func.unquotedString, className)
}
functionRegistry.registerFunction(func, info, builder)
}
/**
* Drop a temporary function.
*/
def dropTempFunction(name: String, ignoreIfNotExists: Boolean): Unit = {
if (!functionRegistry.dropFunction(FunctionIdentifier(name)) && !ignoreIfNotExists) {
throw new NoSuchTempFunctionException(name)
}
}
/**
   * Returns whether the given function is a temporary function. Returns false if it does not exist.
*/
def isTemporaryFunction(name: FunctionIdentifier): Boolean = {
// copied from HiveSessionCatalog
val hiveFunctions = Seq("histogram_numeric")
// A temporary function is a function that has been registered in functionRegistry
// without a database name, and is neither a built-in function nor a Hive function
name.database.isEmpty &&
functionRegistry.functionExists(name) &&
!FunctionRegistry.builtin.functionExists(name) &&
!hiveFunctions.contains(name.funcName.toLowerCase(Locale.ROOT))
}
/**
   * Return whether this function has been registered in the function registry of the current
   * session. Returns false if it does not exist.
*/
def isRegisteredFunction(name: FunctionIdentifier): Boolean = {
functionRegistry.functionExists(name)
}
/**
   * Returns whether the given function is a persistent function. Returns false if it does not exist.
*/
def isPersistentFunction(name: FunctionIdentifier): Boolean = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
databaseExists(db) && externalCatalog.functionExists(db, name.funcName)
}
protected[sql] def failFunctionLookup(
name: FunctionIdentifier, cause: Option[Throwable] = None): Nothing = {
throw new NoSuchFunctionException(
db = name.database.getOrElse(getCurrentDatabase), func = name.funcName, cause)
}
/**
* Look up the [[ExpressionInfo]] associated with the specified function, assuming it exists.
*/
def lookupFunctionInfo(name: FunctionIdentifier): ExpressionInfo = synchronized {
// TODO: just make function registry take in FunctionIdentifier instead of duplicating this
val database = name.database.orElse(Some(currentDb)).map(formatDatabaseName)
val qualifiedName = name.copy(database = database)
functionRegistry.lookupFunction(name)
.orElse(functionRegistry.lookupFunction(qualifiedName))
.getOrElse {
val db = qualifiedName.database.get
requireDbExists(db)
if (externalCatalog.functionExists(db, name.funcName)) {
val metadata = externalCatalog.getFunction(db, name.funcName)
new ExpressionInfo(
metadata.className,
qualifiedName.database.orNull,
qualifiedName.identifier)
} else {
failFunctionLookup(name)
}
}
}
/**
* Return an [[Expression]] that represents the specified function, assuming it exists.
*
* For a temporary function or a permanent function that has been loaded,
* this method will simply lookup the function through the
* FunctionRegistry and create an expression based on the builder.
*
* For a permanent function that has not been loaded, we will first fetch its metadata
* from the underlying external catalog. Then, we will load all resources associated
* with this function (i.e. jars and files). Finally, we create a function builder
* based on the function class and put the builder into the FunctionRegistry.
* The name of this function in the FunctionRegistry will be `databaseName.functionName`.
*/
def lookupFunction(
name: FunctionIdentifier,
children: Seq[Expression]): Expression = synchronized {
// Note: the implementation of this function is a little bit convoluted.
// We probably shouldn't use a single FunctionRegistry to register all three kinds of functions
// (built-in, temp, and external).
if (name.database.isEmpty && functionRegistry.functionExists(name)) {
// This function has been already loaded into the function registry.
return functionRegistry.lookupFunction(name, children)
}
// If the name itself is not qualified, add the current database to it.
val database = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val qualifiedName = name.copy(database = Some(database))
if (functionRegistry.functionExists(qualifiedName)) {
// This function has been already loaded into the function registry.
// Unlike the above block, we find this function by using the qualified name.
return functionRegistry.lookupFunction(qualifiedName, children)
}
// The function has not been loaded to the function registry, which means
// that the function is a permanent function (if it actually has been registered
// in the metastore). We need to first put the function in the FunctionRegistry.
// TODO: why not just check whether the function exists first?
val catalogFunction = try {
externalCatalog.getFunction(database, name.funcName)
} catch {
case _: AnalysisException => failFunctionLookup(name)
case _: NoSuchPermanentFunctionException => failFunctionLookup(name)
}
loadFunctionResources(catalogFunction.resources)
    // Please note that qualifiedName is provided by the user. However,
    // catalogFunction.identifier.unquotedString is returned by the underlying
    // catalog. So, it is possible that qualifiedName is not exactly the same as
    // catalogFunction.identifier.unquotedString (they may differ in case).
    // Here, we preserve the input from the user.
registerFunction(catalogFunction.copy(identifier = qualifiedName), overrideIfExists = false)
// Now, we need to create the Expression.
functionRegistry.lookupFunction(qualifiedName, children)
}
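  // Illustrative sketch (added for clarity, not part of the original source): assuming a
  // SessionCatalog instance `catalog` and an already-created permanent function `mydb.myUpper`,
  // a lookup could look roughly like
  //   catalog.lookupFunction(FunctionIdentifier("myUpper", Some("mydb")), Seq(Literal("a")))
  // The first call loads the function's resources and registers a builder under
  // "mydb.myUpper" in the FunctionRegistry; subsequent calls resolve directly from the registry.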
/**
* List all functions in the specified database, including temporary functions. This
* returns the function identifier and the scope in which it was defined (system or user
* defined).
*/
def listFunctions(db: String): Seq[(FunctionIdentifier, String)] = listFunctions(db, "*")
/**
* List all matching functions in the specified database, including temporary functions. This
* returns the function identifier and the scope in which it was defined (system or user
* defined).
*/
def listFunctions(db: String, pattern: String): Seq[(FunctionIdentifier, String)] = {
val dbName = formatDatabaseName(db)
requireDbExists(dbName)
val dbFunctions = externalCatalog.listFunctions(dbName, pattern).map { f =>
FunctionIdentifier(f, Some(dbName)) }
val loadedFunctions = StringUtils
.filterPattern(functionRegistry.listFunction().map(_.unquotedString), pattern).map { f =>
        // In functionRegistry, function names are stored in an unquoted format.
Try(parser.parseFunctionIdentifier(f)) match {
case Success(e) => e
case Failure(_) =>
// The names of some built-in functions are not parsable by our parser, e.g., %
FunctionIdentifier(f)
}
}
val functions = dbFunctions ++ loadedFunctions
// The session catalog caches some persistent functions in the FunctionRegistry
// so there can be duplicates.
functions.map {
case f if FunctionRegistry.functionSet.contains(f) => (f, "SYSTEM")
case f => (f, "USER")
}.distinct
}
// -----------------
// | Other methods |
// -----------------
/**
* Drop all existing databases (except "default"), tables, partitions and functions,
* and set the current database to "default".
*
* This is mainly used for tests.
*/
def reset(): Unit = synchronized {
setCurrentDatabase(DEFAULT_DATABASE)
externalCatalog.setCurrentDatabase(DEFAULT_DATABASE)
listDatabases().filter(_ != DEFAULT_DATABASE).foreach { db =>
dropDatabase(db, ignoreIfNotExists = false, cascade = true)
}
listTables(DEFAULT_DATABASE).foreach { table =>
dropTable(table, ignoreIfNotExists = false, purge = false)
}
listFunctions(DEFAULT_DATABASE).map(_._1).foreach { func =>
if (func.database.isDefined) {
dropFunction(func, ignoreIfNotExists = false)
} else {
dropTempFunction(func.funcName, ignoreIfNotExists = false)
}
}
clearTempTables()
globalTempViewManager.clear()
functionRegistry.clear()
tableRelationCache.invalidateAll()
// restore built-in functions
FunctionRegistry.builtin.listFunction().foreach { f =>
val expressionInfo = FunctionRegistry.builtin.lookupFunction(f)
val functionBuilder = FunctionRegistry.builtin.lookupFunctionBuilder(f)
require(expressionInfo.isDefined, s"built-in function '$f' is missing expression info")
require(functionBuilder.isDefined, s"built-in function '$f' is missing function builder")
functionRegistry.registerFunction(f, expressionInfo.get, functionBuilder.get)
}
}
/**
* Copy the current state of the catalog to another catalog.
*
* This function is synchronized on this [[SessionCatalog]] (the source) to make sure the copied
* state is consistent. The target [[SessionCatalog]] is not synchronized, and should not be
* because the target [[SessionCatalog]] should not be published at this point. The caller must
* synchronize on the target if this assumption does not hold.
*/
private[sql] def copyStateTo(target: SessionCatalog): Unit = synchronized {
target.currentDb = currentDb
// copy over temporary views
tempViews.foreach(kv => target.tempViews.put(kv._1, kv._2))
}
/**
   * Validate the new location before renaming a managed table; the new location must not already exist.
*/
private def validateNewLocationOfRename(
oldName: TableIdentifier,
newName: TableIdentifier): Unit = {
val oldTable = getTableMetadata(oldName)
if (oldTable.tableType == CatalogTableType.MANAGED) {
val databaseLocation =
externalCatalog.getDatabase(oldName.database.getOrElse(currentDb)).locationUri
val newTableLocation = new Path(new Path(databaseLocation), formatTableName(newName.table))
val fs = newTableLocation.getFileSystem(hadoopConf)
if (fs.exists(newTableLocation)) {
throw new AnalysisException(s"Can not rename the managed table('$oldName')" +
s". The associated location('$newTableLocation') already exists.")
}
}
}
}
| kiszk/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala | Scala | apache-2.0 | 60,257 |
package com.esri.dbscan
import com.esri.dbscan.Status.Status
import scala.collection.mutable
/**
* Density Based Clusterer.
*
* @param minPoints the min number of points in a cluster.
* @param nnSearch a reference to a NNSearch implementation.
*/
class DBSCAN[T <: DBSCANPoint](minPoints: Int, nnSearch: NNSearch[T]) extends Serializable {
private case class State(status: Status = Status.UNK, clusterID: Int = -1)
private val stateMap = mutable.Map.empty[Long, State].withDefaultValue(State())
private def _expand(elem: T,
neighbors: Seq[T],
clusterID: Int
): Int = {
stateMap(elem.id) = State(Status.CLUSTERED, clusterID)
/*
val queue = new mutable.Queue[T]()
queue ++= neighbors
while (queue.nonEmpty) {
val neighbor = queue.dequeue
val status = stateMap(neighbor.id).status
if (status == Status.NOISE) {
stateMap(neighbor.id) = State(Status.CORE, clusterID)
}
else if (status == Status.UNK) {
stateMap(neighbor.id) = State(Status.CORE, clusterID)
val neighborNeighbors = nnSearch.neighborsOf(neighbor)
if (neighborNeighbors.size >= minPoints) {
queue ++= neighborNeighbors
}
}
}
*/
val queue = mutable.Queue[Seq[T]](neighbors)
while (queue.nonEmpty) {
queue.dequeue().foreach(neighbor => {
val status = stateMap(neighbor.id).status
status match {
          case Status.CLUSTERED => // Already assigned to a cluster, do nothing
case _ =>
stateMap(neighbor.id) = State(Status.CLUSTERED, clusterID)
val neighbors = nnSearch.neighborsOf(neighbor)
if (neighbors.size >= minPoints) {
queue.enqueue(neighbors)
}
}
})
}
clusterID + 1
}
private def _dbscan(iter: Iterable[T]): Iterable[T] = {
var clusterID = 0
iter.map(elem => {
val status = stateMap(elem.id).status
if (status == Status.UNK) {
val neighbors = nnSearch.neighborsOf(elem)
if (neighbors.size < minPoints) {
stateMap(elem.id) = State(Status.NOISE)
} else {
clusterID = _expand(elem, neighbors, clusterID)
}
}
elem
})
}
/**
* Cluster the input points.
*
* @param iter the points to cluster.
* @return iterable of points to cluster id tuple.
*/
def dbscan(iter: Iterable[T]): Iterable[(T, Int)] = {
_dbscan(iter)
.map(elem => (elem, stateMap(elem.id).clusterID))
}
/**
* Cluster the input points.
*
* @param iter the points to cluster.
* @return iterable of Cluster instance.
*/
def cluster(iter: Iterable[T]): Iterable[Cluster[T]] = {
_dbscan(iter)
.map(elem => stateMap(elem.id).clusterID -> elem)
.groupBy(_._1)
.map {
case (clusterID, iter) => Cluster(clusterID, iter.map(_._2))
}
}
/**
* Cluster the input points, but do not return noise points.
*
* @param iter the points to cluster.
* @return iterable of Cluster instance.
*/
def clusters(iter: Iterable[T]): Iterable[Cluster[T]] = {
_dbscan(iter)
.map(elem => stateMap(elem.id).clusterID -> elem)
.filterNot(_._1 == -1)
.groupBy(_._1)
.map {
case (clusterID, iter) => Cluster(clusterID, iter.map(_._2))
}
}
}
/**
* Companion object.
*/
object DBSCAN extends Serializable {
/**
* Create a DBSCAN instance.
*
* @param minPoints the min number of points in the search distance to start or append to a cluster.
* @param nnSearch Implementation of NNSearch trait.
* @return a DBSCAN instance.
*/
def apply[T <: DBSCANPoint](minPoints: Int, nnSearch: NNSearch[T]): DBSCAN[T] = {
new DBSCAN[T](minPoints, nnSearch)
}
}
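  // Usage sketch (illustrative only; `Point` and `PointNNSearch` are hypothetical
  // implementations of DBSCANPoint and NNSearch, not defined in this project file):
  //   val points: Iterable[Point] = loadPoints()
  //   val dbscan = DBSCAN(minPoints = 4, new PointNNSearch(points, eps = 1.5))
  //   val labeled = dbscan.dbscan(points)    // (point, clusterID) pairs, -1 marks noise
  //   val clusters = dbscan.clusters(points) // clusters only, noise points dropped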
| mraad/dbscan-scala | src/main/scala/com/esri/dbscan/DBSCAN.scala | Scala | apache-2.0 | 3,810 |
package com.xah.chat.comms
import android.content.{Context, ServiceConnection, ComponentName}
import android.os.IBinder
import android.util.Log
class XServiceConnection extends ServiceConnection {
val TAG = "XServiceConnection"
var mService: XService = _
var mBound: Boolean = false
override def onServiceConnected(className: ComponentName, service: IBinder) = {
mService = service.asInstanceOf[XBinder].getService()
mBound = true
Log.d(TAG, "Service Connected")
}
override def onServiceDisconnected(className: ComponentName) = {
mService = null
mBound = false
Log.d(TAG, "Service Disconnected")
}
def isBound = mBound
def getService = mService
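  // Usage sketch (illustrative only): an Activity or other Context would typically bind
  // the service through this connection, e.g.
  //   val connection = new XServiceConnection
  //   context.bindService(new Intent(context, classOf[XService]), connection, Context.BIND_AUTO_CREATE)
  //   // ... later, when done with the service:
  //   if (connection.isBound) context.unbindService(connection)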
} | lemonxah/xaHChat | src/main/scala/com/xah/chat/comms/XServiceConnection.scala | Scala | mit | 695 |
import org.scalatest.{Matchers, FunSuite}
/** @version 1.1.0 */
class MinesweeperTest extends FunSuite with Matchers {
test("no rows") {
Minesweeper.annotate(List()) should be(List())
}
test("no columns") {
pending
Minesweeper.annotate(List("")) should be(List(""))
}
test("no mines") {
pending
Minesweeper.annotate(List(" ",
" ",
" ")) should be(
List(" ",
" ",
" "))
}
test("minefield with only mines") {
pending
Minesweeper.annotate(List("***",
"***",
"***")) should be(
List("***",
"***",
"***"))
}
test("mine surrounded by spaces") {
pending
Minesweeper.annotate(List(" ",
" * ",
" ")) should be(
List("111",
"1*1",
"111"))
}
test("space surrounded by mines") {
pending
Minesweeper.annotate(List("***",
"* *",
"***")) should be(
List("***",
"*8*",
"***"))
}
test("horizontal line") {
pending
Minesweeper.annotate(List(" * * ")) should be(List("1*2*1"))
}
test("horizontal line, mines at edges") {
pending
Minesweeper.annotate(List("* *")) should be(List("*1 1*"))
}
test("vertical line") {
pending
Minesweeper.annotate(List(" ",
"*",
" ",
"*",
" ")) should be(
List("1",
"*",
"2",
"*",
"1"))
}
test("vertical line, mines at edges") {
pending
Minesweeper.annotate(List("*",
" ",
" ",
" ",
"*")) should be(
List("*",
"1",
" ",
"1",
"*"))
}
test("cross") {
pending
Minesweeper.annotate(List(" * ",
" * ",
"*****",
" * ",
" * ")) should be(
List(" 2*2 ",
"25*52",
"*****",
"25*52",
" 2*2 "))
}
test("large minefield") {
pending
Minesweeper.annotate(List(" * * ",
" * ",
" * ",
" * *",
" * * ",
" ")) should be(
List("1*22*1",
"12*322",
" 123*2",
"112*4*",
"1*22*2",
"111111"))
}
}
| exercism/xscala | exercises/practice/minesweeper/src/test/scala/MinesweeperTest.scala | Scala | mit | 2,849 |
package com.roundeights.skene.response
import scala.io.Codec
import scala.actors.Actor
import scala.collection.mutable.MutableList
import java.util.concurrent.atomic.AtomicReference
import java.util.zip.GZIPOutputStream
import javax.servlet.http.{HttpServletResponse, HttpServletRequest}
import javax.servlet.AsyncContext
import com.roundeights.skene.{Response, Renderable, Recover, Cookie, Logger}
/**
* A response that wraps a servlet
*/
class ServletResponse (
async: AsyncContext,
request: HttpServletRequest,
response: HttpServletResponse,
logger: Logger,
requestID: Long
) extends ActorResponse {
/** The status code being sent back */
private val responseCode
= new AtomicReference[Response.Code]( Response.Code.OK )
/** Whether this request supports gzip encoding */
private lazy val gzip = request.getHeader("Accept-Encoding") match {
case null => false
case accept => accept.toLowerCase.contains("gzip")
}
/** The output stream for this response */
private lazy val stream = gzip match {
case true => {
response.setHeader("Content-Encoding", "gzip")
new GZIPOutputStream( response.getOutputStream )
}
case false => response.getOutputStream
}
  /** An actor that is used to communicate with the servlet response */
protected val actor: Actor = Actor.actor {
val data = MutableList[Renderable]()
def flush(): Unit = {
if ( data.length > 0 ) {
data.map( _.render( stream, Codec.UTF8 ) )
data.clear()
stream.flush()
}
}
Actor.loop {
Actor.react {
case Response.Header( field, value ) =>
response.setHeader( field.toString, value )
case code: Response.Code => {
response.setStatus( code.code )
responseCode.set( code )
}
case content: Renderable if request.getMethod != "HEAD"
=> data += content
case cookie: Cookie => response.addCookie(cookie.toJavaCookie)
case _: Response.Flush => flush()
case _: Response.Done => {
flush()
stream.close()
async.complete()
logger.response( requestID, responseCode.get )
Actor.exit()
}
}
}
}
}
| Nycto/Skene | src/main/scala/skene/response/Servlet.scala | Scala | mit | 2,534 |
package com.github.novamage.svalidator.validation.simple.internals
class ConditionedGroupValidationRuleBuilderWrapper[A](conditionalExpression: A => Boolean, ruleBuilders: List[RuleBuilder[A]]) extends RuleBuilder[A] {
protected[validation] def buildRules(instance: A): RuleStreamCollection[A] = {
if (conditionalExpression(instance)) {
val ruleStreamCollections = ruleBuilders.map(_.buildRules(instance))
RuleStreamCollection(ruleStreamCollections.flatMap(_.chains))
} else {
RuleStreamCollection.Empty
}
}
}
| NovaMage/SValidator | src/main/scala/com/github/novamage/svalidator/validation/simple/internals/ConditionedGroupValidationRuleBuilderWrapper.scala | Scala | mit | 547 |
package slinky.native
import slinky.core.ExternalComponent
import slinky.core.annotations.react
import scala.scalajs.js
import scala.scalajs.js.annotation.JSImport
import scala.scalajs.js.|
@react object Picker extends ExternalComponent {
case class Props(
onValueChange: js.UndefOr[(String | Int, Int) => Unit] = js.undefined,
selectedValue: js.UndefOr[String | Int] = js.undefined,
style: js.UndefOr[js.Object] = js.undefined,
testID: js.UndefOr[String] = js.undefined,
enabled: js.UndefOr[Boolean] = js.undefined,
mode: js.UndefOr[String] = js.undefined,
prompt: js.UndefOr[String] = js.undefined,
itemStyle: js.UndefOr[js.Object] = js.undefined
)
@js.native
@JSImport("react-native", "Picker")
object Component extends js.Object {
val Item: js.Object = js.native
}
override val component = Component
@react object Item extends ExternalComponent {
case class Props(label: String, value: String | Int)
override val component = Component.Item
}
}
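// Usage sketch (illustrative, not part of the slinky sources): with the apply methods
// generated by @react, a two-item picker might be rendered roughly as
//   Picker(selectedValue = language, onValueChange = (value, _) => setLanguage(value))(
//     Picker.Item(label = "Java", value = "java"),
//     Picker.Item(label = "Scala", value = "scala")
//   )
// where `language` and `setLanguage` are assumed application state and handler.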
| shadaj/slinky | native/src/main/scala/slinky/native/Picker.scala | Scala | mit | 1,017 |
package nars.logic.entity
import nars.io.Symbols
import TruthValue._
//remove if not needed
import scala.collection.JavaConversions._
object TruthValue {
/**
   The character that marks the two ends of a truth value
*/
private val DELIMITER = Symbols.TRUTH_VALUE_MARK
/**
   The character that separates the factors in a truth value
*/
private val SEPARATOR = Symbols.VALUE_SEPARATOR
}
/**
* Frequency and confidence.
*/
class TruthValue(f: Float, c: Float) extends Cloneable {
/**
The frequency factor of the truth value
*/
private var frequency: ShortFloat = new ShortFloat(f)
/**
The confidence factor of the truth value
*/
private var confidence: ShortFloat = if ((c < 1)) new ShortFloat(c) else new ShortFloat(0.9999f)
/**
* Constructor with a TruthValue to clone
* @param v The truth value to be cloned
*/
def this(v: TruthValue) {
this( (v.getFrequency), v.getConfidence )
}
/**
* Get the frequency value
* @return The frequency value
*/
def getFrequency(): Float = frequency.getValue
/**
* Get the confidence value
* @return The confidence value
*/
def getConfidence(): Float = confidence.getValue
/**
* Calculate the expectation value of the truth value
* @return The expectation value
*/
def getExpectation(): Float = {
(confidence.getValue * (frequency.getValue - 0.5) + 0.5).toFloat
}
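  // Worked example (added for clarity): with frequency 0.9 and confidence 0.8 the
  // expectation is 0.8 * (0.9 - 0.5) + 0.5 = 0.82; with confidence 0 it is always 0.5.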
/**
* Calculate the absolute difference of the expectation value and that of a given truth value
* @param t The given value
* @return The absolute difference
*/
def getExpDifAbs(t: TruthValue): Float = {
Math.abs(getExpectation - t.getExpectation)
}
/**
* Check if the truth value is negative
   * @return True if the frequency is less than 1/2
*/
def isNegative(): Boolean = getFrequency < 0.5
/**
* Compare two truth values
* @param that The other TruthValue
* @return Whether the two are equivalent
*/
override def equals(that: Any): Boolean = {
((that.isInstanceOf[TruthValue]) &&
(getFrequency == that.asInstanceOf[TruthValue].getFrequency) &&
(getConfidence == that.asInstanceOf[TruthValue].getConfidence))
}
/**
* The hash code of a TruthValue
* @return The hash code
*/
override def hashCode(): Int = (getExpectation * 37).toInt
override def clone(): AnyRef = {
new TruthValue(getFrequency, getConfidence)
}
/**
* The String representation of a TruthValue
* @return The String
*/
override def toString(): String = {
DELIMITER + frequency.toString + SEPARATOR + confidence.toString +
DELIMITER
}
/**
   * A simplified String representation of a TruthValue, where each factor is accurate to 1%
* @return The String
*/
def toStringBrief(): String = {
val s1 = DELIMITER + frequency.toStringBrief() + SEPARATOR
val s2 = confidence.toStringBrief()
if (s2 == "1.00") {
s1 + "0.99" + DELIMITER
} else {
s1 + s2 + DELIMITER
}
}
}
| printedheart/opennars | nars_lab_x/nars_scala/src/main/scala/nars/entity/TruthValue.scala | Scala | agpl-3.0 | 3,004 |
/*
Copyright (c) 2016, Rice University
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Rice University
nor the names of its contributors may be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.spark.rdd.cl
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.lang.reflect.Constructor
import org.apache.spark.mllib.linalg.DenseVector
import com.amd.aparapi.internal.model.ClassModel
class ObjectNativeInputBuffers[T](val N : Int, val structSize : Int,
val blockingCopies : Boolean, val constructor : Constructor[_],
val classModel : ClassModel, val structMemberTypes : Option[Array[Int]],
val structMemberOffsets : Option[Array[Long]], val dev_ctx : Long) extends NativeInputBuffers[T] {
val clBuffer : Long = OpenCLBridge.clMalloc(dev_ctx, N * structSize)
val buffer : Long = OpenCLBridge.pin(dev_ctx, clBuffer)
var tocopy : Int = -1
var iter : Int = 0
val bb : ByteBuffer = ByteBuffer.allocate(structSize)
bb.order(ByteOrder.LITTLE_ENDIAN)
override def releaseOpenCLArrays() {
OpenCLBridge.clFree(clBuffer, dev_ctx)
}
override def copyToDevice(argnum : Int, ctx : Long, dev_ctx : Long,
cacheID : CLCacheID, persistent : Boolean) : Int = {
OpenCLBridge.setNativePinnedArrayArg(ctx, dev_ctx, argnum, buffer, clBuffer,
tocopy * structSize)
return 1
}
override def next() : T = {
val new_obj : T = constructor.newInstance().asInstanceOf[T]
bb.clear
OpenCLBridge.copyNativeArrayToJVMArray(buffer, iter * structSize, bb.array,
structSize)
OpenCLBridgeWrapper.readObjectFromStream(new_obj, classModel, bb,
structMemberTypes.get, structMemberOffsets.get)
iter += 1
new_obj
}
override def hasNext() : Boolean = {
iter < tocopy
}
}
| agrippa/spark-swat | swat/src/main/scala/org/apache/spark/rdd/cl/ObjectNativeInputBuffers.scala | Scala | bsd-3-clause | 3,107 |
package skinny.mailer
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
import javax.mail.Session
import org.joda.time.DateTime
import skinny.mailer.implicits.SkinnyMailerImplicits
class SkinnyMessageSpec extends FlatSpec with ShouldMatchers with SkinnyMailerImplicits {
behavior of "SkinnyMessage"
val session = Session.getInstance(new java.util.Properties())
it should "properties" in {
val msg = new SkinnyMessage(session)
msg.sentDate = new DateTime(2013, 12, 1, 0, 0)
msg.sentDate.get should be(new DateTime(2013, 12, 1, 0, 0))
msg.subject = "Subject1"
msg.subject should be(Some("Subject1"))
msg.subject = ("Subject2", "UTF-8")
msg.subject should be(Some("Subject2"))
msg.sender = "[email protected]"
msg.sender.toString should be("[email protected]")
msg.replyTo = "[email protected],[email protected]"
msg.replyTo(0).toString should be("[email protected]")
msg.replyTo(1).toString should be("[email protected]")
msg.header = "header1" -> "h1"
msg.header = "header2" -> "h2"
msg.header = Map("header3" -> "h3", "header4" -> "h4")
msg.header("header1")(0) should be("h1")
msg.header("header2")(0) should be("h2")
msg.header("header3")(0) should be("h3")
msg.header("header4")(0) should be("h4")
msg.from = "[email protected]"
msg.from.map(_.toString) should be(Some("[email protected]"))
msg.recipients = (javax.mail.Message.RecipientType.TO -> "[email protected],[email protected]")
msg.recipients(javax.mail.Message.RecipientType.TO)(0).toString should be("[email protected]")
msg.recipients(javax.mail.Message.RecipientType.TO)(1).toString should be("[email protected]")
msg.filename = "filename"
msg.filename should be("filename")
// data handler
msg.contentLanguage = Array("ja", "en")
msg.contentLanguage(0) should be("ja")
msg.contentLanguage(1) should be("en")
msg.contentMD5 = "".hashCode.toString
msg.contentMD5 should be("".hashCode.toString)
msg.contentID = "contentID"
msg.contentID should be("contentID")
msg.to = "[email protected],[email protected]"
msg.to.length should be(2)
msg.to(0).toString should be("[email protected]")
msg.to(1).toString should be("[email protected]")
msg.cc = "[email protected],[email protected]"
msg.cc.length should be(2)
msg.cc(0).toString should be("[email protected]")
msg.cc(1).toString should be("[email protected]")
msg.cc = Seq("[email protected]", "[email protected]")
msg.cc.length should be(2)
msg.cc(0).toString should be("[email protected]")
msg.cc(1).toString should be("[email protected]")
msg.disposition = "attachment"
msg.disposition should be("attachment")
msg.description = "description"
msg.description should be("description")
msg.mimeVersion = "1.0"
msg.mimeVersion should be("1.0")
}
}
| BlackPrincess/skinny-framework | mailer/src/test/scala/skinny/mailer/SkinnyMessageSpec.scala | Scala | mit | 3,104 |
package org.sisioh.aws4s.dynamodb.model
import com.amazonaws.services.dynamodbv2.model.DeleteTableRequest
import org.sisioh.aws4s.PimpedType
object DeleteTableRequestFactory {
def create(): DeleteTableRequest = new DeleteTableRequest()
def create(tableName: String): DeleteTableRequest = new DeleteTableRequest(tableName)
}
class RichDeleteTableRequest(val underlying: DeleteTableRequest) extends AnyVal with PimpedType[DeleteTableRequest] {
// ---
def tableNameOpt: Option[String] = Option(underlying.getTableName)
def tableNameOpt_=(value: Option[String]): Unit = underlying.setTableName(value.orNull)
def withTableNameOpt(value: Option[String]): DeleteTableRequest = underlying.withTableName(value.orNull)
// ---
}
| everpeace/aws4s | aws4s-dynamodb/src/main/scala/org/sisioh/aws4s/dynamodb/model/RichDeleteTableRequest.scala | Scala | mit | 744 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.text
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.io.{NullWritable, Text}
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.hadoop.mapreduce.{Job, RecordWriter, TaskAttemptContext}
import org.apache.hadoop.mapreduce.lib.output.{FileOutputFormat, TextOutputFormat}
import org.apache.hadoop.util.ReflectionUtils
import org.apache.spark.TaskContext
import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.catalyst.expressions.codegen.{BufferHolder, UnsafeRowWriter}
import org.apache.spark.sql.catalyst.util.CompressionCodecs
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types.{StringType, StructType}
import org.apache.spark.util.SerializableConfiguration
/**
* A data source for reading text files.
*/
class TextFileFormat extends TextBasedFileFormat with DataSourceRegister {
override def shortName(): String = "text"
override def toString: String = "Text"
private def verifySchema(schema: StructType): Unit = {
if (schema.size != 1) {
throw new AnalysisException(
s"Text data source supports only a single column, and you have ${schema.size} columns.")
}
val tpe = schema(0).dataType
if (tpe != StringType) {
throw new AnalysisException(
s"Text data source supports only a string column, but you have ${tpe.simpleString}.")
}
}
override def inferSchema(
sparkSession: SparkSession,
options: Map[String, String],
files: Seq[FileStatus]): Option[StructType] = Some(new StructType().add("value", StringType))
override def prepareWrite(
sparkSession: SparkSession,
job: Job,
options: Map[String, String],
dataSchema: StructType): OutputWriterFactory = {
verifySchema(dataSchema)
val conf = job.getConfiguration
val compressionCodec = options.get("compression").map(CompressionCodecs.getCodecClassName)
compressionCodec.foreach { codec =>
CompressionCodecs.setCodecConfiguration(conf, codec)
}
new OutputWriterFactory {
override def newInstance(
path: String,
dataSchema: StructType,
context: TaskAttemptContext): OutputWriter = {
new TextOutputWriter(path, dataSchema, context)
}
override def getFileExtension(context: TaskAttemptContext): String = {
".txt" + TextOutputWriter.getCompressionExtension(context)
}
}
}
override def buildReader(
sparkSession: SparkSession,
dataSchema: StructType,
partitionSchema: StructType,
requiredSchema: StructType,
filters: Seq[Filter],
options: Map[String, String],
hadoopConf: Configuration): PartitionedFile => Iterator[InternalRow] = {
assert(
requiredSchema.length <= 1,
"Text data source only produces a single data column named \\"value\\".")
val broadcastedHadoopConf =
sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))
(file: PartitionedFile) => {
val reader = new HadoopFileLinesReader(file, broadcastedHadoopConf.value.value)
Option(TaskContext.get()).foreach(_.addTaskCompletionListener(_ => reader.close()))
if (requiredSchema.isEmpty) {
val emptyUnsafeRow = new UnsafeRow(0)
reader.map(_ => emptyUnsafeRow)
} else {
val unsafeRow = new UnsafeRow(1)
val bufferHolder = new BufferHolder(unsafeRow)
val unsafeRowWriter = new UnsafeRowWriter(bufferHolder, 1)
reader.map { line =>
// Writes to an UnsafeRow directly
bufferHolder.reset()
unsafeRowWriter.write(0, line.getBytes, 0, line.getLength)
unsafeRow.setTotalSize(bufferHolder.totalSize())
unsafeRow
}
}
}
}
}
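// Usage sketch (added for illustration; this shows the public DataFrameReader/Writer API rather
// than code in this file): the "text" format registered above backs calls such as
//   val df = spark.read.format("text").load("/path/to/input")     // single "value" column
//   df.write.option("compression", "gzip").text("/path/to/output")
// where `spark` is an active SparkSession and the paths are placeholders.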
class TextOutputWriter(
path: String,
dataSchema: StructType,
context: TaskAttemptContext)
extends OutputWriter {
private[this] val buffer = new Text()
private val recordWriter: RecordWriter[NullWritable, Text] = {
new TextOutputFormat[NullWritable, Text]() {
override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = {
new Path(path)
}
}.getRecordWriter(context)
}
override def write(row: Row): Unit = throw new UnsupportedOperationException("call writeInternal")
override protected[sql] def writeInternal(row: InternalRow): Unit = {
val utf8string = row.getUTF8String(0)
buffer.set(utf8string.getBytes)
recordWriter.write(NullWritable.get(), buffer)
}
override def close(): Unit = {
recordWriter.close(context)
}
}
object TextOutputWriter {
  /** Returns the compression codec extension to be used in a file name, e.g. ".gz". */
def getCompressionExtension(context: TaskAttemptContext): String = {
// Set the compression extension, similar to code in TextOutputFormat.getDefaultWorkFile
if (FileOutputFormat.getCompressOutput(context)) {
val codecClass = FileOutputFormat.getOutputCompressorClass(context, classOf[GzipCodec])
ReflectionUtils.newInstance(codecClass, context.getConfiguration).getDefaultExtension
} else {
""
}
}
}
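// A hedged usage sketch of this data source via the public DataFrame API (the paths and the
// `spark` session below are hypothetical, and `$"value"` assumes `import spark.implicits._`).
// Reading produces a single string column named "value"; the "compression" option is the one
// consumed by prepareWrite above.
//   val df = spark.read.text("/tmp/input")                  // schema: value: string
//   df.filter($"value".contains("ERROR"))
//     .write.option("compression", "gzip").text("/tmp/errors")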
| spark0001/spark2.1.1 | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/TextFileFormat.scala | Scala | apache-2.0 | 6,220 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.jdbc
import java.sql.{Connection, Driver, DriverManager, JDBCType, PreparedStatement, ResultSet, ResultSetMetaData, SQLException, SQLFeatureNotSupportedException}
import java.util.Locale
import scala.collection.JavaConverters._
import scala.util.Try
import scala.util.control.NonFatal
import org.apache.spark.TaskContext
import org.apache.spark.executor.InputMetrics
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{AnalysisException, DataFrame, Row}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.Resolver
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions.SpecificInternalRow
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils, GenericArrayData}
import org.apache.spark.sql.connector.catalog.TableChange
import org.apache.spark.sql.execution.datasources.jdbc.connection.ConnectionProvider
import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects, JdbcType}
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.SchemaUtils
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.NextIterator
/**
* Util functions for JDBC tables.
*/
object JdbcUtils extends Logging {
/**
* Returns a factory for creating connections to the given JDBC URL.
*
   * @param options - JDBC options that contain the url, table, and other information.
* @throws IllegalArgumentException if the driver could not open a JDBC connection.
*/
def createConnectionFactory(options: JDBCOptions): () => Connection = {
val driverClass: String = options.driverClass
() => {
DriverRegistry.register(driverClass)
val driver: Driver = DriverManager.getDrivers.asScala.collectFirst {
case d: DriverWrapper if d.wrapped.getClass.getCanonicalName == driverClass => d
case d if d.getClass.getCanonicalName == driverClass => d
}.getOrElse {
throw new IllegalStateException(
s"Did not find registered driver with class $driverClass")
}
val connection = ConnectionProvider.create(driver, options).getConnection()
require(connection != null,
s"The driver could not open a JDBC connection. Check the URL: ${options.url}")
connection
}
}
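  // A minimal usage sketch (the in-memory H2 url and the options map are hypothetical, and this
  // assumes the JDBCOptions constructor that accepts a plain Map of options). The returned
  // thunk is typically invoked once per task so that each executor opens its own connection:
  //   val opts = new JDBCOptions(Map(
  //     "url" -> "jdbc:h2:mem:testdb", "dbtable" -> "people", "driver" -> "org.h2.Driver"))
  //   val getConnection = createConnectionFactory(opts)
  //   val conn = getConnection()
  //   try { /* use conn */ } finally { conn.close() }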
/**
* Returns true if the table already exists in the JDBC database.
*/
def tableExists(conn: Connection, options: JdbcOptionsInWrite): Boolean = {
val dialect = JdbcDialects.get(options.url)
    // Somewhat hacky, but there isn't a good way to identify whether a table exists for all
    // SQL database systems using JDBC meta data calls, considering "table" could also include
    // the database name. The query used to check whether the table exists can be overridden
    // by the dialects.
Try {
val statement = conn.prepareStatement(dialect.getTableExistsQuery(options.table))
try {
statement.setQueryTimeout(options.queryTimeout)
statement.executeQuery()
} finally {
statement.close()
}
}.isSuccess
}
/**
* Drops a table from the JDBC database.
*/
def dropTable(conn: Connection, table: String, options: JDBCOptions): Unit = {
executeStatement(conn, options, s"DROP TABLE $table")
}
/**
* Truncates a table from the JDBC database without side effects.
*/
def truncateTable(conn: Connection, options: JdbcOptionsInWrite): Unit = {
val dialect = JdbcDialects.get(options.url)
val statement = conn.createStatement
try {
statement.setQueryTimeout(options.queryTimeout)
val truncateQuery = if (options.isCascadeTruncate.isDefined) {
dialect.getTruncateQuery(options.table, options.isCascadeTruncate)
} else {
dialect.getTruncateQuery(options.table)
}
statement.executeUpdate(truncateQuery)
} finally {
statement.close()
}
}
def isCascadingTruncateTable(url: String): Option[Boolean] = {
JdbcDialects.get(url).isCascadingTruncateTable()
}
/**
* Returns an Insert SQL statement for inserting a row into the target table via JDBC conn.
*/
def getInsertStatement(
table: String,
rddSchema: StructType,
tableSchema: Option[StructType],
isCaseSensitive: Boolean,
dialect: JdbcDialect): String = {
val columns = if (tableSchema.isEmpty) {
rddSchema.fields.map(x => dialect.quoteIdentifier(x.name)).mkString(",")
} else {
val columnNameEquality = if (isCaseSensitive) {
org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution
} else {
org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution
}
// The generated insert statement needs to follow rddSchema's column sequence and
// tableSchema's column names. When appending data into some case-sensitive DBMSs like
// PostgreSQL/Oracle, we need to respect the existing case-sensitive column names instead of
// RDD column names for user convenience.
val tableColumnNames = tableSchema.get.fieldNames
rddSchema.fields.map { col =>
val normalizedName = tableColumnNames.find(f => columnNameEquality(f, col.name)).getOrElse {
throw new AnalysisException(s"""Column "${col.name}" not found in schema $tableSchema""")
}
dialect.quoteIdentifier(normalizedName)
}.mkString(",")
}
val placeholders = rddSchema.fields.map(_ => "?").mkString(",")
s"INSERT INTO $table ($columns) VALUES ($placeholders)"
}
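  // Illustrative only (identifier quoting depends on the dialect): with no known table schema,
  // an rddSchema of StructType(Seq(StructField("id", IntegerType), StructField("name", StringType)))
  // for a table named "people" would yield roughly:
  //   INSERT INTO people ("id","name") VALUES (?,?)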
/**
* Retrieve standard jdbc types.
*
* @param dt The datatype (e.g. [[org.apache.spark.sql.types.StringType]])
* @return The default JdbcType for this DataType
*/
def getCommonJDBCType(dt: DataType): Option[JdbcType] = {
dt match {
case IntegerType => Option(JdbcType("INTEGER", java.sql.Types.INTEGER))
case LongType => Option(JdbcType("BIGINT", java.sql.Types.BIGINT))
case DoubleType => Option(JdbcType("DOUBLE PRECISION", java.sql.Types.DOUBLE))
case FloatType => Option(JdbcType("REAL", java.sql.Types.FLOAT))
case ShortType => Option(JdbcType("INTEGER", java.sql.Types.SMALLINT))
case ByteType => Option(JdbcType("BYTE", java.sql.Types.TINYINT))
case BooleanType => Option(JdbcType("BIT(1)", java.sql.Types.BIT))
case StringType => Option(JdbcType("TEXT", java.sql.Types.CLOB))
case BinaryType => Option(JdbcType("BLOB", java.sql.Types.BLOB))
case TimestampType => Option(JdbcType("TIMESTAMP", java.sql.Types.TIMESTAMP))
case DateType => Option(JdbcType("DATE", java.sql.Types.DATE))
case t: DecimalType => Option(
JdbcType(s"DECIMAL(${t.precision},${t.scale})", java.sql.Types.DECIMAL))
case _ => None
}
}
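  // For example, reading the mapping above directly:
  //   getCommonJDBCType(StringType)            == Some(JdbcType("TEXT", java.sql.Types.CLOB))
  //   getCommonJDBCType(DecimalType(10, 2))    == Some(JdbcType("DECIMAL(10,2)", java.sql.Types.DECIMAL))
  //   getCommonJDBCType(CalendarIntervalType)  == None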
def getJdbcType(dt: DataType, dialect: JdbcDialect): JdbcType = {
dialect.getJDBCType(dt).orElse(getCommonJDBCType(dt)).getOrElse(
throw new IllegalArgumentException(s"Can't get JDBC type for ${dt.catalogString}"))
}
/**
* Maps a JDBC type to a Catalyst type. This function is called only when
* the JdbcDialect class corresponding to your database driver returns null.
*
* @param sqlType - A field of java.sql.Types
* @return The Catalyst type corresponding to sqlType.
*/
private def getCatalystType(
sqlType: Int,
precision: Int,
scale: Int,
signed: Boolean): DataType = {
val answer = sqlType match {
// scalastyle:off
case java.sql.Types.ARRAY => null
case java.sql.Types.BIGINT => if (signed) { LongType } else { DecimalType(20,0) }
case java.sql.Types.BINARY => BinaryType
case java.sql.Types.BIT => BooleanType // @see JdbcDialect for quirks
case java.sql.Types.BLOB => BinaryType
case java.sql.Types.BOOLEAN => BooleanType
case java.sql.Types.CHAR => StringType
case java.sql.Types.CLOB => StringType
case java.sql.Types.DATALINK => null
case java.sql.Types.DATE => DateType
case java.sql.Types.DECIMAL
if precision != 0 || scale != 0 => DecimalType.bounded(precision, scale)
case java.sql.Types.DECIMAL => DecimalType.SYSTEM_DEFAULT
case java.sql.Types.DISTINCT => null
case java.sql.Types.DOUBLE => DoubleType
case java.sql.Types.FLOAT => FloatType
case java.sql.Types.INTEGER => if (signed) { IntegerType } else { LongType }
case java.sql.Types.JAVA_OBJECT => null
case java.sql.Types.LONGNVARCHAR => StringType
case java.sql.Types.LONGVARBINARY => BinaryType
case java.sql.Types.LONGVARCHAR => StringType
case java.sql.Types.NCHAR => StringType
case java.sql.Types.NCLOB => StringType
case java.sql.Types.NULL => null
case java.sql.Types.NUMERIC
if precision != 0 || scale != 0 => DecimalType.bounded(precision, scale)
case java.sql.Types.NUMERIC => DecimalType.SYSTEM_DEFAULT
case java.sql.Types.NVARCHAR => StringType
case java.sql.Types.OTHER => null
case java.sql.Types.REAL => DoubleType
case java.sql.Types.REF => StringType
case java.sql.Types.REF_CURSOR => null
case java.sql.Types.ROWID => LongType
case java.sql.Types.SMALLINT => IntegerType
case java.sql.Types.SQLXML => StringType
case java.sql.Types.STRUCT => StringType
case java.sql.Types.TIME => TimestampType
case java.sql.Types.TIME_WITH_TIMEZONE
=> null
case java.sql.Types.TIMESTAMP => TimestampType
case java.sql.Types.TIMESTAMP_WITH_TIMEZONE
=> null
case java.sql.Types.TINYINT => IntegerType
case java.sql.Types.VARBINARY => BinaryType
case java.sql.Types.VARCHAR => StringType
case _ =>
throw new SQLException("Unrecognized SQL type " + sqlType)
// scalastyle:on
}
if (answer == null) {
throw new SQLException("Unsupported type " + JDBCType.valueOf(sqlType).getName)
}
answer
}
/**
* Returns the schema if the table already exists in the JDBC database.
*/
def getSchemaOption(conn: Connection, options: JDBCOptions): Option[StructType] = {
val dialect = JdbcDialects.get(options.url)
try {
val statement = conn.prepareStatement(dialect.getSchemaQuery(options.tableOrQuery))
try {
statement.setQueryTimeout(options.queryTimeout)
Some(getSchema(statement.executeQuery(), dialect))
} catch {
case _: SQLException => None
} finally {
statement.close()
}
} catch {
case _: SQLException => None
}
}
/**
* Takes a [[ResultSet]] and returns its Catalyst schema.
*
* @param alwaysNullable If true, all the columns are nullable.
* @return A [[StructType]] giving the Catalyst schema.
* @throws SQLException if the schema contains an unsupported type.
*/
def getSchema(
resultSet: ResultSet,
dialect: JdbcDialect,
alwaysNullable: Boolean = false): StructType = {
val rsmd = resultSet.getMetaData
val ncols = rsmd.getColumnCount
val fields = new Array[StructField](ncols)
var i = 0
while (i < ncols) {
val columnName = rsmd.getColumnLabel(i + 1)
val dataType = rsmd.getColumnType(i + 1)
val typeName = rsmd.getColumnTypeName(i + 1)
val fieldSize = rsmd.getPrecision(i + 1)
val fieldScale = rsmd.getScale(i + 1)
val isSigned = {
try {
rsmd.isSigned(i + 1)
} catch {
// Workaround for HIVE-14684:
case e: SQLException if
e.getMessage == "Method not supported" &&
rsmd.getClass.getName == "org.apache.hive.jdbc.HiveResultSetMetaData" => true
}
}
val nullable = if (alwaysNullable) {
true
} else {
rsmd.isNullable(i + 1) != ResultSetMetaData.columnNoNulls
}
val metadata = new MetadataBuilder().putLong("scale", fieldScale)
val columnType =
dialect.getCatalystType(dataType, typeName, fieldSize, metadata).getOrElse(
getCatalystType(dataType, fieldSize, fieldScale, isSigned))
fields(i) = StructField(columnName, columnType, nullable)
i = i + 1
}
new StructType(fields)
}
/**
* Convert a [[ResultSet]] into an iterator of Catalyst Rows.
*/
def resultSetToRows(resultSet: ResultSet, schema: StructType): Iterator[Row] = {
val inputMetrics =
Option(TaskContext.get()).map(_.taskMetrics().inputMetrics).getOrElse(new InputMetrics)
val fromRow = RowEncoder(schema).resolveAndBind().createDeserializer()
val internalRows = resultSetToSparkInternalRows(resultSet, schema, inputMetrics)
internalRows.map(fromRow)
}
private[spark] def resultSetToSparkInternalRows(
resultSet: ResultSet,
schema: StructType,
inputMetrics: InputMetrics): Iterator[InternalRow] = {
new NextIterator[InternalRow] {
private[this] val rs = resultSet
private[this] val getters: Array[JDBCValueGetter] = makeGetters(schema)
private[this] val mutableRow = new SpecificInternalRow(schema.fields.map(x => x.dataType))
override protected def close(): Unit = {
try {
rs.close()
} catch {
case e: Exception => logWarning("Exception closing resultset", e)
}
}
override protected def getNext(): InternalRow = {
if (rs.next()) {
inputMetrics.incRecordsRead(1)
var i = 0
while (i < getters.length) {
getters(i).apply(rs, mutableRow, i)
if (rs.wasNull) mutableRow.setNullAt(i)
i = i + 1
}
mutableRow
} else {
finished = true
null.asInstanceOf[InternalRow]
}
}
}
}
// A `JDBCValueGetter` is responsible for getting a value from `ResultSet` into a field
// for `MutableRow`. The last argument `Int` means the index for the value to be set in
// the row and also used for the value in `ResultSet`.
private type JDBCValueGetter = (ResultSet, InternalRow, Int) => Unit
/**
* Creates `JDBCValueGetter`s according to [[StructType]], which can set
* each value from `ResultSet` to each field of [[InternalRow]] correctly.
*/
private def makeGetters(schema: StructType): Array[JDBCValueGetter] =
schema.fields.map(sf => makeGetter(sf.dataType, sf.metadata))
private def makeGetter(dt: DataType, metadata: Metadata): JDBCValueGetter = dt match {
case BooleanType =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
row.setBoolean(pos, rs.getBoolean(pos + 1))
case DateType =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
// DateTimeUtils.fromJavaDate does not handle null value, so we need to check it.
val dateVal = rs.getDate(pos + 1)
if (dateVal != null) {
row.setInt(pos, DateTimeUtils.fromJavaDate(dateVal))
} else {
row.update(pos, null)
}
    // When connecting to Oracle through JDBC, the precision and scale of the BigDecimal
    // object returned by ResultSet.getBigDecimal do not always match the table schema
    // reported by ResultSetMetaData.getPrecision and ResultSetMetaData.getScale.
    // If values like 19999 are inserted into a column with NUMBER(12, 2) type, you get back
    // a BigDecimal object with scale 0, even though the dataframe schema has the correct
    // type DecimalType(12, 2). Thus, after saving the dataframe to a parquet file and then
    // retrieving it, you would get the wrong result 199.99.
    // So the precision and scale of the Decimal need to be set based on the JDBC metadata.
case DecimalType.Fixed(p, s) =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
val decimal =
nullSafeConvert[java.math.BigDecimal](rs.getBigDecimal(pos + 1), d => Decimal(d, p, s))
row.update(pos, decimal)
case DoubleType =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
row.setDouble(pos, rs.getDouble(pos + 1))
case FloatType =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
row.setFloat(pos, rs.getFloat(pos + 1))
case IntegerType =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
row.setInt(pos, rs.getInt(pos + 1))
case LongType if metadata.contains("binarylong") =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
val bytes = rs.getBytes(pos + 1)
var ans = 0L
var j = 0
while (j < bytes.length) {
ans = 256 * ans + (255 & bytes(j))
j = j + 1
}
row.setLong(pos, ans)
case LongType =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
row.setLong(pos, rs.getLong(pos + 1))
case ShortType =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
row.setShort(pos, rs.getShort(pos + 1))
case ByteType =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
row.setByte(pos, rs.getByte(pos + 1))
case StringType =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
// TODO(davies): use getBytes for better performance, if the encoding is UTF-8
row.update(pos, UTF8String.fromString(rs.getString(pos + 1)))
case TimestampType =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
val t = rs.getTimestamp(pos + 1)
if (t != null) {
row.setLong(pos, DateTimeUtils.fromJavaTimestamp(t))
} else {
row.update(pos, null)
}
case BinaryType =>
(rs: ResultSet, row: InternalRow, pos: Int) =>
row.update(pos, rs.getBytes(pos + 1))
case ArrayType(et, _) =>
val elementConversion = et match {
case TimestampType =>
(array: Object) =>
array.asInstanceOf[Array[java.sql.Timestamp]].map { timestamp =>
nullSafeConvert(timestamp, DateTimeUtils.fromJavaTimestamp)
}
case StringType =>
(array: Object) =>
          // some underlying types are not String, such as uuid, inet, cidr, etc.
array.asInstanceOf[Array[java.lang.Object]]
.map(obj => if (obj == null) null else UTF8String.fromString(obj.toString))
case DateType =>
(array: Object) =>
array.asInstanceOf[Array[java.sql.Date]].map { date =>
nullSafeConvert(date, DateTimeUtils.fromJavaDate)
}
case dt: DecimalType =>
(array: Object) =>
array.asInstanceOf[Array[java.math.BigDecimal]].map { decimal =>
nullSafeConvert[java.math.BigDecimal](
decimal, d => Decimal(d, dt.precision, dt.scale))
}
case LongType if metadata.contains("binarylong") =>
throw new IllegalArgumentException(s"Unsupported array element " +
s"type ${dt.catalogString} based on binary")
case ArrayType(_, _) =>
throw new IllegalArgumentException("Nested arrays unsupported")
case _ => (array: Object) => array.asInstanceOf[Array[Any]]
}
(rs: ResultSet, row: InternalRow, pos: Int) =>
val array = nullSafeConvert[java.sql.Array](
input = rs.getArray(pos + 1),
array => new GenericArrayData(elementConversion.apply(array.getArray)))
row.update(pos, array)
case _ => throw new IllegalArgumentException(s"Unsupported type ${dt.catalogString}")
}
private def nullSafeConvert[T](input: T, f: T => Any): Any = {
if (input == null) {
null
} else {
f(input)
}
}
// A `JDBCValueSetter` is responsible for setting a value from `Row` into a field for
// `PreparedStatement`. The last argument `Int` means the index for the value to be set
// in the SQL statement and also used for the value in `Row`.
private type JDBCValueSetter = (PreparedStatement, Row, Int) => Unit
private def makeSetter(
conn: Connection,
dialect: JdbcDialect,
dataType: DataType): JDBCValueSetter = dataType match {
case IntegerType =>
(stmt: PreparedStatement, row: Row, pos: Int) =>
stmt.setInt(pos + 1, row.getInt(pos))
case LongType =>
(stmt: PreparedStatement, row: Row, pos: Int) =>
stmt.setLong(pos + 1, row.getLong(pos))
case DoubleType =>
(stmt: PreparedStatement, row: Row, pos: Int) =>
stmt.setDouble(pos + 1, row.getDouble(pos))
case FloatType =>
(stmt: PreparedStatement, row: Row, pos: Int) =>
stmt.setFloat(pos + 1, row.getFloat(pos))
case ShortType =>
(stmt: PreparedStatement, row: Row, pos: Int) =>
stmt.setInt(pos + 1, row.getShort(pos))
case ByteType =>
(stmt: PreparedStatement, row: Row, pos: Int) =>
stmt.setInt(pos + 1, row.getByte(pos))
case BooleanType =>
(stmt: PreparedStatement, row: Row, pos: Int) =>
stmt.setBoolean(pos + 1, row.getBoolean(pos))
case StringType =>
(stmt: PreparedStatement, row: Row, pos: Int) =>
stmt.setString(pos + 1, row.getString(pos))
case BinaryType =>
(stmt: PreparedStatement, row: Row, pos: Int) =>
stmt.setBytes(pos + 1, row.getAs[Array[Byte]](pos))
case TimestampType =>
(stmt: PreparedStatement, row: Row, pos: Int) =>
stmt.setTimestamp(pos + 1, row.getAs[java.sql.Timestamp](pos))
case DateType =>
(stmt: PreparedStatement, row: Row, pos: Int) =>
stmt.setDate(pos + 1, row.getAs[java.sql.Date](pos))
case t: DecimalType =>
(stmt: PreparedStatement, row: Row, pos: Int) =>
stmt.setBigDecimal(pos + 1, row.getDecimal(pos))
case ArrayType(et, _) =>
// remove type length parameters from end of type name
val typeName = getJdbcType(et, dialect).databaseTypeDefinition
        .toLowerCase(Locale.ROOT).split("\\(")(0)
(stmt: PreparedStatement, row: Row, pos: Int) =>
val array = conn.createArrayOf(
typeName,
row.getSeq[AnyRef](pos).toArray)
stmt.setArray(pos + 1, array)
case _ =>
(_: PreparedStatement, _: Row, pos: Int) =>
throw new IllegalArgumentException(
s"Can't translate non-null value for field $pos")
}
/**
* Saves a partition of a DataFrame to the JDBC database. This is done in
* a single database transaction (unless isolation level is "NONE")
* in order to avoid repeatedly inserting data as much as possible.
*
* It is still theoretically possible for rows in a DataFrame to be
* inserted into the database more than once if a stage somehow fails after
* the commit occurs but before the stage can return successfully.
*
* This is not a closure inside saveTable() because apparently cosmetic
* implementation changes elsewhere might easily render such a closure
* non-Serializable. Instead, we explicitly close over all variables that
* are used.
*
* Note that this method records task output metrics. It assumes the method is
   * running in a task. For now, we only record the number of rows being written,
   * because there's no good way to measure the total bytes being written. Only
   * effective outputs are taken into account: for example, the metric is not updated
   * when transactions are supported and the transaction is rolled back, but it is
   * updated even on error when transactions are unsupported, since there are dirty outputs.
*/
def savePartition(
getConnection: () => Connection,
table: String,
iterator: Iterator[Row],
rddSchema: StructType,
insertStmt: String,
batchSize: Int,
dialect: JdbcDialect,
isolationLevel: Int,
options: JDBCOptions): Unit = {
val outMetrics = TaskContext.get().taskMetrics().outputMetrics
val conn = getConnection()
var committed = false
var finalIsolationLevel = Connection.TRANSACTION_NONE
if (isolationLevel != Connection.TRANSACTION_NONE) {
try {
val metadata = conn.getMetaData
if (metadata.supportsTransactions()) {
// Update to at least use the default isolation, if any transaction level
// has been chosen and transactions are supported
val defaultIsolation = metadata.getDefaultTransactionIsolation
finalIsolationLevel = defaultIsolation
if (metadata.supportsTransactionIsolationLevel(isolationLevel)) {
// Finally update to actually requested level if possible
finalIsolationLevel = isolationLevel
} else {
logWarning(s"Requested isolation level $isolationLevel is not supported; " +
s"falling back to default isolation level $defaultIsolation")
}
} else {
logWarning(s"Requested isolation level $isolationLevel, but transactions are unsupported")
}
} catch {
case NonFatal(e) => logWarning("Exception while detecting transaction support", e)
}
}
val supportsTransactions = finalIsolationLevel != Connection.TRANSACTION_NONE
var totalRowCount = 0L
try {
if (supportsTransactions) {
conn.setAutoCommit(false) // Everything in the same db transaction.
conn.setTransactionIsolation(finalIsolationLevel)
}
val stmt = conn.prepareStatement(insertStmt)
val setters = rddSchema.fields.map(f => makeSetter(conn, dialect, f.dataType))
val nullTypes = rddSchema.fields.map(f => getJdbcType(f.dataType, dialect).jdbcNullType)
val numFields = rddSchema.fields.length
try {
var rowCount = 0
stmt.setQueryTimeout(options.queryTimeout)
while (iterator.hasNext) {
val row = iterator.next()
var i = 0
while (i < numFields) {
if (row.isNullAt(i)) {
stmt.setNull(i + 1, nullTypes(i))
} else {
setters(i).apply(stmt, row, i)
}
i = i + 1
}
stmt.addBatch()
rowCount += 1
totalRowCount += 1
if (rowCount % batchSize == 0) {
stmt.executeBatch()
rowCount = 0
}
}
if (rowCount > 0) {
stmt.executeBatch()
}
} finally {
stmt.close()
}
if (supportsTransactions) {
conn.commit()
}
committed = true
} catch {
case e: SQLException =>
val cause = e.getNextException
if (cause != null && e.getCause != cause) {
// If there is no cause already, set 'next exception' as cause. If cause is null,
// it *may* be because no cause was set yet
if (e.getCause == null) {
try {
e.initCause(cause)
} catch {
// Or it may be null because the cause *was* explicitly initialized, to *null*,
// in which case this fails. There is no other way to detect it.
// addSuppressed in this case as well.
case _: IllegalStateException => e.addSuppressed(cause)
}
} else {
e.addSuppressed(cause)
}
}
throw e
} finally {
if (!committed) {
// The stage must fail. We got here through an exception path, so
// let the exception through unless rollback() or close() want to
// tell the user about another problem.
if (supportsTransactions) {
conn.rollback()
} else {
outMetrics.setRecordsWritten(totalRowCount)
}
conn.close()
} else {
outMetrics.setRecordsWritten(totalRowCount)
// The stage must succeed. We cannot propagate any exception close() might throw.
try {
conn.close()
} catch {
case e: Exception => logWarning("Transaction succeeded, but closing failed", e)
}
}
}
}
/**
* Compute the schema string for this RDD.
*/
def schemaString(
schema: StructType,
caseSensitive: Boolean,
url: String,
createTableColumnTypes: Option[String] = None): String = {
val sb = new StringBuilder()
val dialect = JdbcDialects.get(url)
val userSpecifiedColTypesMap = createTableColumnTypes
.map(parseUserSpecifiedCreateTableColumnTypes(schema, caseSensitive, _))
.getOrElse(Map.empty[String, String])
schema.fields.foreach { field =>
val name = dialect.quoteIdentifier(field.name)
val typ = userSpecifiedColTypesMap
.getOrElse(field.name, getJdbcType(field.dataType, dialect).databaseTypeDefinition)
val nullable = if (field.nullable) "" else "NOT NULL"
sb.append(s", $name $typ $nullable")
}
if (sb.length < 2) "" else sb.substring(2)
}
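  // Illustrative only (actual type names come from the dialect resolved for the url): for a
  // non-nullable "id" IntegerType and a nullable "name" StringType, with no
  // createTableColumnTypes override, the default mappings produce something like:
  //   "id" INTEGER NOT NULL, "name" TEXT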
/**
   * Parses the user-specified createTableColumnTypes option value, a string in the same
   * format as CREATE TABLE DDL column types, and returns a Map from field name to the data
   * type to use in place of the default data type.
*/
private def parseUserSpecifiedCreateTableColumnTypes(
schema: StructType,
caseSensitive: Boolean,
createTableColumnTypes: String): Map[String, String] = {
def typeName(f: StructField): String = {
// char/varchar gets translated to string type. Real data type specified by the user
// is available in the field metadata as HIVE_TYPE_STRING
if (f.metadata.contains(HIVE_TYPE_STRING)) {
f.metadata.getString(HIVE_TYPE_STRING)
} else {
f.dataType.catalogString
}
}
val userSchema = CatalystSqlParser.parseTableSchema(createTableColumnTypes)
val nameEquality = if (caseSensitive) {
org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution
} else {
org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution
}
// checks duplicate columns in the user specified column types.
SchemaUtils.checkColumnNameDuplication(
userSchema.map(_.name), "in the createTableColumnTypes option value", nameEquality)
// checks if user specified column names exist in the DataFrame schema
userSchema.fieldNames.foreach { col =>
schema.find(f => nameEquality(f.name, col)).getOrElse {
throw new AnalysisException(
s"createTableColumnTypes option column $col not found in schema " +
schema.catalogString)
}
}
val userSchemaMap = userSchema.fields.map(f => f.name -> typeName(f)).toMap
if (caseSensitive) userSchemaMap else CaseInsensitiveMap(userSchemaMap)
}
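  // A hedged example of the option value this parses (the DataFrameWriter call, url and
  // connectionProperties are hypothetical; column names must exist in the DataFrame schema):
  //   df.write
  //     .option("createTableColumnTypes", "name CHAR(64), comments VARCHAR(1024)")
  //     .jdbc(url, "people", connectionProperties)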
/**
   * Parses the user-specified customSchema option value into a DataFrame schema, and
   * returns the table schema with each column's dataType replaced by the custom schema's
   * dataType when the column names match.
*/
def getCustomSchema(
tableSchema: StructType,
customSchema: String,
nameEquality: Resolver): StructType = {
if (null != customSchema && customSchema.nonEmpty) {
val userSchema = CatalystSqlParser.parseTableSchema(customSchema)
SchemaUtils.checkSchemaColumnNameDuplication(
userSchema,
"in the customSchema option value",
nameEquality)
      // This is resolved by name; use the custom field's dataType to replace the default dataType.
val newSchema = tableSchema.map { col =>
userSchema.find(f => nameEquality(f.name, col.name)) match {
case Some(c) => col.copy(dataType = c.dataType)
case None => col
}
}
StructType(newSchema)
} else {
tableSchema
}
}
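  // A hedged example: with customSchema = "id DECIMAL(38, 0), name STRING", columns of
  // tableSchema whose names match keep their name and nullability but take the user-supplied
  // data type; unmatched columns are returned unchanged. The option is typically supplied as:
  //   spark.read.format("jdbc")
  //     .option("url", url).option("dbtable", "people")
  //     .option("customSchema", "id DECIMAL(38, 0), name STRING")
  //     .load()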
/**
   * Saves the RDD to the database. Each partition is written in its own transaction.
*/
def saveTable(
df: DataFrame,
tableSchema: Option[StructType],
isCaseSensitive: Boolean,
options: JdbcOptionsInWrite): Unit = {
val url = options.url
val table = options.table
val dialect = JdbcDialects.get(url)
val rddSchema = df.schema
val getConnection: () => Connection = createConnectionFactory(options)
val batchSize = options.batchSize
val isolationLevel = options.isolationLevel
val insertStmt = getInsertStatement(table, rddSchema, tableSchema, isCaseSensitive, dialect)
val repartitionedDF = options.numPartitions match {
case Some(n) if n <= 0 => throw new IllegalArgumentException(
s"Invalid value `$n` for parameter `${JDBCOptions.JDBC_NUM_PARTITIONS}` in table writing " +
"via JDBC. The minimum value is 1.")
case Some(n) if n < df.rdd.getNumPartitions => df.coalesce(n)
case _ => df
}
repartitionedDF.rdd.foreachPartition { iterator => savePartition(
getConnection, table, iterator, rddSchema, insertStmt, batchSize, dialect, isolationLevel,
options)
}
}
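  // Roughly speaking, this is the code path behind the public writer API; a hedged sketch of
  // that entry point (the url, table name and connectionProperties are hypothetical):
  //   df.write.mode("append").jdbc("jdbc:postgresql://host/db", "people", connectionProperties)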
/**
* Creates a table with a given schema.
*/
def createTable(
conn: Connection,
tableName: String,
schema: StructType,
caseSensitive: Boolean,
options: JdbcOptionsInWrite): Unit = {
val strSchema = schemaString(
schema, caseSensitive, options.url, options.createTableColumnTypes)
val createTableOptions = options.createTableOptions
    // Create the table if it does not exist.
    // Certain options, such as table_options or partition_options, can be appended when
    // creating a new table.
// E.g., "CREATE TABLE t (name string) ENGINE=InnoDB DEFAULT CHARSET=utf8"
val sql = s"CREATE TABLE $tableName ($strSchema) $createTableOptions"
executeStatement(conn, options, sql)
}
/**
   * Renames a table in the JDBC database.
*/
def renameTable(
conn: Connection,
oldTable: String,
newTable: String,
options: JDBCOptions): Unit = {
val dialect = JdbcDialects.get(options.url)
executeStatement(conn, options, dialect.renameTable(oldTable, newTable))
}
/**
   * Alters a table in the JDBC database by applying the given changes.
*/
def alterTable(
conn: Connection,
tableName: String,
changes: Seq[TableChange],
options: JDBCOptions): Unit = {
val dialect = JdbcDialects.get(options.url)
if (changes.length == 1) {
executeStatement(conn, options, dialect.alterTable(tableName, changes)(0))
} else {
val metadata = conn.getMetaData
if (!metadata.supportsTransactions) {
throw new SQLFeatureNotSupportedException("The target JDBC server does not support " +
"transaction and can only support ALTER TABLE with a single action.")
} else {
conn.setAutoCommit(false)
val statement = conn.createStatement
try {
statement.setQueryTimeout(options.queryTimeout)
for (sql <- dialect.alterTable(tableName, changes)) {
statement.executeUpdate(sql)
}
conn.commit()
} catch {
case e: Exception =>
if (conn != null) conn.rollback()
throw e
} finally {
statement.close()
conn.setAutoCommit(true)
}
}
}
}
private def executeStatement(conn: Connection, options: JDBCOptions, sql: String): Unit = {
val statement = conn.createStatement
try {
statement.setQueryTimeout(options.queryTimeout)
statement.executeUpdate(sql)
} finally {
statement.close()
}
}
}
| rednaxelafx/apache-spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala | Scala | apache-2.0 | 36,209 |
/*
* ParticleBeliefPropagation.scala
 * Trait for performing particle belief propagation.
*
* Created By: Brian Ruttenberg ([email protected])
* Creation Date: Oct 20, 2014
*
* Copyright 2014 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email [email protected] for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.experimental.particlebp
import com.cra.figaro.language._
import com.cra.figaro.algorithm.factored.FactoredAlgorithm
import com.cra.figaro.algorithm.factored.factors.{ Factory, DivideableSemiRing, Factor, LogSumProductSemiring, Variable }
import com.cra.figaro.algorithm.lazyfactored.LazyValues
import com.cra.figaro.algorithm.OneTime
import com.cra.figaro.algorithm.Anytime
import com.cra.figaro.algorithm.ProbQueryAlgorithm
import com.cra.figaro.algorithm.OneTimeProbQuery
import scala.collection.immutable.Set
import scala.collection.mutable.Map
import com.cra.figaro.algorithm.factored.beliefpropagation.InnerBPHandler
import com.cra.figaro.algorithm.factored.beliefpropagation.OneTimeInnerBPHandler
import com.cra.figaro.algorithm.factored.beliefpropagation.VariableNode
import com.cra.figaro.algorithm.factored.ParticleGenerator
import com.cra.figaro.algorithm.factored.DensityEstimator
import com.cra.figaro.algorithm.AnytimeProbQuery
import com.cra.figaro.algorithm.factored.beliefpropagation.AnytimeInnerBPHandler
import com.cra.figaro.algorithm.factored.beliefpropagation.FactorNode
import com.cra.figaro.algorithm.factored.beliefpropagation.Node
import breeze.linalg.normalize
import com.cra.figaro.algorithm.UnsupportedAlgorithmException
import com.cra.figaro.algorithm.sampling.ProbEvidenceSampler
/**
* Trait for performing particle belief propagation.
*
* Only supports Double factors at the moment (i.e., no support for utilities or sufficient statistics)
*/
trait ParticleBeliefPropagation extends FactoredAlgorithm[Double] with InnerBPHandler {
/**
* By default, implementations that inherit this trait have no debug information.
* Override this if you want a debugging option.
*/
val debug: Boolean = false
/**
* The universe on which this belief propagation algorithm should be applied.
*/
val universe: Universe
/**
* Target elements that should not be eliminated but should be available for querying.
*/
val targetElements: List[Element[_]]
/**
* Since BP uses division to compute messages, the semiring has to have a division function defined
*/
override val semiring: DivideableSemiRing[Double]
/**
   * The density estimator that will estimate the density of a particle; used for resampling.
*/
val densityEstimator: DensityEstimator
/**
* A particle generator to generate particles and do resampling.
*/
val pbpSampler: ParticleGenerator
/**
* Elements towards which queries are directed. By default, these are the target elements.
*/
def starterElements: List[Element[_]] = targetElements
/**
* A list of universes that depend on this universe such that evidence on those universes should be taken into
* account in this universe.
*/
val dependentUniverses: List[(Universe, List[NamedEvidence[_]])]
/**
* The algorithm to compute probability of specified evidence in a dependent universe.
* We use () => Double to represent this algorithm instead of an instance of ProbEvidenceAlgorithm.
* Typical usage is to return the result of ProbEvidenceAlgorithm.computeProbEvidence when invoked.
*/
val dependentAlgorithm: (Universe, List[NamedEvidence[_]]) => () => Double
/*
* Runs the inner loop of PBP.
*
*/
private[figaro] def runInnerLoop(elemsWithPosteriors: Set[Element[_]], dependentElems: Set[Element[_]]) = {
currentUniverse = universe
    // Remove factors on all elements that can possibly change during resampling
dependentElems.foreach(Factory.removeFactors(_))
// Clear the variable and values caches
Variable.clearCache
LazyValues.clear(universe)
// Create BP.
createBP(targetElements, dependentUniverses, dependentAlgorithm)
// run BP
runBP()
}
/*
* The resample function. All sampled elements are resampled. For each element that is resampled,
   * we record the dependent elements of those elements, since that portion of the factor graph will
* have to be removed (since resampling can change the structure).
*/
private[figaro] def resample(): (Set[Element[_]], Set[Element[_]]) = {
val needsToBeResampled = pbpSampler.sampledElements.filter(e => bp.factorGraph.contains(VariableNode(Variable(e))))
val dependentElems = needsToBeResampled.flatMap { elem =>
elem match {
case a: Atomic[_] =>
case _ => throw new UnsupportedAlgorithmException(elem)
}
// get the beliefs for the element as computed by BP
val oldBeliefs = bp.getBeliefsForElement(elem)
// get the last messages to the node that will be used to compute the density of new samples
val factors = getLastMessagesToNode(elem)
val factorBeliefs = factors.map(bp.factorToBeliefs(_))
      // estimate the bandwidth of the proposal using the old beliefs
val bw = proposalEstimator(oldBeliefs)
// generate new samples
val newSamples = pbpSampler.resample(elem, oldBeliefs, factorBeliefs, bw)
      // return the set of dependent elements (and the element itself) whose factors will need to be wiped
universe.usedBy(elem) + elem
}
(needsToBeResampled, dependentElems)
}
/* For purposes of resampling, we want to find the belief of the element WITHOUT
* the original factor. That is, we will incorporate that information using the exact
   * density of the element, so we don't need to estimate it from a factor.
*
* So this function will return all of the last messages to the element node and divide out
* the original factor
*
*/
private[figaro] def getLastMessagesToNode(elem: Element[_]): List[Factor[Double]] = {
// find the node in the graph corresponding to the element
val elemNode = bp.findNodeForElement(elem)
val neighbors = bp.factorGraph.getNeighbors(elemNode).toList
// get the last messages sent to the node
val lastMessages = neighbors.map(n => (n, bp.factorGraph.getLastMessage(n, elemNode)))
    // find the single-variable factor for this node
val singleFactorIndex = lastMessages.indexWhere(e => e._1.asInstanceOf[FactorNode].variables.size == 1)
val singleFactor = if (singleFactorIndex >= 0) lastMessages(singleFactorIndex)
else throw new UnsupportedAlgorithmException(elem)
// Get the original factor for this element
val originalFactor = Factory.makeNonConstraintFactors(elem)
if (originalFactor.size > 1) throw new UnsupportedAlgorithmException(elem)
// Take the single factor, and divide out the original factor. We do this since the single factor in the graph
// can have evidence multiplied in, so we only want to remove the original factor for it. We will use the original
// density instead of the factor to estimate densities during resampling
val factors = lastMessages.patch(singleFactorIndex, Nil, 1).map(_._2) :+ singleFactor._2.combination(bp.makeLogarithmic(originalFactor(0)), bp.semiring.divide)
factors
}
/*
* Runs the outer loop of PBP.
*/
private[figaro] def runOuterLoop() = {
val (needsToBeResampled, dependentElems): (Set[Element[_]], Set[Element[_]]) = if (bp != null) resample() else (Set(), Set())
val elemsWithPosteriors: Set[Element[_]] = if (bp != null) bp.neededElements.toSet -- dependentElems -- needsToBeResampled else Set()
runInnerLoop(elemsWithPosteriors, dependentElems)
}
/*
* Estimates the proposal distribution using the variance of the samples
*/
private def proposalEstimator(beliefs: List[(Double, _)]): Double = {
val percentOfStd = .1
beliefs.head._2 match {
case i: Int => 1.0
case d: Double => {
val bd = beliefs.asInstanceOf[List[(Double, Double)]]
val mean = (0.0 /: bd)((c: Double, n: (Double, Double)) => c + n._1 * n._2)
val std = math.sqrt((0.0 /: bd)((c: Double, n: (Double, Double)) => c + (n._2 - mean) * (n._2 - mean) * n._1))
        std * percentOfStd
}
}
}
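  /*
   * Worked example of the estimate above (the numbers follow directly from the formula): for
   * beliefs List((0.5, 0.0), (0.5, 2.0)) the weighted mean is 1.0, the weighted standard
   * deviation is sqrt(0.5 * 1 + 0.5 * 1) = 1.0, so the returned proposal bandwidth is 0.1.
   */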
/**
* Runs this particle belief propagation algorithm for one iteration. An iteration here is
* one iteration of the outer loop. This means that the inner BP loop may run several iterations.
*/
def runStep() {
runOuterLoop()
}
}
/**
* Trait for One time PBP algorithms
*/
trait OneTimeParticleBeliefPropagation extends ParticleBeliefPropagation with OneTime with OneTimeInnerBPHandler {
val outerIterations: Int
def run() = {
for { i <- 1 to outerIterations } { runStep() }
}
}
/**
* Trait for Anytime PBP algorithms
*/
trait AnytimeParticleBeliefPropagation extends ParticleBeliefPropagation with Anytime with AnytimeInnerBPHandler {
override def cleanUp() = if (bp != null) bp.kill
}
/**
* Class to implement a probability query BP algorithm
*/
abstract class ProbQueryParticleBeliefPropagation(numArgSamples: Int, numTotalSamples: Int,
override val universe: Universe, targets: Element[_]*)(
val dependentUniverses: List[(Universe, List[NamedEvidence[_]])],
val dependentAlgorithm: (Universe, List[NamedEvidence[_]]) => () => Double,
depth: Int = Int.MaxValue, upperBounds: Boolean = false)
extends ProbQueryAlgorithm
with ParticleBeliefPropagation { //with ProbEvidenceBeliefPropagation {
val targetElements = targets.toList
val queryTargets = targetElements
val semiring = LogSumProductSemiring()
val densityEstimator = new AutomaticDensityEstimator
val pbpSampler = ParticleGenerator(universe, densityEstimator, numArgSamples, numTotalSamples)
/**
* Getting factors for PBP returns an empty list, since all of the factor creation is handled inside of
* the BP instances
*/
def getFactors(neededElements: List[Element[_]],
targetElements: List[Element[_]], upperBounds: Boolean = false): List[Factor[Double]] = List()
def computeDistribution[T](target: Element[T]): Stream[(Double, T)] = bp.getBeliefsForElement(target).toStream
def computeExpectation[T](target: Element[T], function: T => Double): Double = {
computeDistribution(target).map((pair: (Double, T)) => pair._1 * function(pair._2)).sum
}
}
object ParticleBeliefPropagation {
/**
* Creates a One Time belief propagation computer in the current default universe.
*/
def apply(myOuterIterations: Int, myInnerIterations: Int, targets: Element[_]*)(implicit universe: Universe) =
new ProbQueryParticleBeliefPropagation(ParticleGenerator.defaultArgSamples, ParticleGenerator.defaultTotalSamples,
universe, targets: _*)(List(),
(u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with OneTimeParticleBeliefPropagation with OneTimeProbQuery {
val outerIterations = myOuterIterations
val innerIterations = myInnerIterations
}
/**
* Creates a One Time belief propagation computer in the current default universe. Use the dependent universe and algorithm to compute prob of evidence in dependent universe
*/
def apply(dependentUniverses: List[(Universe, List[NamedEvidence[_]])],
dependentAlgorithm: (Universe, List[NamedEvidence[_]]) => () => Double, myOuterIterations: Int, myInnerIterations: Int, targets: Element[_]*)(implicit universe: Universe) =
new ProbQueryParticleBeliefPropagation(ParticleGenerator.defaultArgSamples, ParticleGenerator.defaultTotalSamples,
universe, targets: _*)(dependentUniverses, dependentAlgorithm) with OneTimeParticleBeliefPropagation with OneTimeProbQuery {
val outerIterations = myOuterIterations
val innerIterations = myInnerIterations
}
/**
* Creates a One Time belief propagation computer in the current default universe that specifies the number of samples to take for each element.
*/
def apply(myOuterIterations: Int, myInnerIterations: Int, argSamples: Int, totalSamples: Int,
targets: Element[_]*)(implicit universe: Universe) =
new ProbQueryParticleBeliefPropagation(argSamples, totalSamples, universe, targets: _*)(List(),
(u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with OneTimeParticleBeliefPropagation with OneTimeProbQuery {
val outerIterations = myOuterIterations
val innerIterations = myInnerIterations
}
/**
* Creates a One Time belief propagation computer in the current default universe that specifies the number of samples to take for each element.
* Use the dependent universe and algorithm to compute prob of evidence in dependent universe
*/
def apply(dependentUniverses: List[(Universe, List[NamedEvidence[_]])],
dependentAlgorithm: (Universe, List[NamedEvidence[_]]) => () => Double, myOuterIterations: Int, myInnerIterations: Int, argSamples: Int, totalSamples: Int,
targets: Element[_]*)(implicit universe: Universe) =
new ProbQueryParticleBeliefPropagation(argSamples, totalSamples, universe, targets: _*)(dependentUniverses, dependentAlgorithm) with OneTimeParticleBeliefPropagation with OneTimeProbQuery {
val outerIterations = myOuterIterations
val innerIterations = myInnerIterations
}
/**
   * Creates an Anytime belief propagation computer in the current default universe.
*/
def apply(stepTimeMillis: Long, targets: Element[_]*)(implicit universe: Universe) =
new ProbQueryParticleBeliefPropagation(ParticleGenerator.defaultArgSamples, ParticleGenerator.defaultTotalSamples,
universe, targets: _*)(List(),
(u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with AnytimeParticleBeliefPropagation with AnytimeProbQuery {
val myStepTimeMillis = stepTimeMillis
}
/**
   * Creates an Anytime belief propagation computer in the current default universe. Use the dependent universe and algorithm to compute prob of evidence in dependent universe
*/
def apply(dependentUniverses: List[(Universe, List[NamedEvidence[_]])],
dependentAlgorithm: (Universe, List[NamedEvidence[_]]) => () => Double, stepTimeMillis: Long, targets: Element[_]*)(implicit universe: Universe) =
new ProbQueryParticleBeliefPropagation(ParticleGenerator.defaultArgSamples, ParticleGenerator.defaultTotalSamples,
universe, targets: _*)(dependentUniverses, dependentAlgorithm) with AnytimeParticleBeliefPropagation with AnytimeProbQuery {
val myStepTimeMillis = stepTimeMillis
}
/**
   * Creates an Anytime belief propagation computer in the current default universe that specifies the number of samples to take for each element.
*/
def apply(stepTimeMillis: Long, argSamples: Int, totalSamples: Int, targets: Element[_]*)(implicit universe: Universe) =
new ProbQueryParticleBeliefPropagation(argSamples, totalSamples, universe, targets: _*)(List(),
(u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with AnytimeParticleBeliefPropagation with AnytimeProbQuery {
val myStepTimeMillis = stepTimeMillis
}
/**
   * Creates an Anytime belief propagation computer in the current default universe that specifies the number of samples to take for each element.
* Use the dependent universe and algorithm to compute prob of evidence in dependent universe
*/
def apply(dependentUniverses: List[(Universe, List[NamedEvidence[_]])],
dependentAlgorithm: (Universe, List[NamedEvidence[_]]) => () => Double, stepTimeMillis: Long, argSamples: Int, totalSamples: Int, targets: Element[_]*)(implicit universe: Universe) =
new ProbQueryParticleBeliefPropagation(argSamples, totalSamples, universe, targets: _*)(dependentUniverses, dependentAlgorithm) with AnytimeParticleBeliefPropagation with AnytimeProbQuery {
val myStepTimeMillis = stepTimeMillis
}
}
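// A hedged usage sketch (Normal lives in com.cra.figaro.library.atomic.continuous and is not
// imported in this file; the element and iteration counts below are hypothetical):
//   val x = Normal(0.0, 1.0)
//   val alg = ParticleBeliefPropagation(5, 20, x) // 5 outer iterations, 20 inner BP iterations
//   alg.start()
//   val posterior = alg.distribution(x)
//   alg.kill()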
| agarbuno/figaro | Figaro/src/main/scala/com/cra/figaro/experimental/particlebp/ParticleBeliefPropagation.scala | Scala | bsd-3-clause | 16,059 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.util.Random
import scala.collection.Map
import scala.collection.JavaConversions.mapAsScalaMap
import scala.collection.mutable.ArrayBuffer
import scala.reflect.{classTag, ClassTag}
import org.apache.hadoop.io.BytesWritable
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapred.TextOutputFormat
import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap}
import com.clearspring.analytics.stream.cardinality.HyperLogLog
import org.apache.spark.Partitioner._
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.partial.BoundedDouble
import org.apache.spark.partial.CountEvaluator
import org.apache.spark.partial.GroupedCountEvaluator
import org.apache.spark.partial.PartialResult
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.{Utils, BoundedPriorityQueue, SerializableHyperLogLog}
import org.apache.spark.SparkContext._
import org.apache.spark._
/**
* A Resilient Distributed Dataset (RDD), the basic abstraction in Spark. Represents an immutable,
* partitioned collection of elements that can be operated on in parallel. This class contains the
* basic operations available on all RDDs, such as `map`, `filter`, and `persist`. In addition,
* [[org.apache.spark.rdd.PairRDDFunctions]] contains operations available only on RDDs of key-value
* pairs, such as `groupByKey` and `join`;
* [[org.apache.spark.rdd.DoubleRDDFunctions]] contains operations available only on RDDs of
* Doubles; and
* [[org.apache.spark.rdd.SequenceFileRDDFunctions]] contains operations available on RDDs that
* can be saved as SequenceFiles.
 * These operations are automatically available on any RDD of the right type (e.g. RDD[(Int, Int)])
* through implicit conversions when you `import org.apache.spark.SparkContext._`.
*
* Internally, each RDD is characterized by five main properties:
*
* - A list of partitions
* - A function for computing each split
* - A list of dependencies on other RDDs
* - Optionally, a Partitioner for key-value RDDs (e.g. to say that the RDD is hash-partitioned)
* - Optionally, a list of preferred locations to compute each split on (e.g. block locations for
* an HDFS file)
*
* All of the scheduling and execution in Spark is done based on these methods, allowing each RDD
* to implement its own way of computing itself. Indeed, users can implement custom RDDs (e.g. for
* reading data from a new storage system) by overriding these functions. Please refer to the
* [[http://www.cs.berkeley.edu/~matei/papers/2012/nsdi_spark.pdf Spark paper]] for more details
* on RDD internals.
*/
abstract class RDD[T: ClassTag](
@transient private var sc: SparkContext,
@transient private var deps: Seq[Dependency[_]]
) extends Serializable with Logging {
/** Construct an RDD with just a one-to-one dependency on one parent */
def this(@transient oneParent: RDD[_]) =
this(oneParent.context , List(new OneToOneDependency(oneParent)))
private[spark] def conf = sc.conf
// =======================================================================
// Methods that should be implemented by subclasses of RDD
// =======================================================================
/** Implemented by subclasses to compute a given partition. */
def compute(split: Partition, context: TaskContext): Iterator[T]
/**
* Implemented by subclasses to return the set of partitions in this RDD. This method will only
* be called once, so it is safe to implement a time-consuming computation in it.
*/
protected def getPartitions: Array[Partition]
/**
* Implemented by subclasses to return how this RDD depends on parent RDDs. This method will only
* be called once, so it is safe to implement a time-consuming computation in it.
*/
protected def getDependencies: Seq[Dependency[_]] = deps
/** Optionally overridden by subclasses to specify placement preferences. */
protected def getPreferredLocations(split: Partition): Seq[String] = Nil
/** Optionally overridden by subclasses to specify how they are partitioned. */
@transient val partitioner: Option[Partitioner] = None
// =======================================================================
// Methods and fields available on all RDDs
// =======================================================================
/** The SparkContext that created this RDD. */
def sparkContext: SparkContext = sc
/** A unique ID for this RDD (within its SparkContext). */
val id: Int = sc.newRddId()
/** A friendly name for this RDD */
@transient var name: String = null
/** Assign a name to this RDD */
def setName(_name: String) = {
name = _name
this
}
  /** User-defined generator of this RDD. */
@transient var generator = Utils.getCallSiteInfo.firstUserClass
  /** Reset generator. */
def setGenerator(_generator: String) = {
generator = _generator
}
/**
* Set this RDD's storage level to persist its values across operations after the first time
* it is computed. This can only be used to assign a new storage level if the RDD does not
   * have a storage level set yet.
*/
def persist(newLevel: StorageLevel): RDD[T] = {
// TODO: Handle changes of StorageLevel
if (storageLevel != StorageLevel.NONE && newLevel != storageLevel) {
throw new UnsupportedOperationException(
"Cannot change storage level of an RDD after it was already assigned a level")
}
storageLevel = newLevel
// Register the RDD with the SparkContext
sc.persistentRdds(id) = this
this
}
/** Persist this RDD with the default storage level (`MEMORY_ONLY`). */
def persist(): RDD[T] = persist(StorageLevel.MEMORY_ONLY)
/** Persist this RDD with the default storage level (`MEMORY_ONLY`). */
def cache(): RDD[T] = persist()
/**
* Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.
*
* @param blocking Whether to block until all blocks are deleted.
* @return This RDD.
*/
def unpersist(blocking: Boolean = true): RDD[T] = {
logInfo("Removing RDD " + id + " from persistence list")
sc.env.blockManager.master.removeRdd(id, blocking)
sc.persistentRdds.remove(id)
storageLevel = StorageLevel.NONE
this
}
/** Get the RDD's current storage level, or StorageLevel.NONE if none is set. */
def getStorageLevel = storageLevel
// Our dependencies and partitions will be gotten by calling subclass's methods below, and will
// be overwritten when we're checkpointed
private var dependencies_ : Seq[Dependency[_]] = null
@transient private var partitions_ : Array[Partition] = null
/** An Option holding our checkpoint RDD, if we are checkpointed */
private def checkpointRDD: Option[RDD[T]] = checkpointData.flatMap(_.checkpointRDD)
/**
* Get the list of dependencies of this RDD, taking into account whether the
* RDD is checkpointed or not.
*/
final def dependencies: Seq[Dependency[_]] = {
checkpointRDD.map(r => List(new OneToOneDependency(r))).getOrElse {
if (dependencies_ == null) {
dependencies_ = getDependencies
}
dependencies_
}
}
/**
* Get the array of partitions of this RDD, taking into account whether the
* RDD is checkpointed or not.
*/
final def partitions: Array[Partition] = {
checkpointRDD.map(_.partitions).getOrElse {
if (partitions_ == null) {
partitions_ = getPartitions
}
partitions_
}
}
/**
* Get the preferred locations of a partition (as hostnames), taking into account whether the
* RDD is checkpointed.
*/
final def preferredLocations(split: Partition): Seq[String] = {
checkpointRDD.map(_.getPreferredLocations(split)).getOrElse {
getPreferredLocations(split)
}
}
/**
* Internal method to this RDD; will read from cache if applicable, or otherwise compute it.
* This should ''not'' be called by users directly, but is available for implementors of custom
* subclasses of RDD.
*/
final def iterator(split: Partition, context: TaskContext): Iterator[T] = {
if (storageLevel != StorageLevel.NONE) {
SparkEnv.get.cacheManager.getOrCompute(this, split, context, storageLevel)
} else {
computeOrReadCheckpoint(split, context)
}
}
/**
* Compute an RDD partition or read it from a checkpoint if the RDD is checkpointing.
*/
private[spark] def computeOrReadCheckpoint(split: Partition, context: TaskContext): Iterator[T] =
{
if (isCheckpointed) firstParent[T].iterator(split, context) else compute(split, context)
}
// Transformations (return a new RDD)
/**
* Return a new RDD by applying a function to all elements of this RDD.
*/
def map[U: ClassTag](f: T => U): RDD[U] = new MappedRDD(this, sc.clean(f))
/**
* Return a new RDD by first applying a function to all elements of this
* RDD, and then flattening the results.
*/
def flatMap[U: ClassTag](f: T => TraversableOnce[U]): RDD[U] =
new FlatMappedRDD(this, sc.clean(f))
/**
* Return a new RDD containing only the elements that satisfy a predicate.
*/
def filter(f: T => Boolean): RDD[T] = new FilteredRDD(this, sc.clean(f))
/**
* Return a new RDD containing the distinct elements in this RDD.
*/
def distinct(numPartitions: Int): RDD[T] =
map(x => (x, null)).reduceByKey((x, y) => x, numPartitions).map(_._1)
/**
* Return a new RDD containing the distinct elements in this RDD.
*/
def distinct(): RDD[T] = distinct(partitions.size)
/**
* Return a new RDD that has exactly numPartitions partitions.
*
* Can increase or decrease the level of parallelism in this RDD. Internally, this uses
* a shuffle to redistribute data.
*
* If you are decreasing the number of partitions in this RDD, consider using `coalesce`,
* which can avoid performing a shuffle.
*/
def repartition(numPartitions: Int): RDD[T] = {
coalesce(numPartitions, shuffle = true)
}
/**
* Return a new RDD that is reduced into `numPartitions` partitions.
*
* This results in a narrow dependency, e.g. if you go from 1000 partitions
   * to 100 partitions, there will not be a shuffle; instead, each of the 100
* new partitions will claim 10 of the current partitions.
*
* However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
* this may result in your computation taking place on fewer nodes than
* you like (e.g. one node in the case of numPartitions = 1). To avoid this,
* you can pass shuffle = true. This will add a shuffle step, but means the
* current upstream partitions will be executed in parallel (per whatever
* the current partitioning is).
*
* Note: With shuffle = true, you can actually coalesce to a larger number
* of partitions. This is useful if you have a small number of partitions,
* say 100, potentially with a few partitions being abnormally large. Calling
* coalesce(1000, shuffle = true) will result in 1000 partitions with the
* data distributed using a hash partitioner.
*/
def coalesce(numPartitions: Int, shuffle: Boolean = false): RDD[T] = {
if (shuffle) {
// include a shuffle step so that our upstream tasks are still distributed
new CoalescedRDD(
new ShuffledRDD[T, Null, (T, Null)](map(x => (x, null)),
new HashPartitioner(numPartitions)),
numPartitions).keys
} else {
new CoalescedRDD(this, numPartitions)
}
}
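  // Illustrative usage sketch (not part of the original source): `sc` is an assumed SparkContext,
  // shown only to contrast the two paths documented above.
  //   val wide = sc.parallelize(1 to 1000000, 1000)
  //   val narrowed = wide.coalesce(100)          // narrow dependency, no shuffle
  //   val rebalanced = wide.repartition(2000)    // same as coalesce(2000, shuffle = true)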
/**
* Return a sampled subset of this RDD.
*/
def sample(withReplacement: Boolean, fraction: Double, seed: Int): RDD[T] = {
require(fraction >= 0.0, "Invalid fraction value: " + fraction)
new SampledRDD(this, withReplacement, fraction, seed)
}
def takeSample(withReplacement: Boolean, num: Int, seed: Int): Array[T] = {
var fraction = 0.0
var total = 0
val multiplier = 3.0
val initialCount = this.count()
var maxSelected = 0
if (num < 0) {
throw new IllegalArgumentException("Negative number of elements requested")
}
if (initialCount == 0) {
return new Array[T](0)
}
if (initialCount > Integer.MAX_VALUE - 1) {
maxSelected = Integer.MAX_VALUE - 1
} else {
maxSelected = initialCount.toInt
}
if (num > initialCount && !withReplacement) {
total = maxSelected
fraction = multiplier * (maxSelected + 1) / initialCount
} else {
fraction = multiplier * (num + 1) / initialCount
total = num
}
val rand = new Random(seed)
var samples = this.sample(withReplacement, fraction, rand.nextInt()).collect()
// If the first sample didn't turn out large enough, keep trying to take samples;
// this shouldn't happen often because we use a big multiplier for the initial size
while (samples.length < total) {
samples = this.sample(withReplacement, fraction, rand.nextInt()).collect()
}
Utils.randomizeInPlace(samples, rand).take(total)
}
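  // Illustrative usage sketch (not part of the original source): draws 10 elements without
  // replacement from an assumed RDD[Int] `rdd`, with a fixed seed for reproducibility.
  //   val sampled: Array[Int] = rdd.takeSample(withReplacement = false, num = 10, seed = 42)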
/**
* Return the union of this RDD and another one. Any identical elements will appear multiple
* times (use `.distinct()` to eliminate them).
*/
def union(other: RDD[T]): RDD[T] = new UnionRDD(sc, Array(this, other))
/**
* Return the union of this RDD and another one. Any identical elements will appear multiple
* times (use `.distinct()` to eliminate them).
*/
def ++(other: RDD[T]): RDD[T] = this.union(other)
/**
* Return an RDD created by coalescing all elements within each partition into an array.
*/
def glom(): RDD[Array[T]] = new GlommedRDD(this)
/**
* Return the Cartesian product of this RDD and another one, that is, the RDD of all pairs of
* elements (a, b) where a is in `this` and b is in `other`.
*/
def cartesian[U: ClassTag](other: RDD[U]): RDD[(T, U)] = new CartesianRDD(sc, this, other)
/**
* Return an RDD of grouped items.
*/
def groupBy[K: ClassTag](f: T => K): RDD[(K, Seq[T])] =
groupBy[K](f, defaultPartitioner(this))
/**
* Return an RDD of grouped elements. Each group consists of a key and a sequence of elements
* mapping to that key.
*/
def groupBy[K: ClassTag](f: T => K, numPartitions: Int): RDD[(K, Seq[T])] =
groupBy(f, new HashPartitioner(numPartitions))
/**
* Return an RDD of grouped items.
*/
def groupBy[K: ClassTag](f: T => K, p: Partitioner): RDD[(K, Seq[T])] = {
val cleanF = sc.clean(f)
this.map(t => (cleanF(t), t)).groupByKey(p)
}
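  // Illustrative usage sketch (not part of the original source): groups an assumed RDD[Int]
  // `nums` by remainder, yielding an RDD[(Int, Seq[Int])] as documented above.
  //   val byRemainder = nums.groupBy(_ % 3)
  //   val byRemainderIn16 = nums.groupBy((x: Int) => x % 3, numPartitions = 16)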
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: String): RDD[String] = new PipedRDD(this, command)
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: String, env: Map[String, String]): RDD[String] =
new PipedRDD(this, command, env)
/**
* Return an RDD created by piping elements to a forked external process.
* The print behavior can be customized by providing two functions.
*
* @param command command to run in forked process.
* @param env environment variables to set.
   * @param printPipeContext Before piping elements, this function is called as an opportunity
   *                         to pipe context data. The print-line function (like out.println) will
   *                         be passed as printPipeContext's parameter.
   * @param printRDDElement Use this function to customize how to pipe elements. This function
   *                        will be called with each RDD element as the 1st parameter, and the
   *                        print-line function (like out.println()) as the 2nd parameter.
   *                        An example of piping the RDD data of groupBy() in a streaming way,
   *                        instead of constructing a huge String to concatenate all the elements:
   *                        def printRDDElement(record: (String, Seq[String]), f: String => Unit) =
   *                          for (e <- record._2) { f(e) }
* @return the result RDD
*/
def pipe(
command: Seq[String],
env: Map[String, String] = Map(),
printPipeContext: (String => Unit) => Unit = null,
printRDDElement: (T, String => Unit) => Unit = null): RDD[String] = {
new PipedRDD(this, command, env,
if (printPipeContext ne null) sc.clean(printPipeContext) else null,
if (printRDDElement ne null) sc.clean(printRDDElement) else null)
}
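  // Illustrative usage sketch (not part of the original source): pipes an assumed RDD[String]
  // `lines` through an external command, which must be installed on the worker nodes.
  //   val deduped: RDD[String] = lines.pipe(Seq("sort", "-u"), env = Map("LC_ALL" -> "C"))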
/**
* Return a new RDD by applying a function to each partition of this RDD.
*/
def mapPartitions[U: ClassTag](
f: Iterator[T] => Iterator[U], preservesPartitioning: Boolean = false): RDD[U] = {
val func = (context: TaskContext, index: Int, iter: Iterator[T]) => f(iter)
new MapPartitionsRDD(this, sc.clean(func), preservesPartitioning)
}
/**
* Return a new RDD by applying a function to each partition of this RDD, while tracking the index
* of the original partition.
*/
def mapPartitionsWithIndex[U: ClassTag](
f: (Int, Iterator[T]) => Iterator[U], preservesPartitioning: Boolean = false): RDD[U] = {
val func = (context: TaskContext, index: Int, iter: Iterator[T]) => f(index, iter)
new MapPartitionsRDD(this, sc.clean(func), preservesPartitioning)
}
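  // Illustrative usage sketch (not part of the original source): tags every element of an
  // assumed RDD `rdd` with the index of the partition it came from.
  //   val tagged = rdd.mapPartitionsWithIndex((index, iter) => iter.map(x => (index, x)))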
/**
* Return a new RDD by applying a function to each partition of this RDD. This is a variant of
* mapPartitions that also passes the TaskContext into the closure.
*/
def mapPartitionsWithContext[U: ClassTag](
f: (TaskContext, Iterator[T]) => Iterator[U],
preservesPartitioning: Boolean = false): RDD[U] = {
val func = (context: TaskContext, index: Int, iter: Iterator[T]) => f(context, iter)
new MapPartitionsRDD(this, sc.clean(func), preservesPartitioning)
}
/**
* Return a new RDD by applying a function to each partition of this RDD, while tracking the index
* of the original partition.
*/
@deprecated("use mapPartitionsWithIndex", "0.7.0")
def mapPartitionsWithSplit[U: ClassTag](
f: (Int, Iterator[T]) => Iterator[U], preservesPartitioning: Boolean = false): RDD[U] = {
mapPartitionsWithIndex(f, preservesPartitioning)
}
/**
* Maps f over this RDD, where f takes an additional parameter of type A. This
* additional parameter is produced by constructA, which is called in each
* partition with the index of that partition.
*/
def mapWith[A: ClassTag, U: ClassTag]
(constructA: Int => A, preservesPartitioning: Boolean = false)
(f: (T, A) => U): RDD[U] = {
mapPartitionsWithIndex((index, iter) => {
val a = constructA(index)
iter.map(t => f(t, a))
}, preservesPartitioning)
}
/**
* FlatMaps f over this RDD, where f takes an additional parameter of type A. This
* additional parameter is produced by constructA, which is called in each
* partition with the index of that partition.
*/
def flatMapWith[A: ClassTag, U: ClassTag]
(constructA: Int => A, preservesPartitioning: Boolean = false)
(f: (T, A) => Seq[U]): RDD[U] = {
mapPartitionsWithIndex((index, iter) => {
val a = constructA(index)
iter.flatMap(t => f(t, a))
}, preservesPartitioning)
}
/**
* Applies f to each element of this RDD, where f takes an additional parameter of type A.
* This additional parameter is produced by constructA, which is called in each
* partition with the index of that partition.
*/
def foreachWith[A: ClassTag](constructA: Int => A)(f: (T, A) => Unit) {
mapPartitionsWithIndex { (index, iter) =>
val a = constructA(index)
iter.map(t => {f(t, a); t})
}.foreach(_ => {})
}
/**
* Filters this RDD with p, where p takes an additional parameter of type A. This
* additional parameter is produced by constructA, which is called in each
* partition with the index of that partition.
*/
def filterWith[A: ClassTag](constructA: Int => A)(p: (T, A) => Boolean): RDD[T] = {
mapPartitionsWithIndex((index, iter) => {
val a = constructA(index)
iter.filter(t => p(t, a))
}, preservesPartitioning = true)
}
/**
* Zips this RDD with another one, returning key-value pairs with the first element in each RDD,
* second element in each RDD, etc. Assumes that the two RDDs have the *same number of
* partitions* and the *same number of elements in each partition* (e.g. one was made through
* a map on the other).
*/
def zip[U: ClassTag](other: RDD[U]): RDD[(T, U)] = new ZippedRDD(sc, this, other)
/**
* Zip this RDD's partitions with one (or more) RDD(s) and return a new RDD by
* applying a function to the zipped partitions. Assumes that all the RDDs have the
* *same number of partitions*, but does *not* require them to have the same number
* of elements in each partition.
*/
def zipPartitions[B: ClassTag, V: ClassTag]
(rdd2: RDD[B], preservesPartitioning: Boolean)
(f: (Iterator[T], Iterator[B]) => Iterator[V]): RDD[V] =
new ZippedPartitionsRDD2(sc, sc.clean(f), this, rdd2, preservesPartitioning)
def zipPartitions[B: ClassTag, V: ClassTag]
(rdd2: RDD[B])
(f: (Iterator[T], Iterator[B]) => Iterator[V]): RDD[V] =
new ZippedPartitionsRDD2(sc, sc.clean(f), this, rdd2, false)
def zipPartitions[B: ClassTag, C: ClassTag, V: ClassTag]
(rdd2: RDD[B], rdd3: RDD[C], preservesPartitioning: Boolean)
(f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V]): RDD[V] =
new ZippedPartitionsRDD3(sc, sc.clean(f), this, rdd2, rdd3, preservesPartitioning)
def zipPartitions[B: ClassTag, C: ClassTag, V: ClassTag]
(rdd2: RDD[B], rdd3: RDD[C])
(f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V]): RDD[V] =
new ZippedPartitionsRDD3(sc, sc.clean(f), this, rdd2, rdd3, false)
def zipPartitions[B: ClassTag, C: ClassTag, D: ClassTag, V: ClassTag]
(rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D], preservesPartitioning: Boolean)
(f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V]): RDD[V] =
new ZippedPartitionsRDD4(sc, sc.clean(f), this, rdd2, rdd3, rdd4, preservesPartitioning)
def zipPartitions[B: ClassTag, C: ClassTag, D: ClassTag, V: ClassTag]
(rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D])
(f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V]): RDD[V] =
new ZippedPartitionsRDD4(sc, sc.clean(f), this, rdd2, rdd3, rdd4, false)
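  // Illustrative usage sketch (not part of the original source): `as` and `bs` are assumed RDDs
  // with the same number of partitions; this sums their per-partition sizes pairwise.
  //   val pairedSizes: RDD[Int] = as.zipPartitions(bs) { (itA, itB) => Iterator(itA.size + itB.size) }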
// Actions (launch a job to return a value to the user program)
/**
* Applies a function f to all elements of this RDD.
*/
def foreach(f: T => Unit) {
sc.runJob(this, (iter: Iterator[T]) => iter.foreach(f))
}
/**
* Applies a function f to each partition of this RDD.
*/
def foreachPartition(f: Iterator[T] => Unit) {
sc.runJob(this, (iter: Iterator[T]) => f(iter))
}
/**
* Return an array that contains all of the elements in this RDD.
*/
def collect(): Array[T] = {
val results = sc.runJob(this, (iter: Iterator[T]) => iter.toArray)
Array.concat(results: _*)
}
/**
* Return an array that contains all of the elements in this RDD.
*/
def toArray(): Array[T] = collect()
/**
* Return an RDD that contains all matching values by applying `f`.
*/
def collect[U: ClassTag](f: PartialFunction[T, U]): RDD[U] = {
filter(f.isDefinedAt).map(f)
}
/**
* Return an RDD with the elements from `this` that are not in `other`.
*
* Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
* RDD will be <= us.
*/
def subtract(other: RDD[T]): RDD[T] =
subtract(other, partitioner.getOrElse(new HashPartitioner(partitions.size)))
/**
* Return an RDD with the elements from `this` that are not in `other`.
*/
def subtract(other: RDD[T], numPartitions: Int): RDD[T] =
subtract(other, new HashPartitioner(numPartitions))
/**
* Return an RDD with the elements from `this` that are not in `other`.
*/
def subtract(other: RDD[T], p: Partitioner): RDD[T] = {
if (partitioner == Some(p)) {
// Our partitioner knows how to handle T (which, since we have a partitioner, is
// really (K, V)) so make a new Partitioner that will de-tuple our fake tuples
val p2 = new Partitioner() {
override def numPartitions = p.numPartitions
override def getPartition(k: Any) = p.getPartition(k.asInstanceOf[(Any, _)]._1)
}
// Unfortunately, since we're making a new p2, we'll get ShuffleDependencies
// anyway, and when calling .keys, will not have a partitioner set, even though
// the SubtractedRDD will, thanks to p2's de-tupled partitioning, already be
// partitioned by the right/real keys (e.g. p).
this.map(x => (x, null)).subtractByKey(other.map((_, null)), p2).keys
} else {
this.map(x => (x, null)).subtractByKey(other.map((_, null)), p).keys
}
}
/**
* Reduces the elements of this RDD using the specified commutative and
* associative binary operator.
*/
def reduce(f: (T, T) => T): T = {
val cleanF = sc.clean(f)
val reducePartition: Iterator[T] => Option[T] = iter => {
if (iter.hasNext) {
Some(iter.reduceLeft(cleanF))
} else {
None
}
}
var jobResult: Option[T] = None
val mergeResult = (index: Int, taskResult: Option[T]) => {
if (taskResult.isDefined) {
jobResult = jobResult match {
case Some(value) => Some(f(value, taskResult.get))
case None => taskResult
}
}
}
sc.runJob(this, reducePartition, mergeResult)
// Get the final result out of our Option, or throw an exception if the RDD was empty
jobResult.getOrElse(throw new UnsupportedOperationException("empty collection"))
}
/**
* Aggregate the elements of each partition, and then the results for all the partitions, using a
* given associative function and a neutral "zero value". The function op(t1, t2) is allowed to
* modify t1 and return it as its result value to avoid object allocation; however, it should not
* modify t2.
*/
def fold(zeroValue: T)(op: (T, T) => T): T = {
// Clone the zero value since we will also be serializing it as part of tasks
var jobResult = Utils.clone(zeroValue, sc.env.closureSerializer.newInstance())
val cleanOp = sc.clean(op)
val foldPartition = (iter: Iterator[T]) => iter.fold(zeroValue)(cleanOp)
val mergeResult = (index: Int, taskResult: T) => jobResult = op(jobResult, taskResult)
sc.runJob(this, foldPartition, mergeResult)
jobResult
}
/**
* Aggregate the elements of each partition, and then the results for all the partitions, using
* given combine functions and a neutral "zero value". This function can return a different result
* type, U, than the type of this RDD, T. Thus, we need one operation for merging a T into an U
* and one operation for merging two U's, as in scala.TraversableOnce. Both of these functions are
* allowed to modify and return their first argument instead of creating a new U to avoid memory
* allocation.
*/
def aggregate[U: ClassTag](zeroValue: U)(seqOp: (U, T) => U, combOp: (U, U) => U): U = {
// Clone the zero value since we will also be serializing it as part of tasks
var jobResult = Utils.clone(zeroValue, sc.env.closureSerializer.newInstance())
val cleanSeqOp = sc.clean(seqOp)
val cleanCombOp = sc.clean(combOp)
val aggregatePartition = (it: Iterator[T]) => it.aggregate(zeroValue)(cleanSeqOp, cleanCombOp)
val mergeResult = (index: Int, taskResult: U) => jobResult = combOp(jobResult, taskResult)
sc.runJob(this, aggregatePartition, mergeResult)
jobResult
}
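  // Illustrative usage sketch (not part of the original source): computes a (sum, count) pair in
  // one pass over an assumed RDD[Int] `nums`, exercising both seqOp and combOp described above.
  //   val (sum, count) = nums.aggregate((0, 0))(
  //     (acc, x) => (acc._1 + x, acc._2 + 1),
  //     (a, b) => (a._1 + b._1, a._2 + b._2))
  //   val mean = if (count == 0) 0.0 else sum.toDouble / count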
/**
* Return the number of elements in the RDD.
*/
def count(): Long = {
sc.runJob(this, (iter: Iterator[T]) => {
// Use a while loop to count the number of elements rather than iter.size because
      // iter.size uses a for loop, which is slightly slower in the current version of Scala.
var result = 0L
while (iter.hasNext) {
result += 1L
iter.next()
}
result
}).sum
}
/**
* (Experimental) Approximate version of count() that returns a potentially incomplete result
* within a timeout, even if not all tasks have finished.
*/
def countApprox(timeout: Long, confidence: Double = 0.95): PartialResult[BoundedDouble] = {
val countElements: (TaskContext, Iterator[T]) => Long = { (ctx, iter) =>
var result = 0L
while (iter.hasNext) {
result += 1L
iter.next()
}
result
}
val evaluator = new CountEvaluator(partitions.size, confidence)
sc.runApproximateJob(this, countElements, evaluator, timeout)
}
/**
* Return the count of each unique value in this RDD as a map of (value, count) pairs. The final
* combine step happens locally on the master, equivalent to running a single reduce task.
*/
def countByValue(): Map[T, Long] = {
if (elementClassTag.runtimeClass.isArray) {
throw new SparkException("countByValue() does not support arrays")
}
// TODO: This should perhaps be distributed by default.
def countPartition(iter: Iterator[T]): Iterator[OLMap[T]] = {
val map = new OLMap[T]
while (iter.hasNext) {
val v = iter.next()
map.put(v, map.getLong(v) + 1L)
}
Iterator(map)
}
def mergeMaps(m1: OLMap[T], m2: OLMap[T]): OLMap[T] = {
val iter = m2.object2LongEntrySet.fastIterator()
while (iter.hasNext) {
val entry = iter.next()
m1.put(entry.getKey, m1.getLong(entry.getKey) + entry.getLongValue)
}
m1
}
val myResult = mapPartitions(countPartition).reduce(mergeMaps)
myResult.asInstanceOf[java.util.Map[T, Long]] // Will be wrapped as a Scala mutable Map
}
/**
* (Experimental) Approximate version of countByValue().
*/
def countByValueApprox(
timeout: Long,
confidence: Double = 0.95
): PartialResult[Map[T, BoundedDouble]] = {
if (elementClassTag.runtimeClass.isArray) {
throw new SparkException("countByValueApprox() does not support arrays")
}
val countPartition: (TaskContext, Iterator[T]) => OLMap[T] = { (ctx, iter) =>
val map = new OLMap[T]
while (iter.hasNext) {
val v = iter.next()
map.put(v, map.getLong(v) + 1L)
}
map
}
val evaluator = new GroupedCountEvaluator[T](partitions.size, confidence)
sc.runApproximateJob(this, countPartition, evaluator, timeout)
}
/**
* Return approximate number of distinct elements in the RDD.
*
* The accuracy of approximation can be controlled through the relative standard deviation
* (relativeSD) parameter, which also controls the amount of memory used. Lower values result in
   * more accurate counts but increase the memory footprint and vice versa. The default value of
* relativeSD is 0.05.
*/
def countApproxDistinct(relativeSD: Double = 0.05): Long = {
val zeroCounter = new SerializableHyperLogLog(new HyperLogLog(relativeSD))
aggregate(zeroCounter)(_.add(_), _.merge(_)).value.cardinality()
}
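  // Illustrative usage sketch (not part of the original source): a smaller relativeSD trades
  // memory for accuracy, as described above; `userIds` is an assumed RDD of identifiers.
  //   val roughCount = userIds.countApproxDistinct()        // default relativeSD = 0.05
  //   val tighterCount = userIds.countApproxDistinct(0.01)  // more memory, better estimate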
/**
   * Take the first num elements of the RDD. It works by first scanning one partition, and using the
* results from that partition to estimate the number of additional partitions needed to satisfy
* the limit.
*/
def take(num: Int): Array[T] = {
if (num == 0) {
return new Array[T](0)
}
val buf = new ArrayBuffer[T]
val totalParts = this.partitions.length
var partsScanned = 0
while (buf.size < num && partsScanned < totalParts) {
// The number of partitions to try in this iteration. It is ok for this number to be
// greater than totalParts because we actually cap it at totalParts in runJob.
var numPartsToTry = 1
if (partsScanned > 0) {
// If we didn't find any rows after the first iteration, just try all partitions next.
// Otherwise, interpolate the number of partitions we need to try, but overestimate it
// by 50%.
if (buf.size == 0) {
numPartsToTry = totalParts - 1
} else {
numPartsToTry = (1.5 * num * partsScanned / buf.size).toInt
}
}
numPartsToTry = math.max(0, numPartsToTry) // guard against negative num of partitions
val left = num - buf.size
val p = partsScanned until math.min(partsScanned + numPartsToTry, totalParts)
val res = sc.runJob(this, (it: Iterator[T]) => it.take(left).toArray, p, allowLocal = true)
res.foreach(buf ++= _.take(num - buf.size))
partsScanned += numPartsToTry
}
buf.toArray
}
/**
* Return the first element in this RDD.
*/
def first(): T = take(1) match {
case Array(t) => t
case _ => throw new UnsupportedOperationException("empty collection")
}
/**
* Returns the top K elements from this RDD as defined by
* the specified implicit Ordering[T].
* @param num the number of top elements to return
* @param ord the implicit ordering for T
* @return an array of top elements
*/
def top(num: Int)(implicit ord: Ordering[T]): Array[T] = {
mapPartitions { items =>
val queue = new BoundedPriorityQueue[T](num)
queue ++= items
Iterator.single(queue)
}.reduce { (queue1, queue2) =>
queue1 ++= queue2
queue1
}.toArray.sorted(ord.reverse)
}
/**
* Returns the first K elements from this RDD as defined by
* the specified implicit Ordering[T] and maintains the
* ordering.
* @param num the number of top elements to return
* @param ord the implicit ordering for T
* @return an array of top elements
*/
def takeOrdered(num: Int)(implicit ord: Ordering[T]): Array[T] = top(num)(ord.reverse)
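  // Illustrative usage sketch (not part of the original source): on an assumed RDD[Int] `nums`,
  // top returns the largest elements and takeOrdered the smallest, per the docs above.
  //   val largestFive = nums.top(5)           // descending by the implicit Ordering
  //   val smallestFive = nums.takeOrdered(5)  // ascending by the implicit Ordering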
/**
* Save this RDD as a text file, using string representations of elements.
*/
def saveAsTextFile(path: String) {
this.map(x => (NullWritable.get(), new Text(x.toString)))
.saveAsHadoopFile[TextOutputFormat[NullWritable, Text]](path)
}
/**
* Save this RDD as a compressed text file, using string representations of elements.
*/
def saveAsTextFile(path: String, codec: Class[_ <: CompressionCodec]) {
this.map(x => (NullWritable.get(), new Text(x.toString)))
.saveAsHadoopFile[TextOutputFormat[NullWritable, Text]](path, codec)
}
/**
* Save this RDD as a SequenceFile of serialized objects.
*/
def saveAsObjectFile(path: String) {
this.mapPartitions(iter => iter.grouped(10).map(_.toArray))
.map(x => (NullWritable.get(), new BytesWritable(Utils.serialize(x))))
.saveAsSequenceFile(path)
}
/**
* Creates tuples of the elements in this RDD by applying `f`.
*/
def keyBy[K](f: T => K): RDD[(K, T)] = {
map(x => (f(x), x))
}
/** A private method for tests, to look at the contents of each partition */
private[spark] def collectPartitions(): Array[Array[T]] = {
sc.runJob(this, (iter: Iterator[T]) => iter.toArray)
}
/**
* Mark this RDD for checkpointing. It will be saved to a file inside the checkpoint
* directory set with SparkContext.setCheckpointDir() and all references to its parent
* RDDs will be removed. This function must be called before any job has been
* executed on this RDD. It is strongly recommended that this RDD is persisted in
* memory, otherwise saving it on a file will require recomputation.
*/
def checkpoint() {
if (context.checkpointDir.isEmpty) {
throw new Exception("Checkpoint directory has not been set in the SparkContext")
} else if (checkpointData.isEmpty) {
checkpointData = Some(new RDDCheckpointData(this))
checkpointData.get.markForCheckpoint()
}
}
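  // Illustrative usage sketch (not part of the original source): assumes a SparkContext `sc` and a
  // reachable (hypothetical) checkpoint directory. Persisting first avoids recomputing the lineage
  // when the checkpoint file is written, as recommended above.
  //   sc.setCheckpointDir("/tmp/checkpoints")
  //   val cleaned = rdd.map(normalize).cache()  // `normalize` is a placeholder function
  //   cleaned.checkpoint()
  //   cleaned.count()                           // first job materializes and checkpoints the RDD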
/**
* Return whether this RDD has been checkpointed or not
*/
def isCheckpointed: Boolean = {
checkpointData.map(_.isCheckpointed).getOrElse(false)
}
/**
* Gets the name of the file to which this RDD was checkpointed
*/
def getCheckpointFile: Option[String] = {
checkpointData.flatMap(_.getCheckpointFile)
}
// =======================================================================
// Other internal methods and fields
// =======================================================================
private var storageLevel: StorageLevel = StorageLevel.NONE
/** Record user function generating this RDD. */
@transient private[spark] val origin = sc.getCallSite()
private[spark] def elementClassTag: ClassTag[T] = classTag[T]
private[spark] var checkpointData: Option[RDDCheckpointData[T]] = None
/** Returns the first parent RDD */
protected[spark] def firstParent[U: ClassTag] = {
dependencies.head.rdd.asInstanceOf[RDD[U]]
}
/** The [[org.apache.spark.SparkContext]] that this RDD was created on. */
def context = sc
// Avoid handling doCheckpoint multiple times to prevent excessive recursion
@transient private var doCheckpointCalled = false
/**
* Performs the checkpointing of this RDD by saving this. It is called by the DAGScheduler
* after a job using this RDD has completed (therefore the RDD has been materialized and
* potentially stored in memory). doCheckpoint() is called recursively on the parent RDDs.
*/
private[spark] def doCheckpoint() {
if (!doCheckpointCalled) {
doCheckpointCalled = true
if (checkpointData.isDefined) {
checkpointData.get.doCheckpoint()
} else {
dependencies.foreach(_.rdd.doCheckpoint())
}
}
}
/**
   * Changes the dependencies of this RDD from its original parents to a new RDD (`checkpointRDD`)
   * created from the checkpoint file, and forgets its old dependencies and partitions.
*/
private[spark] def markCheckpointed(checkpointRDD: RDD[_]) {
clearDependencies()
partitions_ = null
deps = null // Forget the constructor argument for dependencies too
}
/**
* Clears the dependencies of this RDD. This method must ensure that all references
   * to the original parent RDDs are removed to enable the parent RDDs to be garbage
* collected. Subclasses of RDD may override this method for implementing their own cleaning
* logic. See [[org.apache.spark.rdd.UnionRDD]] for an example.
*/
protected def clearDependencies() {
dependencies_ = null
}
/** A description of this RDD and its recursive dependencies for debugging. */
def toDebugString: String = {
def debugString(rdd: RDD[_], prefix: String = ""): Seq[String] = {
Seq(prefix + rdd + " (" + rdd.partitions.size + " partitions)") ++
rdd.dependencies.flatMap(d => debugString(d.rdd, prefix + " "))
}
    debugString(this).mkString("\n")
}
override def toString: String = "%s%s[%d] at %s".format(
Option(name).map(_ + " ").getOrElse(""),
getClass.getSimpleName,
id,
origin)
def toJavaRDD() : JavaRDD[T] = {
new JavaRDD(this)(elementClassTag)
}
}
| iiisthu/sparkSdn | core/src/main/scala/org/apache/spark/rdd/RDD.scala | Scala | apache-2.0 | 39,761 |
// Copyright 2017 EPFL DATA Lab (data.epfl.ch)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package squid
package feature
class NestedQuoting extends MyFunSuite {
import TestDSL.Predef._
test("Simple Nesting") {
assert(code"42.toString * 2" =~= code"42.toString * 2")
assert(code"42.toString * 2" =~= code"${ code"42.toString" } * 2")
}
test("Block Nesting") {
assert(code"42.toString * 2" =~= code"${ val n = code"42"; code"$n.toString" } * 2")
assert(code"42.toDouble.toString * 2" =~= code"${ val n = code"42.toDouble"; code"$n.toString" } * 2")
}
test("Double Nesting") {
assert(code"42.toDouble.toString * 2" =~= code"${ val str = code"${ val n = code"42"; code"$n.toDouble" }.toString"; str } * 2")
assert(code"42.toDouble.toString * 2" =~= code"${ val n = code"42"; val str = code"${ code"$n.toDouble" }.toString"; str } * 2")
}
}
| epfldata/squid | src/test/scala/squid/feature/NestedQuoting.scala | Scala | apache-2.0 | 1,474 |
package infra.piece.text
import infra.piece.core.{Piece, PieceKind}
import play.api.{Mode, Plugin}
import play.api.libs.json.{Json, Format}
import play.api.templates.Html
import scala.concurrent.{Future, ExecutionContext}
import akka.actor.{PoisonPill, Props, ActorRef, Actor}
import org.pegdown.{Extensions, PegDownProcessor}
import play.api.libs.concurrent.Akka
import akka.util.Timeout
import akka.pattern.ask
/**
* @author alari ([email protected])
* @since 08.05.14 14:46
*/
class TextKind(implicit app: play.api.Application) extends PieceKind("text") with Plugin {
private var processor: ActorRef = _
override def onStart() {
super.onStart()
try {
processor = Akka.system.actorOf(Props[MarkdownActor])
} catch {
case e: IllegalStateException if app.mode == Mode.Test =>
play.api.Logger.info("Creating an actor while shutting akka system down", e)
}
}
override def onStop() {
super.onStop()
if(processor != null) {
processor ! PoisonPill
processor = null
}
}
implicit val timeout = {
import scala.concurrent.duration._
Timeout(1 second)
}
override type P = TextPiece
override def handlePiece(implicit ec: ExecutionContext): PartialFunction[Piece, Future[P]] = {
case p: P if p.source.exists(_.trim.length > 0) && p.engine == "markdown" =>
(processor ? p.source.get).mapTo[String]
.map(t => p.copy(processed = Some(t)))
}
override def html(piece: P): Html = infra.piece.text.html.text(piece)
override val format: Format[P] = Json.format[P]
}
class MarkdownActor extends Actor {
val Processor = new PegDownProcessor(Extensions.ALL)
override def receive: Receive = {
case s: String => sender ! Processor.markdownToHtml(s)
}
} | alari/play-content | module-code/app/infra/piece/text/TextKind.scala | Scala | mit | 1,764 |
package com.github.akajuvonen.neuralnet_scala
import scala.math.exp
/** Sigmoid help tools used by NeuralNetwork class. */
object SigmoidTools {
/** Sigmoid function 1 / (1 + exp(-x)).
*
* @param x Input parameter x.
* @return Result of the sigmoid function.
*/
def sigmoid(x: Double): Double =
1.0 / (1 + exp(-x))
/** Sigmoid derivative function.
*
* @param x Sigmoid function result.
* @return Derivative of the sigmoid function.
*/
def sigmoidDerivative(x: Double): Double =
    x * (1 - x)
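  // Illustrative usage sketch (not part of the original source): note that sigmoidDerivative
  // expects a sigmoid *output*, not the raw input x.
  //   val y = SigmoidTools.sigmoid(0.0)          // 0.5
  //   val dy = SigmoidTools.sigmoidDerivative(y) // 0.25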
}
| akajuvonen/simple-neuralnet-scala | src/main/scala/SigmoidTools.scala | Scala | gpl-3.0 | 538 |
package mesosphere.util.state.mesos
import mesosphere.marathon.StoreCommandFailedException
import mesosphere.util.BackToTheFuture.Timeout
import mesosphere.util.ThreadPoolContext
import mesosphere.util.state.{ PersistentEntity, PersistentStore }
import org.apache.log4j.Logger
import org.apache.mesos.state.{ Variable, State }
import scala.concurrent.Future
import scala.concurrent.duration.Duration
import scala.collection.JavaConverters._
import scala.util.control.NonFatal
class MesosStateStore(state: State, timeout: Duration) extends PersistentStore {
private[this] val log = Logger.getLogger(getClass)
implicit val timeoutDuration = Timeout(timeout)
implicit val ec = ThreadPoolContext.context
import mesosphere.util.BackToTheFuture.futureToFuture
override def load(key: ID): Future[Option[PersistentEntity]] = {
futureToFuture(state.fetch(key))
.map(throwOnNull)
.map { variable => if (entityExists(variable)) Some(MesosStateEntity(key, variable)) else None }
.recover(mapException(s"Can not load entity with key $key"))
}
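  // Illustrative call-site sketch (not part of the original source; assumes the store's key type
  // ID is a String-like identifier):
  //   store.load("app:myApp").map {
  //     case Some(entity) => entity.bytes
  //     case None         => IndexedSeq.empty[Byte]
  //   }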
override def create(key: ID, content: IndexedSeq[Byte]): Future[PersistentEntity] = {
futureToFuture(state.fetch(key))
.map(throwOnNull)
.flatMap { variable =>
if (entityExists(variable)) throw new StoreCommandFailedException(s"Entity with id $key already exists!")
else futureToFuture(state.store(variable.mutate(content.toArray))).map(MesosStateEntity(key, _))
}
.recover(mapException(s"Can not create entity with key $key"))
}
override def update(entity: PersistentEntity): Future[PersistentEntity] = entity match {
case MesosStateEntity(id, v) =>
futureToFuture(state.store(v))
.recover(mapException(s"Can not update entity with key ${entity.id}"))
.map(throwOnNull)
.map(MesosStateEntity(id, _))
case _ => throw new IllegalArgumentException("Can not handle this kind of entity")
}
override def delete(key: ID): Future[Boolean] = {
futureToFuture(state.fetch(key))
.map(throwOnNull)
.flatMap { variable =>
futureToFuture(state.expunge(variable))
.map{
case java.lang.Boolean.TRUE => true
case java.lang.Boolean.FALSE => false
}
}
.recover(mapException(s"Can not delete entity with key $key"))
}
override def allIds(): Future[Seq[ID]] = {
futureToFuture(state.names())
.map(_.asScala.toSeq)
.recover {
case NonFatal(ex) =>
// TODO: Currently this code path is taken when the zookeeper path does not exist yet. It would be nice
// to not log this as a warning.
//
// Unfortunately, this results in a NullPointerException in `throw e.getCause()` in BackToTheFuture because
// the native mesos code returns an ExecutionException without cause. Therefore, we cannot robustly
// differentiate between exceptions which are "normal" and exceptions which indicate real errors
// and we have to log them all.
log.warn(s"Exception while calling $getClass.allIds(). " +
s"This problem should occur only with an empty zookeeper state. " +
s"In that case, you can ignore this message", ex)
Seq.empty[ID]
}
}
private[this] def entityExists(variable: Variable): Boolean = variable.value().nonEmpty
private[this] def throwOnNull[T](t: T): T = {
Option(t) match {
case Some(value) => value
case None => throw new StoreCommandFailedException("Null returned from state store!")
}
}
private[this] def mapException[T](message: String): PartialFunction[Throwable, T] = {
case NonFatal(ex) => throw new StoreCommandFailedException(message, ex)
}
}
case class MesosStateEntity(id: String, variable: Variable) extends PersistentEntity {
override def bytes: IndexedSeq[Byte] = variable.value()
override def withNewContent(bytes: IndexedSeq[Byte]): PersistentEntity = {
copy(variable = variable.mutate(bytes.toArray))
}
}
| EasonYi/marathon | src/main/scala/mesosphere/util/state/mesos/MesosStateStore.scala | Scala | apache-2.0 | 4,050 |
package by.verkpavel.grafolnet.service
import by.verkpavel.grafolnet.model.{ImageRequest, ImageResponse}
import spray.json.{DefaultJsonProtocol, JsFalse, JsNumber, JsString, JsTrue, JsValue, JsonFormat}
trait ServiceJsonProtocol extends DefaultJsonProtocol {
implicit object AnyJsonFormat extends JsonFormat[Any] {
def write(x: Any) = x match {
case n: Int => JsNumber(n)
case s: String => JsString(s)
      case b: Boolean => if (b) JsTrue else JsFalse
case _ => JsString("Parse error")
}
def read(value: JsValue) = value match {
case JsNumber(n) => n.intValue()
case JsString(s) => s
case JsTrue => true
case JsFalse => false
}
}
implicit val publicItemFmt = jsonFormat2(ImageResponse)
implicit val publicItemSummaryFmt = jsonFormat3(ImageRequest)
}
| VerkhovtsovPavel/BSUIR_Labs | Diploma/diploma-latex/src/fulllisting/ServiceJsonProtocol.scala | Scala | mit | 864 |
package org.monarchinitiative.dosdp
import zio.logging._
import zio.test.Assertion._
import zio.test._
object RegexTest extends DefaultRunnableSpec {
  def spec = suite("Regex substitution")(
testM("RegexSub should replace values correctly") {
val definition = RegexSub("regulated_activity", "regulated_activity_munged", "(.+) activity", raw"\1")
val eDefinition = ExpandedRegexSub(definition)
for {
a <- eDefinition.substitute("kinase activity")
b <- eDefinition.substitute("foo")
c <- eDefinition.substitute("activity kinase")
} yield assertTrue(a == "kinase") && assertTrue(b == "foo") && assertTrue(c == "activity kinase")
},
testM("RegexSub should replace multiple values correctly") {
val definition = RegexSub("regulated_activity", "regulated_activity_munged", "(.+) activity (.+)", raw"\2 and then \1")
val eDefinition = ExpandedRegexSub(definition)
assertM(eDefinition.substitute("kinase activity promoter"))(equalTo("promoter and then kinase"))
}
).provideCustomLayer(Logging.consoleErr())
}
| INCATools/dosdp-tools | src/test/scala/org/monarchinitiative/dosdp/RegexTest.scala | Scala | mit | 1,103 |