| code (stringlengths 5–1M) | repo_name (stringlengths 5–109) | path (stringlengths 6–208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5–1M) |
---|---|---|---|---|---|
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package generated.scala
class IntFloatSupervisedTrainingSet(val _data: IntDenseMatrix, val _labels: FloatDenseVector)
| tesendic/Relite | src/generated/scala/IntFloatSupervisedTrainingSet.scala | Scala | agpl-3.0 | 1,108 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import kafka.log.LogConfig
import kafka.server.KafkaConfig
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.TopicPartition
import org.junit.jupiter.api.Assertions.{assertEquals, assertNull, assertThrows}
import org.junit.jupiter.api.Test
import java.util
import java.util.{Collections, Optional, Properties}
import scala.annotation.nowarn
import scala.jdk.CollectionConverters._
class ConsumerWithLegacyMessageFormatIntegrationTest extends AbstractConsumerTest {
override protected def brokerPropertyOverrides(properties: Properties): Unit = {
// legacy message formats are only supported with IBP < 3.0
properties.put(KafkaConfig.InterBrokerProtocolVersionProp, "2.8")
}
@nowarn("cat=deprecation")
@Test
def testOffsetsForTimes(): Unit = {
val numParts = 2
val topic1 = "part-test-topic-1"
val topic2 = "part-test-topic-2"
val topic3 = "part-test-topic-3"
val props = new Properties()
props.setProperty(LogConfig.MessageFormatVersionProp, "0.9.0")
createTopic(topic1, numParts, 1)
// Topic2 is in old message format.
createTopic(topic2, numParts, 1, props)
createTopic(topic3, numParts, 1)
val consumer = createConsumer()
// Test negative target time
assertThrows(classOf[IllegalArgumentException],
() => consumer.offsetsForTimes(Collections.singletonMap(new TopicPartition(topic1, 0), -1)))
val producer = createProducer()
val timestampsToSearch = new util.HashMap[TopicPartition, java.lang.Long]()
var i = 0
for (topic <- List(topic1, topic2, topic3)) {
for (part <- 0 until numParts) {
val tp = new TopicPartition(topic, part)
// In sendRecords(), each message will have key, value and timestamp equal to the sequence number.
sendRecords(producer, numRecords = 100, tp, startingTimestamp = 0)
timestampsToSearch.put(tp, (i * 20).toLong)
i += 1
}
}
// The timestampToSearch map should contain:
// (topic1Partition0 -> 0,
    // topic1Partition1 -> 20,
// topic2Partition0 -> 40,
// topic2Partition1 -> 60,
// topic3Partition0 -> 80,
// topic3Partition1 -> 100)
val timestampOffsets = consumer.offsetsForTimes(timestampsToSearch)
val timestampTopic1P0 = timestampOffsets.get(new TopicPartition(topic1, 0))
assertEquals(0, timestampTopic1P0.offset)
assertEquals(0, timestampTopic1P0.timestamp)
assertEquals(Optional.of(0), timestampTopic1P0.leaderEpoch)
val timestampTopic1P1 = timestampOffsets.get(new TopicPartition(topic1, 1))
assertEquals(20, timestampTopic1P1.offset)
assertEquals(20, timestampTopic1P1.timestamp)
assertEquals(Optional.of(0), timestampTopic1P1.leaderEpoch)
assertNull(timestampOffsets.get(new TopicPartition(topic2, 0)), "null should be returned when message format is 0.9.0")
assertNull(timestampOffsets.get(new TopicPartition(topic2, 1)), "null should be returned when message format is 0.9.0")
val timestampTopic3P0 = timestampOffsets.get(new TopicPartition(topic3, 0))
assertEquals(80, timestampTopic3P0.offset)
assertEquals(80, timestampTopic3P0.timestamp)
assertEquals(Optional.of(0), timestampTopic3P0.leaderEpoch)
assertNull(timestampOffsets.get(new TopicPartition(topic3, 1)))
}
@nowarn("cat=deprecation")
@Test
def testEarliestOrLatestOffsets(): Unit = {
val topic0 = "topicWithNewMessageFormat"
val topic1 = "topicWithOldMessageFormat"
val prop = new Properties()
    // the idempotent producer doesn't support old message format versions
prop.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false")
val producer = createProducer(configOverrides = prop)
createTopicAndSendRecords(producer, topicName = topic0, numPartitions = 2, recordsPerPartition = 100)
val props = new Properties()
props.setProperty(LogConfig.MessageFormatVersionProp, "0.9.0")
createTopic(topic1, numPartitions = 1, replicationFactor = 1, props)
sendRecords(producer, numRecords = 100, new TopicPartition(topic1, 0))
val t0p0 = new TopicPartition(topic0, 0)
val t0p1 = new TopicPartition(topic0, 1)
val t1p0 = new TopicPartition(topic1, 0)
val partitions = Set(t0p0, t0p1, t1p0).asJava
val consumer = createConsumer()
val earliests = consumer.beginningOffsets(partitions)
assertEquals(0L, earliests.get(t0p0))
assertEquals(0L, earliests.get(t0p1))
assertEquals(0L, earliests.get(t1p0))
val latests = consumer.endOffsets(partitions)
assertEquals(100L, latests.get(t0p0))
assertEquals(100L, latests.get(t0p1))
assertEquals(100L, latests.get(t1p0))
}
}
| TiVo/kafka | core/src/test/scala/integration/kafka/api/ConsumerWithLegacyMessageFormatIntegrationTest.scala | Scala | apache-2.0 | 5,470 |
object test {
// this
class adfa(a: Int) {
def this() {
}
}
new adfa()
val x = new adfa(2)
val y = new /*caret*/adfa()
}
/*
object test {
// this
class NameAfterRename(a: Int) {
def this() {
}
}
new NameAfterRename()
val x = new NameAfterRename(2)
val y = new /*caret*/NameAfterRename()
}
*/ | ilinum/intellij-scala | testdata/rename/class/Constructor2.scala | Scala | apache-2.0 | 337 |
package scrabble;
case class Game private (
players: Map[Int, Player],
dictionary: Dictionary,
board: Board,
playersMove: Int, // Index into the list of players
bag: LetterBag,
consecutivePasses: Int,
moves: Int) {
def getPlayer(playerNo: Int) : Option[Player] = players get playerNo
val currentPlayer = players get playersMove
val nextPlayerNo: Int = (playersMove + 1) % (players.size)
}
object Game {
/**
* Initialises a new game, with a list of player names, with a language specific dictionary and letter bag.
* There must be between 2 and 4 players. Returns None if this condition isn't met.
*/
def make(playerNames: List[String], dictionary: Dictionary, letterbag: LetterBag): Option[Game] = {
if (!(2 to 4 contains playerNames.size)) None else {
// Distribute letters from the bag to the players
val (players: List[(Int, Player)], remainingBag: LetterBag, player_no: Int) =
playerNames.foldLeft((List.empty[(Int, Player)], letterbag, 0)) {
case ((playerList, thebag, player_no), name) =>
val (letters: List[Tile], bag) = thebag.remove(7)
val player = Player(letters, name, 0)
((player_no -> player) :: playerList, bag, player_no + 1)
}
Some(Game(players.toMap, dictionary, Board.init, 0, remainingBag, 0, 0))
}
}
}
| ornicar/scalascrabble | src/main/scala/Game.scala | Scala | gpl-2.0 | 1,360 |
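The Game.make factory above threads a shrinking LetterBag through a foldLeft while numbering each player. Below is a minimal, self-contained sketch of that same fold pattern using plain lists and made-up names (Rack, deal) instead of the real Dictionary/LetterBag/Board types, so it runs on its own:

object DealSketch extends App {
  final case class Rack(letters: List[Char], name: String)

  // Thread the remaining "bag" and a running player index through the fold.
  def deal(names: List[String], bag: List[Char], perPlayer: Int): Option[(Map[Int, Rack], List[Char])] =
    if (!(2 to 4 contains names.size)) None
    else {
      val (racks, rest, _) = names.foldLeft((List.empty[(Int, Rack)], bag, 0)) {
        case ((acc, remaining, idx), name) =>
          val (letters, left) = remaining.splitAt(perPlayer)
          ((idx -> Rack(letters, name)) :: acc, left, idx + 1)
      }
      Some((racks.toMap, rest))
    }

  println(deal(List("ann", "bob"), ('a' to 'z').toList, 7)) // Some(...), 12 letters left in the bag
  println(deal(List("solo"), ('a' to 'z').toList, 7))       // None: needs 2 to 4 players
}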
/**
* Copyright (C) 2010-2012 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.kernel.config
/**
* Interface to the administration of Service Limits.
*
* Limits may be applied to any operation supported by the real-time event,
* adapter scanning or inventory submission services. The meaning of any
* limit is tied to the limiter that uses it, which is outside the
* responsibility of the ServiceLimitsStore.
* The responsibilities of a ServiceLimitsStore are to: provide mechanisms to
* define new limits, set limits at each scope (see below), cascade hard limit
* changes down through the chain, and report the effective limit value -
* typically with respect to a pair associated with the report from the
* representative of the client application (e.g. scan adapter).
*
* There are three scopes for limits: System, Domain and Pair.
*
* <h3>Configuration</h3>
* A System Hard Limit constrains all more specific limits of the same name both
* initially (at the time the other limits are set) and retrospectively
* (whenever the System Hard Limit is changed). The limits it constrains are:
* SystemDefaultLimit, DomainHardLimit, DomainDefaultLimit and PairLimit.
*
* Similarly, a Domain Hard Limit constrains the value of the following limits:
* DomainDefaultLimit and PairLimit.
*
* <h3>Effective Limit</h3>
* In determining an effective limit for a pair, the following strategy should
* apply:
<ol>
<li>If there is a corresponding PairLimit defined, then the value of that
limit is the effective limit;</li>
<li>Otherwise, if there is a DomainDefaultLimit corresponding to the domain
of the pair, then the value of that limit is the effective limit;</li>
<li>Otherwise, the value of the relevant SystemDefaultLimit is the effective
limit.</li>
</ol>
*/
import net.lshift.diffa.schema.servicelimits._
trait ServiceLimitsStore extends ServiceLimitsView {
def defineLimit(limit: ServiceLimit): Unit
def deleteDomainLimits(space:Long): Unit
def deletePairLimitsByDomain(space:Long): Unit
def setSystemHardLimit(limit: ServiceLimit, limitValue: Int): Unit
def setSystemDefaultLimit(limit: ServiceLimit, limitValue: Int): Unit
def setDomainHardLimit(space:Long, limit: ServiceLimit, limitValue: Int): Unit
def setDomainDefaultLimit(space:Long, limit: ServiceLimit, limitValue: Int): Unit
def setPairLimit(space:Long, pairKey: String, limit: ServiceLimit, limitValue: Int): Unit
def getSystemHardLimitForName(limit: ServiceLimit): Option[Int]
def getSystemDefaultLimitForName(limit: ServiceLimit): Option[Int]
def getDomainHardLimitForDomainAndName(space:Long, limit: ServiceLimit): Option[Int]
def getDomainDefaultLimitForDomainAndName(space:Long, limit: ServiceLimit): Option[Int]
def getPairLimitForPairAndName(space:Long, pairKey: String, limit: ServiceLimit): Option[Int]
def getEffectiveLimitByName(limit: ServiceLimit) : Int = getSystemDefaultLimitForName(limit).getOrElse(Unlimited.value)
def getEffectiveLimitByNameForDomain(space:Long, limit: ServiceLimit) : Int =
getDomainDefaultLimitForDomainAndName(space, limit).getOrElse(
getEffectiveLimitByName(limit))
def getEffectiveLimitByNameForPair(space:Long, pairKey: String, limit: ServiceLimit) : Int =
getPairLimitForPairAndName(space, pairKey, limit).getOrElse(
getEffectiveLimitByNameForDomain(space, limit))
}
trait ServiceLimitsView extends SystemServiceLimitsView with DomainServiceLimitsView with PairServiceLimitsView
trait SystemServiceLimitsView {
def getEffectiveLimitByName(limit: ServiceLimit) : Int
}
trait DomainServiceLimitsView {
def getEffectiveLimitByNameForDomain(space:Long, limit: ServiceLimit) : Int
}
trait PairServiceLimitsView {
def getEffectiveLimitByNameForPair(space:Long, pairKey: String, limit:ServiceLimit) : Int
}
| 0x6e6562/diffa | kernel/src/main/scala/net/lshift/diffa/kernel/config/ServiceLimitsStore.scala | Scala | apache-2.0 | 4,378 |
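As a rough illustration of the effective-limit cascade described in the ServiceLimitsStore scaladoc (pair limit first, then domain default, then system default), here is a self-contained sketch backed by in-memory maps. The limit name and values are illustrative only, not Diffa's real ServiceLimit definitions:

object EffectiveLimitSketch extends App {
  final case class Limits(
    systemDefault: Map[String, Int],
    domainDefault: Map[(Long, String), Int],
    pairLimit:     Map[(Long, String, String), Int]) {

    // Int.MaxValue stands in for the "Unlimited" sentinel value.
    def effectiveForSystem(limit: String): Int =
      systemDefault.getOrElse(limit, Int.MaxValue)

    def effectiveForDomain(space: Long, limit: String): Int =
      domainDefault.getOrElse((space, limit), effectiveForSystem(limit))

    def effectiveForPair(space: Long, pair: String, limit: String): Int =
      pairLimit.getOrElse((space, pair, limit), effectiveForDomain(space, limit))
  }

  val limits = Limits(
    systemDefault = Map("ScanResponseSizeLimit" -> 100),
    domainDefault = Map((1L, "ScanResponseSizeLimit") -> 50),
    pairLimit     = Map((1L, "pairA", "ScanResponseSizeLimit") -> 10))

  println(limits.effectiveForPair(1L, "pairA", "ScanResponseSizeLimit")) // 10: pair limit wins
  println(limits.effectiveForPair(1L, "pairB", "ScanResponseSizeLimit")) // 50: falls back to domain default
  println(limits.effectiveForPair(2L, "pairB", "ScanResponseSizeLimit")) // 100: falls back to system default
}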
/** Implemented the RNG and LCG classes
* for the package fpinscala.state.rand.
*
* Note: Rand is a type alias for
* fpinscala.state.State[RNG, A].
* We define this package-wide in
* the rand package object.
*
*/
package grokScala.grok.rand
/** Base class for pseudo-random
  * number generators.
  */
abstract class RNG {
def nextInt: (Int, RNG)
}
/** Companion object for RNG class */
object RNG {
/** State action to generate a random Int.
*
* State((state: RNG) => (value: Int, nextState: RNG))
*
* Aside: int(rng: RNG) is a random variable, in the sense
* of probability theory. It is a function which maps
* values from some probability space (the possible
* values of some subclass of RNG) to the space of
* 32-bit signed integer values (Int).
*
* For the LCG subclass, this mapping is uniform, i.e.
  * it is equally likely to get any possible Int value.
*
* Note: Have to use the "new" keyword otherwise the
* compiler thinks I am trying to use the apply
* method on the State companion object instead of
* the State[RNG, Int] case class constructor.
*
*/
def int: Rand[Int] = new Rand[Int](_.nextInt)
/** Random action to generate a list of Int */
def ints(count: Int): Rand[List[Int]] =
Rand.sequence(List.fill(count)(int))
/** Generate a random integer between
  * 0 and Int.MaxValue (inclusive).
*/
def nonNegativeInt: Rand[Int] = new Rand(rng =>
rng.nextInt match {
case (ran, rng2) if ran >= 0 => ( ran, rng2)
case (ran, rng2) if ran == Int.MinValue => ( 0, rng2)
case (ran, rng2) => (-ran, rng2)
})
/** Generate an even random integer between
  * 0 and Int.MaxValue (inclusive).
*/
def nonNegativeEvenInt: Rand[Int] =
nonNegativeInt map { ii => ii - ii%2 }
/** Random action non-negative Int less than n
*
* 1. Keeps things uniformly distributed over
* the range of the random variable even
* in the case n does not evenly divide
* the integer value Int.MaxValue + 1.
* 2. Algorithm assumes that n > 0.
* 3. Stack overflow can happen if n < 0.
* 4. For n = 0, you will get a divide by 0
* runtime java.lang.ArithmeticException.
* 5. For efficiency, client responsible to
* ensure that n > 0.
*
* Caller be warned, don't call with n <= 0.
*
*/
def nonNegativeIntLessThan(n: Int): Rand[Int] =
nonNegativeInt flatMap { ii =>
val mod = ii % n
if (ii + ((n-1) - mod) >= 0)
Rand.unit(mod)
else
nonNegativeIntLessThan(n)
}
/** Random Int within the range start <= random_variable < end
*
* Pathological cases:
* If start = end, always generate start.
  * If start > end, generate start >= random_variable > end
*
  * Works great unless (end - start) > Int.MaxValue, in which case things
  * are no longer transparently simple. If only Java had unsigned types!
*
*/
def exclusiveIntRange(start: Int, end: Int): Rand[Int] = {
val len = if (start != end) end - start else 1
val sign = len/len.abs
nonNegativeIntLessThan(len.abs) map {
(ii: Int) => start + sign*ii
}
}
/** Generate a random Double between
* 0 (inclusive) and 1 (exclusive).
*/
def double: Rand[Double] = {
val d = Int.MaxValue.toDouble + 1.0
nonNegativeInt map { _.toDouble/d }
}
/** Generate a random boolean */
def boolean: Rand[Boolean] =
int map {ii => ii % 2 == 0}
}
/** Extend RNG by Implementing the same Linear Congruence
* Generator based pseudo-random number generating
* algorithm used by java.util.Random and glibc.
*
* newSeed = (a*seed + c) % m
*
* where
* a = 25214903917 = 5DEECE66D
* c = 11 = B
* m = 2^48 = 281474976710656 = FFFFFFFFFFFF + 1
*
* Pseudo-random int value = bits 47...16 of newSeed.
*
* The higher order bits are less correlated than the
* lower order bits. We shift by 16 bits to get a
* 32-bit value.
*
  * A bit-& optimization is used for the mod
  * operator. Basically we compute (_ % 2^48) by ignoring
  * all the bits of a long value past bit 47.
*
* According to Knuth, you will get the maximum period
* of m, if and only if, the following conditions hold:
*
* 1. m and c are relatively prime,
* 2. a-1 is divisible by all prime factors of m,
* 3. a-1 is divisible by 4 if m is divisible by 4.
*/
case class LCG(seed: Long) extends RNG {
private val a = 0x5DEECE66DL
private val c = 0xBL
private val modMask = 0xFFFFFFFFFFFFL
def nextInt: (Int,RNG) = {
val newSeed = (a*seed + c) & modMask
val nextRNG = LCG(newSeed)
val n = (newSeed >>> 16).toInt
(n, nextRNG)
}
}
| grscheller/scheller-linux-archive | grok/Scala2/learnScala/multiPackage/src/main/scala/multiPackage/rand/RNG.scala | Scala | bsd-3-clause | 4,873 |
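The LCG recurrence documented above (newSeed = (a*seed + c) mod 2^48, with the next Int taken from bits 47..16) can be checked in isolation. A minimal standalone sketch, independent of the Rand/State machinery in this package:

object LcgSketch extends App {
  final case class Lcg(seed: Long) {
    private val a = 0x5DEECE66DL
    private val c = 0xBL
    private val modMask = 0xFFFFFFFFFFFFL // keep only the low 48 bits, i.e. mod 2^48

    def nextInt: (Int, Lcg) = {
      val newSeed = (a * seed + c) & modMask
      ((newSeed >>> 16).toInt, Lcg(newSeed))
    }
  }

  // Thread the generator state explicitly: same seed, same sequence.
  val (n1, g1) = Lcg(42L).nextInt
  val (n2, _)  = g1.nextInt
  println(s"$n1, $n2")               // deterministic sequence for seed 42
  println(Lcg(42L).nextInt._1 == n1) // true: purely functional, no hidden state
}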
package org.fusesource.scalate.filter.less
import org.fusesource.scalate.test.TemplateTestSupport
class LessFilterTest extends TemplateTestSupport {
test("inline") {
assertUriOutputContains("/org/fusesource/scalate/filter/less/inline.scaml", """
<style type="text/css">
p {
text-color: #0000ff;
}
</style>
""".trim)
}
test("inline include") {
assertUriOutputContains("/org/fusesource/scalate/filter/less/inline_include.scaml", """
<style type="text/css">
div.section {
border: solid 1px blue;
border-radius: 5px;
-moz-border-radius: 5px;
-webkit-border-radius: 5px;
}
</style>
""".trim)
}
} | janurag/scalate | scalate-less/src/test/scala/org/fusesource/scalate/filter/less/LessFilterTest.scala | Scala | apache-2.0 | 621 |
package mesosphere.marathon.core.appinfo
import mesosphere.marathon.core.readiness.ReadinessCheckResult
import mesosphere.marathon.state.{ AppDefinition, Identifiable, TaskFailure }
import scala.collection.immutable.Seq
/**
* An app definition with optional additional data.
*
* You can specify which data you want via the AppInfo.Embed types.
*/
case class AppInfo(
app: AppDefinition,
maybeTasks: Option[Seq[EnrichedTask]] = None,
maybeCounts: Option[TaskCounts] = None,
maybeDeployments: Option[Seq[Identifiable]] = None,
maybeReadinessCheckResults: Option[Seq[ReadinessCheckResult]] = None,
maybeLastTaskFailure: Option[TaskFailure] = None,
maybeTaskStats: Option[TaskStatsByVersion] = None)
object AppInfo {
sealed trait Embed
object Embed {
case object Tasks extends Embed
case object Deployments extends Embed
case object Readiness extends Embed
case object Counts extends Embed
case object LastTaskFailure extends Embed
case object TaskStats extends Embed
}
}
| ss75710541/marathon | src/main/scala/mesosphere/marathon/core/appinfo/AppInfo.scala | Scala | apache-2.0 | 1,024 |
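The AppInfo.Embed values above act as flags for which optional sections of an AppInfo get populated. A hedged, self-contained sketch of that idea follows; the Info case class and resolve function are illustrative stand-ins, not Marathon's actual AppInfoService API:

object EmbedSketch extends App {
  sealed trait Embed
  case object Tasks  extends Embed
  case object Counts extends Embed

  final case class Info(maybeTasks: Option[Seq[String]] = None,
                        maybeCounts: Option[Int] = None)

  // Only fill in the sections the caller asked for via the embed set.
  def resolve(embeds: Set[Embed]): Info =
    Info(
      maybeTasks  = if (embeds(Tasks))  Some(Seq("task-1", "task-2")) else None,
      maybeCounts = if (embeds(Counts)) Some(2) else None)

  println(resolve(Set(Tasks)))         // only tasks embedded
  println(resolve(Set(Tasks, Counts))) // both embedded
}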
import spray.json._
trait JsonProtocols extends DefaultJsonProtocol {
protected implicit val emailFormat = new JsonFormat[EmailAddress] {
override def write(obj: EmailAddress): JsValue = JsString(obj.address)
override def read(json: JsValue): EmailAddress = json match {
case JsString(value) => EmailAddress(value)
case _ => deserializationError("Email address expected")
}
}
protected implicit val passwordRegisterRequestFormat = jsonFormat2(PasswordRegisterRequest)
protected implicit val passwordLoginRequestFormat = jsonFormat2(PasswordLoginRequest)
protected implicit val resetRequestFormat = jsonFormat2(PasswordResetRequest)
protected implicit val identityFormat = jsonFormat1(Identity)
protected implicit val tokenFormat = jsonFormat4(Token)
protected implicit val loginRequestFormat = jsonFormat2(InternalLoginRequest)
protected implicit val reloginRequestFormat = jsonFormat2(InternalReloginRequest)
}
| ldrygala/reactive-microservices | auth-password/src/main/scala/JsonProtocols.scala | Scala | mit | 957 |
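The emailFormat above serializes an EmailAddress as a bare JSON string instead of a JSON object. A self-contained usage sketch of the same pattern, defining a local EmailAddress case class since the real one lives elsewhere in that project:

import spray.json._

object EmailJsonSketch extends App {
  final case class EmailAddress(address: String)

  // Same custom-format shape as in JsonProtocols: write as JsString, read it back.
  implicit val emailFormat: JsonFormat[EmailAddress] = new JsonFormat[EmailAddress] {
    def write(obj: EmailAddress): JsValue = JsString(obj.address)
    def read(json: JsValue): EmailAddress = json match {
      case JsString(value) => EmailAddress(value)
      case _               => deserializationError("Email address expected")
    }
  }

  val json = EmailAddress("ada@example.com").toJson
  println(json)                         // "ada@example.com"
  println(json.convertTo[EmailAddress]) // EmailAddress(ada@example.com)
}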
package tk.monnef.mcmapper.test
import org.scalatest.{FlatSpec, Matchers}
import tk.monnef.mcmapper._
import tk.monnef.mcmapper.ClassMapping
import tk.monnef.mcmapper.FieldMapping
import tk.monnef.mcmapper.RawDataMerger.SubMerge
import scala.collection.immutable.{HashSet, HashMap}
class RawDataMergerTests extends FlatSpec with Matchers {
import TestHelper._
import tk.monnef.mcmapper.MappingSide._
def o(a: MappingObject, b: MappingObject) = a.obf.whole < b.obf.whole
def removeCommentAndFullName(in: FieldMapping): FieldMapping = in.copy(comment = "", full = PathItem.empty)
def removeCommentAndFullName(in: MethodMapping): MethodMapping = in.copy(comment = "", full = PathItem.empty)
"RawDataMerger" should "construct classes" in {
val r = RawDataMerger.merge(
List(
List("CL:", "a", "x/y"),
List("CL:", "b", "q/w", "#C")
)
, List(), List() /*, skipFinalCheck = true*/)
val c = r.classes
c.size shouldBe 2
c.toList.sortWith(o) shouldEqual List(ClassMapping("a", "x/y", BOTH), ClassMapping("b", "q/w", CLIENT)).sortWith(o)
}
it should "construct partially fields" in {
val r = RawDataMerger.merge(
List(
List("FD:", "w/t", "n/m/f0", "#C"),
List("FD:", "q/t", "n/m/f1")
), List(), List() /*, skipFinalCheck = true*/
)
val c = r.fields
c.size shouldBe 2
c.toList.sortWith(o).map(removeCommentAndFullName) shouldEqual List(FieldMapping("w/t", "n/m/f0", "", "", CLIENT), FieldMapping("q/t", "n/m/f1", "", "", BOTH)).sortWith(o)
}
it should "construct partially methods" in {
val r = RawDataMerger.merge(
List(
List("MD:", "w/t", "(ZLlx;)Z", "n/m/func_0", "(ZLw/t/c0;)Z", "#C"),
List("MD:", "q/t", "(ZLq;)V", "n/m/func_1", "(ZLw/t/c1;)V")
), List(), List() /*, skipFinalCheck = true*/
)
val c = r.methods
c.size shouldBe 2
val expected = List(MethodMapping("w/t", "n/m/func_0", "", "(ZLlx;)Z", "(ZLw/t/c0;)Z", "", CLIENT), MethodMapping("q/t", "n/m/func_1", "", "(ZLq;)V", "(ZLw/t/c1;)V", "", BOTH))
c.toList.sortWith(o).map(removeCommentAndFullName) shouldEqual expected.sortWith(o)
}
it should "construct fully fields" in {
val r = RawDataMerger.merge(
List(
List("FD:", "w/t", "n/m/f0", "#C"),
List("FD:", "q/t", "n/m/f1")
), List(
//srg, name, side, comment
List("f1", "full_1", "0", ""),
List("f0", "full_0", "1", "xxx")
)
, List()
)
val c = r.fields
c.size shouldBe 2
c.toList.sortWith(o) shouldEqual List(FieldMapping("w/t", "n/m/f0", "full_0", "xxx", CLIENT), FieldMapping("q/t", "n/m/f1", "full_1", "", BOTH)).map(_.constructWholeFull).sortWith(o)
}
it should "construct fully methods" in {
val r = RawDataMerger.merge(
List(
List("MD:", "w/t", "(ZLlx;)Z", "n/m/func_0", "(ZLw/t/c0;)Z", "#C"),
List("MD:", "q/t", "(ZLq;)V", "n/m/func_1", "(ZLw/t/c1;)V")
), List(), List(
List("func_0", "full_0", "0", "c"),
List("func_1", "full_1", "2", "")
)
)
val c = r.methods
c.size shouldBe 2
val expected = List(MethodMapping("w/t", "n/m/func_0", "full_0", "(ZLlx;)Z", "(ZLw/t/c0;)Z", "c", CLIENT), MethodMapping("q/t", "n/m/func_1", "full_1", "(ZLq;)V", "(ZLw/t/c1;)V", "", BOTH)).map(_.constructWholeFull)
c.toList.sortWith(o) shouldEqual expected.sortWith(o)
}
"SubMerge" should "support adding mappings to a cache map" in {
val item = FieldMapping("xxx/a", "f0", "", "", BOTH)
var r = SubMerge.addMapMapping(HashMap(), item)
r.size shouldBe 1
r shouldEqual HashMap("f0" -> HashSet(item))
val item2 = FieldMapping("yyy/b", "f1", "", "", CLIENT)
r = SubMerge.addMapMapping(r, item2)
r.size shouldBe 2
r shouldEqual HashMap("f0" -> HashSet(item), "f1" -> HashSet(item2))
val item3 = FieldMapping("zzz/qwe", "x/y/z/f1", "", "", SERVER) // item2.shortSrg == item3.shortSrg
r = SubMerge.addMapMapping(r, item3)
r.size shouldBe 2
r shouldEqual HashMap("f0" -> HashSet(item), "f1" -> HashSet(item2, item3))
}
it should "support removing mappings from a cache map" in {
val item = FieldMapping("xxx/a", "f0", "", "", BOTH)
val item2 = FieldMapping("yyy/b", "f1", "", "", CLIENT)
val item3 = FieldMapping("zzz/qwe", "x/y/z/f1", "", "", SERVER) // item2.shortSrg == item3.shortSrg
val m = HashMap("f0" -> HashSet(item), "f1" -> HashSet(item2, item3))
var r = SubMerge.removeMapMapping(m, item2)
r.size shouldBe 2
r shouldEqual HashMap("f0" -> HashSet(item), "f1" -> HashSet(item3))
r = SubMerge.removeMapMapping(r, item3)
r.size shouldBe 1
r shouldEqual HashMap("f0" -> HashSet(item))
}
it should "find mappings from short srg" in {
val item = FieldMapping("xxx/a", "f0", "", "", BOTH)
val item2 = FieldMapping("yyy/b", "f1", "", "", CLIENT)
val item3 = FieldMapping("zzz/qwe", "x/y/z/f1", "", "", SERVER) // item2.shortSrg == item3.shortSrg
val m = HashMap("f0" -> HashSet(item), "f1" -> HashSet(item2, item3))
SubMerge.findByShortSrg("f0", m) shouldEqual HashSet(item)
SubMerge.findByShortSrg("f1", m) shouldEqual HashSet(item2, item3)
SubMerge.findByShortSrg("f999", m) shouldEqual HashSet.empty
}
it should "properly form caching mappings" in {
val item = FieldMapping("xxx/a", "f0", "", "", BOTH)
val item2 = FieldMapping("yyy/b", "f1", "", "", CLIENT)
val item3 = FieldMapping("zzz/qwe", "x/y/z/f1", "", "", SERVER) // item2.shortSrg == item3.shortSrg
var a = SubMerge()
a.fieldMapping.size shouldBe 0
a.fieldShortSrgToObj.size shouldBe 0
a += item
a += item2
a += item3
a.fieldMapping.size shouldBe 3
a.fieldShortSrgToObj.size shouldBe 2
a.findFieldByShortSrg("f0") shouldEqual HashSet(item)
a.findFieldByShortSrg("f1") shouldEqual HashSet(item2, item3)
a -= item2
a.fieldMapping.size shouldBe 2
a.fieldShortSrgToObj.size shouldBe 2
a.findFieldByShortSrg("f0") shouldEqual HashSet(item)
a.findFieldByShortSrg("f1") shouldEqual HashSet(item3)
a -= item
a.fieldMapping.size shouldBe 1
a.fieldShortSrgToObj.size shouldBe 1
a.findFieldByShortSrg("f1") shouldEqual HashSet(item3)
a -= item3
a.fieldMapping.size shouldBe 0
a.fieldShortSrgToObj.size shouldBe 0
}
}
| mnn/mcMapperLib | src/test/scala/tk/monnef/mcmapper/test/RawDataMergerTests.scala | Scala | apache-2.0 | 6,348 |
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.domain
import cmwell.syntaxutils._
import java.nio.ByteBuffer
import java.security.MessageDigest
import java.util
import com.typesafe.scalalogging.LazyLogging
import org.joda.time.{DateTime, DateTimeZone}
import org.joda.time.format.{DateTimeFormat, ISODateTimeFormat}
import scala.language.implicitConversions
import scala.collection.immutable.SortedSet
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}
/**
* User: israel
* Date: 10/1/14
* Time: 17:25
*/
trait Formattable
trait Jsonable
object Infoton {
def getParent(path: String): String = {
// check if we are in /
if (path.endsWith("/") & path.length == 1)
"$root"
else {
val p = path.endsWith("/") match {
case true => path.take(path.take(path.length - 1).lastIndexOf("/"))
case false => path.take(path.lastIndexOf("/"))
}
if (p.isEmpty)
"/"
else {
var index = p.length - 1
while (p(index) == '/') {
index = index - 1
}
if (index == p.length - 1)
p
else {
p.take(index + 1)
}
}
}
}
val https = Some("https")
}
case class SystemFields (path: String, lastModified: DateTime, lastModifiedBy: String,
dc: String, indexTime: Option[Long], indexName: String, protocol: String){
val name = path.drop(path.lastIndexOf("/") + 1)
}
sealed trait Infoton extends Formattable { self =>
def kind = self.getClass.getSimpleName
def systemFields: SystemFields
def fields: Option[Map[String, Set[FieldValue]]] = None
def extraBytesForDigest: Seq[Array[Byte]] = Seq.empty
def extraLengthForWeight: Long = 0
def copyInfoton(systemFields: SystemFields = this.systemFields,
fields: Option[Map[String, Set[FieldValue]]] = this.fields): Infoton = this match {
case oi: ObjectInfoton =>
oi.copy(systemFields = systemFields, fields = fields)
case fi: FileInfoton =>
fi.copy(systemFields = systemFields, fields = fields)
case li: LinkInfoton =>
li.copy(systemFields = systemFields, fields = fields)
case di: DeletedInfoton =>
di.copy(systemFields = systemFields)
case ci: CompoundInfoton =>
ci.copy(systemFields = systemFields, fields = fields)
case gi: GhostInfoton => gi.copy(systemFields = systemFields)
}
def overrideUuid(forcedUuid: String) = this match {
case oi: ObjectInfoton =>
new ObjectInfoton(oi.systemFields, oi.fields) {
override def uuid = forcedUuid
}
case fi: FileInfoton =>
new FileInfoton(fi.systemFields, fi.fields, fi.content) {
override def uuid = forcedUuid
}
case li: LinkInfoton =>
new LinkInfoton(li.systemFields, li.fields, li.linkTo, li.linkType) {
override def uuid = forcedUuid
}
case di: DeletedInfoton =>
new DeletedInfoton(di.systemFields) {
override def uuid = forcedUuid }
case ci: CompoundInfoton =>
new CompoundInfoton(ci.systemFields,
ci.fields,
ci.children,
ci.offset,
ci.length,
ci.total) { override def uuid = forcedUuid }
case gi: GhostInfoton => new GhostInfoton(gi.systemFields) {
override def uuid = forcedUuid }
}
def uuid = uuid_
final def weight = weight_
final def parent = parent_
def replaceIndexTime(newIndextime: Option[Long]): Infoton = copyInfoton(systemFields.copy(indexTime = newIndextime))
/* calculate uuid and weight */
def longToByteArray(l: Long): Array[Byte] = {
val bb = ByteBuffer.allocate(8)
bb.putLong(l)
bb.flip()
bb.array()
}
/* internal variables and their counterparty methods for calculated fields*/
private var parent_ = ""
private val (uuid_, weight_) = {
var weight_ = 0L
val digest = MessageDigest.getInstance("MD5")
val pathBytes_ = systemFields.path.getBytes("UTF-8")
digest.update(pathBytes_)
weight_ += pathBytes_.length
val lastModifiedBytes_ = longToByteArray(systemFields.lastModified.getMillis)
digest.update(lastModifiedBytes_)
weight_ += lastModifiedBytes_.length
fields.foreach { f =>
f.map { case (k, v) => (k, SortedSet(v.map(_.payload).toSeq: _*)) }.toSeq.sortBy(_._1).foreach {
case (k, v) =>
val keyBytes_ = k.getBytes("UTF-8")
digest.update(keyBytes_)
weight_ += keyBytes_.length
v.foreach { q =>
val valueBytes_ = q.getBytes("UTF-8")
digest.update(valueBytes_)
weight_ += valueBytes_.length
}
}
}
extraBytesForDigest.foreach { bytes =>
digest.update(bytes)
weight_ += bytes.length
}
val uuid_ = digest.digest().map("%02x".format(_)).mkString
weight_ += extraLengthForWeight
(uuid_, weight_)
}
/* calculate parent*/
parent_ = Infoton.getParent(systemFields.path)
// check if we are in /
/*
if ( path.endsWith("/") & path.length == 1 )
parent_ = "$root"
else {
val p = path.endsWith("/") match {
case true => path.take(path.take(path.length-1).lastIndexOf("/"))
case false => path.take(path.lastIndexOf("/"))
}
if ( p.isEmpty )
parent_ = "/"
else {
var index = p.length - 1
while ( p(index) == '/') {
index = index - 1
}
if ( index == p.length - 1)
parent_ = p
else {
parent_ = p.take(index + 1)
}
}
}
*/
def isSameAs(that: Infoton) = {
this.uuid == that.uuid || (
this.kind == that.kind &&
this.systemFields.protocol == that.systemFields.protocol &&
this.fields == that.fields &&
this.extraBytesForDigest == that.extraBytesForDigest
)
}
// def ⊆(that: Infoton) = (this.fields, that.fields) match {
// case (Some(f1),Some(f2)) =>
// val (f1s,f2s) = (f1.toSeq,f2.toSeq)
// f1s.intersect(f2s) == f1s
// case (None,_) => true
// case _ => false
// }
def masked(fieldsMask: Set[String], allowEmpty: Boolean = false): Infoton =
if (fieldsMask.isEmpty && !allowEmpty) this else getMasked(fieldsMask)
protected def getMasked(fieldsMask: Set[String]): Infoton
protected def maskedFields(fieldsMask: Set[String]) = fields.map(flds => flds -- flds.keySet.diff(fieldsMask))
}
case class ObjectInfoton(systemFields: SystemFields, override val fields: Option[Map[String, Set[FieldValue]]] = None)
extends Infoton {
override def getMasked(fieldsMask: Set[String]): Infoton = {
val originalUuid = uuid
new ObjectInfoton(systemFields, maskedFields(fieldsMask)) {
override val uuid = originalUuid
override def kind = "ObjectInfoton"
}
}
}
object ObjectInfoton {
def apply(systemFields: SystemFields, fields: Map[String, Set[FieldValue]]) =
new ObjectInfoton(systemFields, Some(fields))
def apply(path: String, lastModified: DateTime, lastModifiedBy: String, dc: String, indexTime: Option[Long], indexName: String, protocol: String,
fields: Map[String, Set[FieldValue]]) =
new ObjectInfoton(SystemFields(path, lastModified, lastModifiedBy, dc, indexTime, indexName, protocol), Some(fields))
}
case class CompoundInfoton(systemFields: SystemFields,
override val fields: Option[Map[String, Set[FieldValue]]] = None,
children: Seq[Infoton],
offset: Long,
length: Long,
total: Long)
extends Infoton {
override def getMasked(fieldsMask: Set[String]): Infoton = {
val originalUuid = uuid
new CompoundInfoton(systemFields, maskedFields(fieldsMask), children, offset, length, total) {
override val uuid = originalUuid
override def kind = "CompoundInfoton"
}
}
}
object CompoundInfoton {
def apply(systemFields: SystemFields, fields: Map[String, Set[FieldValue]], children: Seq[Infoton], offset: Long, length: Long, total: Long) =
new CompoundInfoton(systemFields, Some(fields), children, offset, length, total)
def apply(path: String, lastModified: DateTime, lastModifiedBy: String,
dc: String, indexTime: Option[Long], indexName: String, protocol: String,
fields: Map[String, Set[FieldValue]], children: Seq[Infoton], offset: Long, length: Long, total: Long) =
new CompoundInfoton(SystemFields(path, lastModified, lastModifiedBy, dc, indexTime, indexName, protocol), Some(fields),
children, offset, length, total)
}
object LinkType {
val Permanent: Int = 0
val Temporary: Int = 1
val Forward: Int = 2
}
case class LinkInfoton(systemFields: SystemFields,
override val fields: Option[Map[String, Set[FieldValue]]] = None,
linkTo: String,
linkType: Int)
extends Infoton {
override def extraBytesForDigest: Seq[Array[Byte]] = {
Seq(linkTo.getBytes("UTF-8"), linkType.toString.getBytes("UTF-8"))
}
override def extraLengthForWeight: Long = linkTo.getBytes.length + 1
override def getMasked(fieldsMask: Set[String]): Infoton = {
val originalUuid = uuid
new LinkInfoton(systemFields, maskedFields(fieldsMask), linkTo, linkType) {
override val uuid = originalUuid
override def kind = "LinkInfoton"
}
}
}
object LinkInfoton {
def apply(systemFields: SystemFields, fields: Map[String, Set[FieldValue]], linkTo: String, linkType: Int) =
new LinkInfoton(systemFields, Some(fields), linkTo, linkType)
def apply(path: String, lastModified: DateTime, lastModifiedBy: String, dc: String, indexTime: Option[Long], indexName: String, protocol: String,
fields: Map[String, Set[FieldValue]],linkTo: String, linkType: Int) =
new LinkInfoton(SystemFields(path, lastModified, lastModifiedBy, dc, indexTime, indexName, protocol), Some(fields), linkTo, linkType)
}
case class DeletedInfoton(systemFields: SystemFields)
extends Infoton {
override def getMasked(fieldsMask: Set[String]): Infoton = this
}
object DeletedInfoton {
def apply(systemFields: SystemFields, fields: Map[String, Set[FieldValue]]) =
new DeletedInfoton(systemFields)
def apply(path: String, lastModified: DateTime, lastModifiedBy: String,
dc: String, indexTime: Option[Long], indexName: String, protocol: String) =
new DeletedInfoton(SystemFields(path, lastModified, lastModifiedBy, dc, indexTime, indexName, protocol))
}
case class GhostInfoton(systemFields: SystemFields) extends Infoton {
override protected def getMasked(fieldsMask: Set[String]): Infoton = this
}
object GhostInfoton {
val zeroTime = new DateTime(0L)
def apply(systemFields: SystemFields) = new GhostInfoton(systemFields)
def apply(path: String, dc: String = "N/A", indexTime: Option[Long] = None, lastModified: DateTime = GhostInfoton.zeroTime,
lastModifiedBy: String = "", protocol: String, indexName: String = "") =
new GhostInfoton(SystemFields(path, lastModified, lastModifiedBy, dc, indexTime, indexName, protocol))
def ghost(path: String, protocol: String): Infoton = GhostInfoton(SystemFields(path, zeroTime, "anonymous", "N/A", None, "", protocol))
}
case class FileInfoton(systemFields: SystemFields,
override val fields: Option[Map[String, Set[FieldValue]]] = None,
content: Option[FileContent] = None)
extends Infoton {
def hasData = content.exists(_.data.isDefined)
def hasDataPointer = content.exists(_.dataPointer.isDefined)
override def extraBytesForDigest: Seq[Array[Byte]] = {
val dataRepr = if (content.exists(_.dataPointer.isDefined)) {
Seq(content.get.dataPointer.get.getBytes("UTF-8"))
} else {
val d = content.flatMap(_.data).getOrElse(Array.emptyByteArray)
Seq(longToByteArray(d.length), d)
}
content.fold(Seq.empty[Array[Byte]]) { c =>
val mime = c.mimeType.getBytes("UTF-8")
mime +: dataRepr
}
}
override def extraLengthForWeight = {
content
.map { c =>
c.data.map { _.length.toLong }.getOrElse(0L)
}
.getOrElse(0L)
}
override def getMasked(fieldsMask: Set[String]): Infoton = {
val originalUuid = uuid
new FileInfoton(systemFields, maskedFields(fieldsMask), content) {
override val uuid = originalUuid
override def kind = "FileInfoton"
}
}
def withoutData: FileInfoton = {
require(content.isDefined && (content.get.data.isDefined || content.get.dataPointer.isDefined),
"content must be defined with either data or dataPointer")
val originalUuid = uuid
val hash = content.flatMap(_.dataPointer).orElse(content.flatMap(_.data).map(cmwell.util.string.Hash.sha1))
new FileInfoton(systemFields,
fields,
content.map(c => FileContent(None, c.mimeType, content.get.dataLength, hash))) {
override val uuid = originalUuid
override def kind = "FileInfoton"
}
}
def populateDataFromPointerBy(
fetchFunc: (String) => Future[Array[Byte]]
)(implicit ec: ExecutionContext): Future[FileInfoton] = {
require(content.isDefined && content.get.dataPointer.isDefined, "dataPointer must exist")
val originalUuid = uuid
val hashOpt = content.flatMap(_.dataPointer)
val dataFut = content.flatMap(_.data).map(Future.successful).getOrElse(fetchFunc(hashOpt.get))
dataFut.map(
data =>
new FileInfoton(systemFields,
fields,
content.map(c => FileContent(Some(data), c.mimeType, data.length, hashOpt))) {
override val uuid = originalUuid
override def kind = "FileInfoton"
}
)
}
}
object FileInfoton {
def apply(systemFields: SystemFields,
fields: Map[String, Set[FieldValue]],
content: FileContent) =
new FileInfoton(systemFields, Some(fields), Some(content))
def apply(path: String, lastModified: DateTime, lastModifiedBy: String,
dc: String, indexTime: Option[Long], indexName: String, protocol: String,
fields: Map[String, Set[FieldValue]],
content: FileContent) =
new FileInfoton(SystemFields(path, lastModified, lastModifiedBy, dc, indexTime, indexName, protocol), Some(fields), Some(content))
}
case class FileContent(data: Option[Array[Byte]],
mimeType: String,
dataLength: Int,
dataPointer: Option[String] = None) {
override def equals(other: Any) = other match {
case fc: FileContent =>
util.Arrays.equals(this.data.orNull, fc.data.orNull) && this.mimeType.equals(fc.mimeType) && this.dataPointer
.equals(fc.dataPointer)
case _ => false
}
def length = data.fold(dataLength)(_.length)
override def hashCode() = 0
def asString: String = { new String(data.getOrElse(Array[Byte]()), "UTF-8") }
}
object FileContent {
def apply(data: Array[Byte], mimeType: String) = new FileContent(Some(data), mimeType, data.length)
def apply(mimeType: String, length: Long) = new FileContent(None, mimeType, length.toInt)
}
case class VirtualInfoton(infoton: Infoton) {
require(!infoton.isInstanceOf[VirtualInfoton], "youtube.com/watch?v=v2FMqtC1x9Y")
def getInfoton = infoton match {
case ObjectInfoton(systemFields, fields) =>
new ObjectInfoton(systemFields, fields) {
override def kind = "VirtualObjectInfoton"
override def uuid = "0"
}
case CompoundInfoton(systemFields, fields, children, offset, length, total) =>
new CompoundInfoton(systemFields, fields, children, offset, length, total) {
override def kind = "VirtualCompoundInfoton"
override def uuid = "0"
}
case LinkInfoton(systemFields, fields, linkTo, linkType) =>
new LinkInfoton(systemFields, fields, linkTo, linkType) {
override def kind = "VirtualLinkInfoton"
override def uuid = "0"
}
case FileInfoton(systemFields, fields, content) =>
new FileInfoton(systemFields, fields, content) {
override def kind = "VirtualFileInfoton"
override def uuid = "0"
}
case _ => ???
}
}
object VirtualInfoton {
implicit def v2i(v: VirtualInfoton): Infoton = v.getInfoton
}
case class BagOfInfotons(infotons: Seq[Infoton]) extends Formattable {
def masked(fieldsMask: Set[String]): BagOfInfotons = BagOfInfotons(infotons.map(_.masked(fieldsMask)))
}
case class RetrievablePaths(infotons: Seq[Infoton], irretrievablePaths: Seq[String]) extends Formattable {
def masked(fieldsMask: Set[String]): RetrievablePaths = copy(infotons = infotons.map(_.masked(fieldsMask)))
}
case class InfotonHistoryVersions(versions: Seq[Infoton]) extends Formattable {
def masked(fieldsMask: Set[String]): InfotonHistoryVersions =
InfotonHistoryVersions(versions.map(_.masked(fieldsMask)))
}
case class InfotonPaths(paths: Seq[String]) extends Formattable
object ContentPortion {
def everything(infoton: Infoton): ContentPortion = Everything(infoton)
def unknownNestedContent(infoton: Infoton): ContentPortion = UnknownNestedContent(infoton)
}
sealed trait ContentPortion { def infoton: Infoton }
case class UnknownNestedContent(infoton: Infoton) extends ContentPortion
case class Everything(infoton: Infoton) extends ContentPortion
final class ComparisonImpossible private (val valueType: String, val input: String, cause: Throwable)
extends Exception(s"can't compare [$input] with values of type [$valueType]", cause)
object ComparisonImpossible {
def apply(valueType: String, input: String): ComparisonImpossible = new ComparisonImpossible(valueType, input, null)
def apply(valueType: String, input: String, cause: Throwable): ComparisonImpossible =
new ComparisonImpossible(valueType, input, cause)
def unapply(ex: ComparisonImpossible): Option[(String, String, Option[Throwable])] =
Some((ex.valueType, ex.input, Option(ex.getCause)))
}
sealed trait FieldValue {
def value: Any
def size: Long
def quad: Option[String]
def sType: String = this.getClass.getSimpleName.substring(1)
def compareToString(unparsedValue: String): Try[Int]
private[domain] def payload: String = s"${this.getClass.getName}$value${quad.getOrElse("")}"
}
case class FNull(quad: Option[String]) extends FieldValue {
def value = null
def size = 0
override def compareToString(unparsedValue: String): Try[Int] = Failure(ComparisonImpossible("FNull", unparsedValue))
}
case class FExtra[T](value: T, quad: Option[String]) extends FieldValue {
def size = 0
override def toString(): String = value.toString
override def compareToString(unparsedValue: String): Try[Int] = Failure(ComparisonImpossible("FExtra", unparsedValue))
}
object FieldValue {
def prefixByType(fValue: FieldValue): Char = fValue match {
case _: FString | _: FReference | _: FExternal => 's'
case _: FInt => 'i'
case _: FLong | _: FBigInt => 'l'
case _: FBigDecimal | _: FDouble => 'w'
case _: FBoolean => 'b'
case _: FDate => 'd'
case _: FFloat => 'f'
case _: FNull => !!!
case _: FExtra[_] => !!!
}
def parseString(s: String): FieldValue = {
if (FReference.isUriRef(s)) FReference(s, None)
else if (FDate.isDate(s)) FDate(s, None)
else FString(s, None, None)
}
def apply(value: String, dataTypeURI: String): FieldValue = this.apply(value, dataTypeURI, None)
def apply(value: String, dataTypeURI: String, quad: Option[String]): FieldValue = FExternal(value, dataTypeURI, quad)
def apply(num: Int): FieldValue = this.apply(num, None)
def apply(num: Int, quad: Option[String]): FieldValue = FInt(num, quad)
def apply(num: Long): FieldValue = this.apply(num, None)
def apply(num: Long, quad: Option[String]): FieldValue = FLong(num, quad)
def apply(num: java.math.BigInteger): FieldValue = this.apply(num, None)
def apply(num: java.math.BigInteger, quad: Option[String]): FieldValue = FBigInt(num, quad)
def apply(num: Float): FieldValue = this.apply(num, None)
def apply(num: Float, quad: Option[String]): FieldValue = FFloat(num, quad)
def apply(num: Double): FieldValue = this.apply(num, None)
def apply(num: Double, quad: Option[String]): FieldValue = FDouble(num, quad)
def apply(num: java.math.BigDecimal): FieldValue = this.apply(num, None)
def apply(num: java.math.BigDecimal, quad: Option[String]): FieldValue = FBigDecimal(num, quad)
def apply(bool: Boolean): FieldValue = this.apply(bool, None)
def apply(bool: Boolean, quad: Option[String]): FieldValue = FBoolean(bool, quad)
def apply(str: String): FieldValue = this.apply(str, None, None)
def apply(str: String, lang: Option[String], quad: Option[String]): FieldValue = FString(str, lang, quad)
}
case class FInt(value: Int, quad: Option[String]) extends FieldValue {
override def toString(): String = value.toString
override def size: Long = 4
override def compareToString(unparsedValue: String): Try[Int] =
Try(Integer.parseInt(unparsedValue))
.transform(parsedValue => Try(value.compare(parsedValue)),
cause => Failure(ComparisonImpossible("FInt", unparsedValue, cause)))
}
object FInt {
def apply(n: Int): FInt = FInt(n, None)
}
case class FLong(value: Long, quad: Option[String]) extends FieldValue {
override def toString(): String = value.toString
override def size: Long = 8
override def compareToString(unparsedValue: String): Try[Int] =
Try(java.lang.Long.parseLong(unparsedValue))
.transform(parsedValue => Try(value.compare(parsedValue)),
cause => Failure(ComparisonImpossible("FLong", unparsedValue, cause)))
}
object FLong {
def apply(n: Long): FLong = FLong(n, None)
}
case class FBigInt(value: java.math.BigInteger, quad: Option[String]) extends FieldValue {
override def toString(): String = value.toString
override def size: Long = value.bitLength()
override def compareToString(unparsedValue: String): Try[Int] =
Try(BigInt(unparsedValue).underlying())
.transform(parsedValue => Try(value.compareTo(parsedValue)),
cause => Failure(ComparisonImpossible("FBigInt", unparsedValue, cause)))
}
object FBigInt {
def apply(n: java.math.BigInteger): FBigInt = FBigInt(n, None)
}
case class FFloat(value: Float, quad: Option[String]) extends FieldValue {
override def toString(): String = value.toString
override def size: Long = 4
override def compareToString(unparsedValue: String): Try[Int] =
Try(java.lang.Float.parseFloat(unparsedValue))
.transform(parsedValue => Try(value.compare(parsedValue)),
cause => Failure(ComparisonImpossible("FFloat", unparsedValue, cause)))
}
object FFloat {
def apply(n: Float): FFloat = FFloat(n, None)
}
case class FDouble(value: Double, quad: Option[String]) extends FieldValue {
override def toString(): String = value.toString
override def size: Long = 8
override def compareToString(unparsedValue: String): Try[Int] =
Try(java.lang.Double.parseDouble(unparsedValue))
.transform(parsedValue => Try(value.compare(parsedValue)),
cause => Failure(ComparisonImpossible("FDouble", unparsedValue, cause)))
}
object FDouble {
def apply(n: Double): FDouble = FDouble(n, None)
}
case class FBigDecimal(value: java.math.BigDecimal, quad: Option[String]) extends FieldValue {
override def toString(): String = value.toString
override def size: Long = value.precision
override def compareToString(unparsedValue: String): Try[Int] =
Try(BigDecimal(unparsedValue).underlying())
.transform(parsedValue => Try(value.compareTo(parsedValue)),
cause => Failure(ComparisonImpossible("FBigDecimal", unparsedValue, cause)))
}
object FBigDecimal {
def apply(n: java.math.BigDecimal): FBigDecimal = FBigDecimal(n, None)
}
case class FExternal(value: String, dataTypeURI: String, quad: Option[String]) extends FieldValue {
require(dataTypeURI.forall(_ != '$'))
override def toString(): String = value
override def size: Long = value.getBytes("UTF-8").length + dataTypeURI.getBytes("UTF-8").length
def getDataTypeURI: String =
if (dataTypeURI.take(4) == "xsd#")
"http://www.w3.org/2001/XMLSchema" + dataTypeURI.drop(3)
else dataTypeURI
override def compareToString(unparsedValue: String): Try[Int] =
Try(Ordering.String.compare(value, unparsedValue))
}
object FExternal {
def apply(s: String, u: String): FExternal = FExternal(s, u, None)
}
case class FString(value: String, lang: Option[String], quad: Option[String]) extends FieldValue {
override def toString(): String = value
override def size: Long = value.getBytes("UTF-8").size
override def compareToString(unparsedValue: String): Try[Int] =
Try(Ordering.String.compare(value, unparsedValue))
}
object FString {
def apply(s: String): FString = FString(s, None, None)
}
case class FReference(value: String, quad: Option[String]) extends FieldValue with LazyLogging {
override def toString(): String = value
override def size: Long = value.getBytes("UTF-8").size
def getCmwellPath: String =
if (value.startsWith("https:/")) value.drop("https:/".length)
else if (value.startsWith("cmwell://")) value.drop("cmwell:/".length)
else if (value.startsWith("http:/")) value.drop("http:/".length)
else {
logger.warn(
s"value [$value] has bad prefix, and is not a CM-Well reference (though it is a field value of type FReference)."
)
value
}
override def compareToString(unparsedValue: String): Try[Int] =
Try(Ordering.String.compare(value, unparsedValue))
def getProtocol: String = value.takeWhile(':'.!=)
}
object FReference {
def apply(s: String): FReference = FReference(s, None)
def isUriRef(s: String): Boolean = scala.util.Try { new java.net.URL(s) }.isSuccess || s.startsWith("cmwell://")
}
case class FBoolean(value: Boolean, quad: Option[String]) extends FieldValue {
override def toString(): String = value.toString
override def size: Long = 4
override def compareToString(unparsedValue: String): Try[Int] =
Try(java.lang.Boolean.parseBoolean(unparsedValue))
.transform(parsedValue => Try(value.compare(parsedValue)),
cause => Failure(ComparisonImpossible("FBoolean", unparsedValue, cause)))
}
object FBoolean {
def apply(b: Boolean): FBoolean = FBoolean(b, None)
}
//TODO: inner value should be DateTime. not String! companion object apply method should convert String input into DateTime instances.
final class FDate(private val temp: String, val quad: Option[String]) extends FieldValue {
val (value, inner) = FDate.stringToDate(temp) match {
case scala.util.Success(d) => temp -> d
case scala.util.Failure(e) => {
val v = FDate.fixFormattingIfNeeded(temp)
FDate.stringToDate(v) match {
case scala.util.Success(d) => v -> d
case scala.util.Failure(e) => throw e
}
}
}
def canEqual(a: Any) = a != null && a.isInstanceOf[FDate]
override def equals(that: Any): Boolean = that match {
case that: FDate => that.canEqual(this) && that.value == this.value && that.quad == this.quad
case _ => false
}
override def hashCode: Int = 37 * value.## + quad.##
override def toString(): String = value
def getDate: DateTime = inner
override def size: Long = value.getBytes("UTF-8").size
override def compareToString(unparsedValue: String): Try[Int] = {
FDate
.stringToDate(unparsedValue)
.recoverWith {
case e: Throwable => {
val v = FDate.fixFormattingIfNeeded(temp)
FDate.stringToDate(v).recoverWith { case _ => Failure(e) }
}
}
.transform(parsedValue => Try(inner.compareTo(parsedValue)),
cause => Failure(ComparisonImpossible("FDate", unparsedValue, cause)))
}
}
object FDate extends LazyLogging {
def apply(s: String): FDate = new FDate(s, None)
def apply(s: String, q: Option[String]): FDate = new FDate(s, q)
def unapply(fDate: FDate): Option[(String, Option[String])] = Some(fDate.value -> fDate.quad)
private val withDotdateParser = ISODateTimeFormat.dateTimeParser().withZone(DateTimeZone.UTC)
private val withoutDotdateParser = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'").withZone(DateTimeZone.UTC)
private val justDateParser = DateTimeFormat.forPattern("yyyy-MM-dd").withZone(DateTimeZone.UTC)
private val FixableDate = """(\d{4}-\d{2}-\d{2})\s*T?\s*(\d{2}:\d{2}:\d{2})(.\d+)?\s*Z?\s*""".r
def fixFormattingIfNeeded(str: String) = str match {
case FixableDate(date, time, millis) => date + "T" + time + Option(millis).getOrElse("") + "Z"
case _ => str // unfixable
}
def isDate(str: String): Boolean = stringToDateWithFixTry(str).isSuccess
private def stringToDateWithFixTry(str: String): Try[DateTime] = {
val orig = stringToDate(str)
orig.recoverWith {
case e => {
val fixed = fixFormattingIfNeeded(str)
if (fixed == str) orig
else {
logger.warn(s"fixing a date: [$str] to [$fixed]", e)
stringToDate(fixed)
}
}
}
}
private def stringToDate(str: String): Try[DateTime] = {
val orig = Try(withDotdateParser.parseDateTime(str))
orig.recoverWith {
case _ =>
Try(withoutDotdateParser.parseDateTime(str)).recoverWith {
case _ => Try(justDateParser.parseDateTime(str))
}
}
}
}
| dudi3001/CM-Well | server/cmwell-domain/src/main/scala/cmwell/domain/Infoton.scala | Scala | apache-2.0 | 30,513 |
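FDate.fixFormattingIfNeeded above normalizes loosely formatted timestamps into strict ISO form before parsing. A standalone sketch of that normalization step, with a slightly stricter regex (escaped dot) and made-up inputs:

object DateFixSketch extends App {
  private val Fixable = """(\d{4}-\d{2}-\d{2})\s*T?\s*(\d{2}:\d{2}:\d{2})(\.\d+)?\s*Z?\s*""".r

  // Rebuild the string as date + "T" + time [+ millis] + "Z"; leave anything else alone.
  def fix(str: String): String = str match {
    case Fixable(date, time, millis) => date + "T" + time + Option(millis).getOrElse("") + "Z"
    case _                           => str // unfixable, leave as-is
  }

  println(fix("2016-01-02 03:04:05"))      // 2016-01-02T03:04:05Z
  println(fix("2016-01-02T03:04:05.123Z")) // already canonical, returned unchanged
  println(fix("not a date"))               // returned untouched
}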
/*******************************************************************************
* Copyright (c) 2016 IBM Corp.
*
* Created by Basho Technologies for IBM
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package org.apache.spark.sql.riak
import com.basho.riak.spark._
import scala.reflect._
import com.basho.riak.spark.rdd.connector.{RiakConnectorConf, RiakConnector}
import com.basho.riak.spark.rdd.{ReadConf, RiakTSRDD}
import com.basho.riak.spark.util.TSConversionUtil
import com.basho.riak.spark.writer.WriteConf
import com.basho.riak.spark.writer.mapper.SqlDataMapper
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.sources.{InsertableRelation, BaseRelation, Filter, PrunedFilteredScan}
import org.apache.spark.sql.types._
import org.apache.spark.sql._
import scala.collection.convert.decorateAsScala._
import com.basho.riak.spark.query.QueryBucketDef
/**
* Implements [[BaseRelation]], [[InsertableRelation]] and [[PrunedFilteredScan]].
* It inserts data into and scans a RiakTS bucket, and pushes some filters down to SQL.
*
* @author Sergey Galkin <srggal at gmail dot com>
* @since 1.2.0
*/
private[riak] class RiakRelation(
bucket: String,
connector: RiakConnector,
val readConf: ReadConf,
val writeConf: WriteConf,
override val sqlContext: SQLContext,
userSpecifiedSchema: Option[StructType])
extends BaseRelation with PrunedFilteredScan with InsertableRelation with Logging {
override def schema: StructType = userSpecifiedSchema match {
case None =>
val readSchemaQuery = QueryBucketDef(connector, readConf)
readSchemaQuery.getTableSchema(bucket) match {
case None => throw new IllegalStateException(s"No bucket $bucket was found")
case Some(riakShema) => riakShema
}
case Some(st: StructType) => st
}
private[this] val baseRdd: RiakTSRDD[Row] = sqlContext.sparkContext
.riakTSTable[Row](bucket, readConf, userSpecifiedSchema)(implicitly[ClassTag[Row]], connector)
def buildScan(): RDD[Row] = baseRdd.asInstanceOf[RDD[Row]]
override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
val prunedRdd = {
if (requiredColumns.isEmpty)
baseRdd
else
baseRdd.select(requiredColumns: _*)
}
val tsRangeFieldName = readConf.tsRangeFieldName
if (tsRangeFieldName != null && readConf.getOrDefaultSplitCount() > 1) {
val partitionedRdd = prunedRdd.partitionByTimeRanges(tsRangeFieldName, filters)
readConf.quantum match {
case None => partitionedRdd
case Some(q) => partitionedRdd.quantum(q)
}
} else
prunedRdd.filter(filters)
}
// TODO: unhandledFilters() logic should be refactored in 2.x
/**
*
* @param filters
* @return always returns an empty array. Filters will be pushed down to the [[com.basho.riak.spark.rdd.RiakRDD]] and
* in case of unsupported/unhandled filter, it will raise an exception.
* It is a legacy behavior which definitely will be changed on 2.x branch of Spark Connector.
* @since 1.6.3
*/
override def unhandledFilters(filters: Array[Filter]): Array[Filter] = Array.empty[Filter]
override def insert(data: DataFrame, overwrite: Boolean): Unit = {
if (overwrite) {
throw new UnsupportedOperationException("Data truncation is not supported for the moment")
}
implicit val rwf = SqlDataMapper.factory[Row]
implicit val riakConnector = connector
data.rdd.saveToRiakTS(bucket, writeConf = writeConf)
}
}
/**
* @author Sergey Galkin <srggal at gmail dot com>
*/
object RiakRelation {
def apply(bucket: String,
sqlContext: SQLContext,
schema: Option[StructType] = None,
connector: Option[RiakConnector] = None,
readConf: ReadConf,
writeConf: WriteConf): RiakRelation = {
new RiakRelation(bucket, connector.getOrElse(RiakConnector(sqlContext.sparkContext.getConf)),
readConf, writeConf, sqlContext, schema)
}
def apply(sqlContext: SQLContext, parameters: Map[String, String], schema: Option[StructType]): RiakRelation = {
val existingConf = sqlContext.sparkContext.getConf
val bucketDef = BucketDef(parameters(DefaultSource.RiakBucketProperty), None)
val riakConnector = new RiakConnector(RiakConnectorConf(existingConf, parameters))
val readConf = ReadConf(existingConf, parameters)
val writeConf = WriteConf(existingConf, parameters)
RiakRelation(bucketDef.bucket, sqlContext, schema, Some(riakConnector), readConf, writeConf)
}
}
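/**
 * Usage sketch: one way a relation can be obtained through the companion object and then scanned.
 * The bucket name "ts_table" and the column names are assumptions made up for illustration.
 */
private[riak] object RiakRelationUsageSketch {
  def example(sqlContext: SQLContext): RDD[Row] = {
    val relation = RiakRelation(
      sqlContext,
      parameters = Map(DefaultSource.RiakBucketProperty -> "ts_table"),
      schema = None)
    // prune to two (hypothetical) columns and push no filters down
    relation.buildScan(Array("time", "value"), Array.empty[Filter])
  }
}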
| basho/spark-riak-connector | connector/src/main/scala/org/apache/spark/sql/riak/RiakRelation.scala | Scala | apache-2.0 | 5,240 |
package dotty.tools.dottydoc
package model
import comment.Comment
import references._
import dotty.tools.dotc.core.Symbols.{ Symbol, NoSymbol }
object internal {
final case class PackageImpl(
var symbol: Symbol,
var annotations: List[String],
var name: String,
var members: List[Entity],
var path: List[String],
var superTypes: List[MaterializableLink] = Nil,
var comment: Option[Comment] = None,
var parent: Option[Entity] = None
) extends Package
object EmptyPackage {
def apply(path: List[String], name: String): PackageImpl = {
PackageImpl(NoSymbol, Nil, name, Nil, path)
}
}
final case class TypeAliasImpl (
symbol: Symbol,
annotations: List[String],
modifiers: List[String],
name: String,
path: List[String],
alias: Option[Reference],
typeParams: List[String] = Nil,
var comment: Option[Comment] = None,
var parent: Option[Entity] = None
) extends TypeAlias
final case class ClassImpl(
symbol: Symbol,
annotations: List[String],
name: String,
members: List[Entity],
modifiers: List[String],
path: List[String],
typeParams: List[String] = Nil,
constructors: List[List[ParamList]] = Nil,
superTypes: List[MaterializableLink] = Nil,
var comment: Option[Comment] = None,
var companionPath: List[String] = Nil,
var parent: Option[Entity] = None
) extends Class
final case class CaseClassImpl(
symbol: Symbol,
annotations: List[String],
name: String,
members: List[Entity],
modifiers: List[String],
path: List[String],
typeParams: List[String] = Nil,
constructors: List[List[ParamList]] = Nil,
superTypes: List[MaterializableLink] = Nil,
var comment: Option[Comment] = None,
var companionPath: List[String] = Nil,
var parent: Option[Entity] = None
) extends CaseClass
final case class TraitImpl(
symbol: Symbol,
annotations: List[String],
name: String,
members: List[Entity],
modifiers: List[String],
path: List[String],
typeParams: List[String] = Nil,
traitParams: List[ParamList] = Nil,
superTypes: List[MaterializableLink] = Nil,
var comment: Option[Comment] = None,
var companionPath: List[String] = Nil,
var parent: Option[Entity] = None
) extends Trait
final case class ObjectImpl(
symbol: Symbol,
annotations: List[String],
name: String,
members: List[Entity],
private val mods: List[String],
path: List[String],
superTypes: List[MaterializableLink] = Nil,
var comment: Option[Comment] = None,
var companionPath: List[String] = Nil,
var parent: Option[Entity] = None
) extends Object {
def modifiers: List[String] = mods.filterNot(_ == "final")
}
final case class DefImpl(
symbol: Symbol,
annotations: List[String],
name: String,
modifiers: List[String],
path: List[String],
returnValue: Reference,
typeParams: List[String] = Nil,
paramLists: List[ParamList] = Nil,
var comment: Option[Comment] = None,
implicitlyAddedFrom: Option[Reference] = None,
var parent: Option[Entity] = None
) extends Def
final case class ValImpl(
symbol: Symbol,
annotations: List[String],
name: String,
modifiers: List[String],
path: List[String],
returnValue: Reference,
kind: String,
var comment: Option[Comment] = None,
implicitlyAddedFrom: Option[Reference] = None,
var parent: Option[Entity] = None
) extends Val
final case class ParamListImpl(
list: List[NamedReference],
isImplicit: Boolean
) extends ParamList
}
| som-snytt/dotty | doc-tool/src/dotty/tools/dottydoc/model/internal.scala | Scala | apache-2.0 | 3,621 |
package _docs.tests
class Adoc:
def foo = 123
| dotty-staging/dotty | scaladoc-testcases/src/_docs/tests/Adoc.scala | Scala | apache-2.0 | 49 |
package grammarcomp
package repair
import grammar._
import CFGrammar._
import EBNFGrammar._
import generators._
import parsing._
import equivalence._
import utils._
import scala.collection.mutable.ListBuffer
import scala.annotation.tailrec
import RepairResult._
class ContextBasedSuperSetRepair[T](g: Grammar[T], ungenWord: Word[T], equivChecker: EquivalenceChecker[T])
(implicit gctx: GlobalContext, opctx: RepairContext) {
sealed abstract class Permissibility
case class PermissibleUnderContext() extends Permissibility
case class PermissibleInOtherContexts() extends Permissibility
case class NotPermissible() extends Permissibility
val cnfG = g.cnfGrammar
val gParser = new CYKParser[T](cnfG)
val nontermProductions = g.nontermToRules.map { case (k, v) => (k, v.map(_.rightSide).toSet) }
val refWords = equivChecker.getRefWords
def findNonterminalWithProductions(prods: Set[List[Symbol[T]]]): Option[Nonterminal] = {
nontermProductions.collectFirst {
case (nt, ntRights) if (ntRights == prods) => nt
}
}
lazy val (permissibleParseTrees, acceptedWords) = {
//get the parse trees of cnfG of all the valid strings of the reference grammar
refWords.foldLeft((List[ParseTree[T]](), List[Word[T]]())) {
//check for abort flag
case (acc, _) if gctx.abort => acc
case ((accTrees, accWords), refWord) if accWords.size < opctx.nCorrectWordsForRepair => {
gParser.parseWithTree(refWord) match {
case Some(tree) =>
//for debugging
/*val intTerm = Terminal[T]("Int")
if (refWord == List(intTerm, Terminal[T](","), intTerm, Terminal[T]("=>"), intTerm, Terminal[T]("$"))) {
println("ParseTree[T] for "+refWord+": \\n"+tree)
oneTree = tree
println(wordToString(refWord))
}*/
(accTrees :+ tree, accWords :+ refWord)
case None =>
(accTrees, accWords)
}
}
case (acc, _) => acc
}
}
var childrenCache = Map[Rule[T], Set[(Rule[T], Int)]]()
/**
   * Collects the rules (together with their child index) of the nodes that are
   * children of nodes labelled with the 'context' rule.
*/
def children(context: Rule[T]): Set[(Rule[T], Int)] = {
def collectChildren(tree: ParseTree[T]): Set[(Rule[T], Int)] = {
tree match {
case PNode(_, List()) | PLeaf(_) => Set()
case PNode(rule, children) =>
val foundChildren = (children flatMap {
case cn: PNode[T] => collectChildren(cn)
case _ => List()
}).toSet
if (rule == context) {
foundChildren ++ {
children.zipWithIndex.collect {
case (PNode(r, _), i) => (r, i)
}.toSet
}
} else {
foundChildren
}
}
}
val children = childrenCache.getOrElse(context, {
val chs = permissibleParseTrees.foldLeft(Set[(Rule[T], Int)]())((acc, ptree) => acc ++ collectChildren(ptree))
childrenCache += (context -> chs)
chs
})
children
}
/**
   * Finds all the rules of the parent nodes of 'key' in the given 'tree'.
   * A parent is a rule together with the index of the right side at which 'key' appears.
   * Assumes that the root has already been processed.
   * This relies on the fact that the start symbol does not appear on the right side of productions.
* TODO: This code could be really slow O(n^2) where 'n' is the size of the tree. Find a faster algorithm.
*/
def collectParents(tree: ParseTree[T], key: ParseTree[T]): Set[(Rule[T], Int)] = {
tree match {
case PNode(_, List()) | PLeaf(_) => Set()
case PNode(nr, children) =>
val foundParents = (children flatMap {
case cn: PNode[T] => collectParents(cn, key)
case _ => List()
}).toSet
foundParents ++ children.zipWithIndex.collect {
case (`key`, i) => (nr, i)
}.toSet
}
}
/**
* TODO: I am sure there is a better way to identify this using just the
* rules and the ref words
*/
def isTreePermissible(subtree: ParseTree[T], context: Rule[T], index: Int): Permissibility = {
val res = permissibleParseTrees.foldLeft(NotPermissible(): Permissibility)((acc, ptree) => acc match {
case PermissibleUnderContext() => {
acc
}
case _ =>
val parents = collectParents(ptree, subtree)
if (!parents.isEmpty) {
if (parents.contains((context, index))) {
//println("Parents for "+getRule(subtree)+": "+parents)
//println("Permissible under context: "+(getRule(subtree),context)+" in tree \\n "+ptree)
PermissibleUnderContext()
} else PermissibleInOtherContexts()
} else
acc
})
//println("Permissibility result for (subtree, context): "+(getRule(subtree),context)+" : "+res)
res
}
sealed abstract class RepairType
object Aborted extends RepairType
case class RemoveRule(rule: Rule[T]) extends RepairType
case class ExpandRightSides(rule: Rule[T]) extends RepairType
case class PreventUnderContext(rule: Rule[T], index: Int, context: Rule[T]) extends RepairType
/**
* This procedure identifies the rules and the repairs that have to be performed
*/
@tailrec final def findRepairPoint(tree: ParseTree[T]): RepairType = tree match {
case _ if gctx.abort =>
Aborted
case n @ PNode(contextRule, children) =>
val repairPoint = children.zipWithIndex.foldLeft(None: Option[(Permissibility, ParseTree[T], Int)]) {
case (None, (childTree, index)) =>
isTreePermissible(childTree, contextRule, index) match {
case PermissibleUnderContext() =>
//in this case a fix cannot be found inside the 'childTree' so move to the next child
None
case res @ _ =>
Some((res, childTree, index))
}
case (acc, _) => acc
}
if (opctx.debugSupersetRepair)
println("repairPoint: " + repairPoint)
repairPoint match {
case None =>
//Here, every children is individually feasible but the combination of child trees is not feasible.
//To identify the repair point, unfold the current rule so that every possible
//combination of the productions of the nonterminals on the right hand side are made explicit.
//Hence, in the subsequent iterations the current invalid combinations can be eliminated by splitting
ExpandRightSides(contextRule)
case Some((_, childTree: PLeaf[T], _)) =>
//here the terminal is not feasible under the given context.
//Therefore, the only fix is to remove the context rule
RemoveRule(contextRule)
case Some((PermissibleInOtherContexts(), PNode(r, _), index)) =>
//the fix lies in this child
PreventUnderContext(r, index, contextRule)
case Some((NotPermissible(), child: PNode[T], _)) =>
//recurse into the child tree as we have found a smaller infeasible subtree
findRepairPoint(child)
case Some(_) =>
throw new IllegalStateException("Impossible match case taken !!")
}
case l: PLeaf[T] =>
      //we should never hit this case
throw new IllegalStateException("The parse tree starts with a leaf: " + l)
}
/**
   * Returns the actual fix performed and the result of the repair as
   * a mapping from the old rules to the new rules that replace them.
*/
def eliminateAParseTree(): GrammarFeedback[T] = {
val gtree = gParser.parseWithTree(ungenWord).get
if (opctx.debugSupersetRepair)
println("Input parse tree: " + gtree)
    //when there are no permissible parse trees the grammar doesn't accept any valid string
//hence, remove the first production
if (permissibleParseTrees.isEmpty && !gctx.abort) {
RemoveRules(List(gtree.asInstanceOf[PNode[T]].r))
} else if (gctx.abort) {
NoRepair("Operation Aborted.")
} else {
val repairType = findRepairPoint(gtree)
if (opctx.debugSupersetRepair) {
//println("context rule: "+context+" repair point: "+repairPoint)
println("Repair Type: " + repairType.toString)
}
repairType match {
case _ if gctx.abort =>
NoRepair("Operation Aborted.")
case RemoveRule(rule) =>
RemoveRules(List(rule))
case ExpandRightSides(rule @ Rule(lhs, rhs)) =>
//here, inline the right sides
val nontermsToInline = rhs.collect { case nt: Nonterminal => nt }.toSet
val newRightSides = inlineNontermsInSententialForm(nontermsToInline, rhs, g)
val newRules = newRightSides.map(rside => Rule(lhs, rside))
ExpandRules(List(rule), newRules)
case PreventUnderContext(repairRule, index, context) =>
//Fix(a) check if the 'contextRule' is actually a redundant rule
//TODO: how can we abort the operation here ?
if(isRedundantRule(repairRule)){
//'repairRule' is redundant
RemoveRules(List(repairRule), None)
}
else if(isRedundantRule(context)) {
//'contextRule' is redundant
RemoveRules(List(context), Some(List(repairRule)))
}
else {
//Fix(b): we need to make the 'repairRule' impossible in context
//without affecting the set of strings that are accepted
val (splitNT, newRules) = findReplacement(repairRule, index, context)
//replace the 'repairRule.leftSide' in the contextRule with 'splitNT'
val newContextRule = Rule(context.leftSide, context.rightSide.zipWithIndex.map {
case (sym, `index`) => splitNT
case (sym, i) => sym
})
if (opctx.debugSupersetRepair)
println("Replacing: " + context + " by \\n" + (newContextRule +: newRules.toList).mkString("\\n"))
//Map(context -> (newContextRule +: newRules.toList))
RefineRules(List(context), (newContextRule +: newRules.toList), List(repairRule))
}
}
}
}
def isRedundantRule(rule: Rule[T]) = {
val newCnf = Grammar[T](g.start, g.rules.filterNot(_ == rule)).cnfGrammar
if (!newCnf.rules.isEmpty) {
val cykparser = new CYKParser(newCnf)
val ctex = acceptedWords.find(x => !cykparser.parse(x))
if (!ctex.isDefined) {
//every word that was previously accepted are also accepted now
true
} else false
} else false
}
/**
   * Finds a replacement for the left side of 'refineRule' for use at position 'index' of the
   * given context rule, while keeping the original nonterminal unchanged for other contexts.
   * As an optimization, check which productions of the left side of 'refineRule' are actually
   * used by the context and add only those to the new nonterminal.
*/
def findReplacement(refineRule: Rule[T], index: Int, context: Rule[T]): (Nonterminal, Seq[Rule[T]]) = {
//val splitProds = nontermProductions(repairRule.leftSide) -- Set(repairRule.rightSide)
val usedProds = children(context).collect {
case (r, `index`) if r.leftSide == refineRule.leftSide => r
}.map(_.rightSide)
val splitProds = usedProds
/*//here, check if
if (isRulePermissible(r, contextRule, index)) {
usedProds -- Set(refineRule.rightSide)*/
//check if the 'nontermProductions' map already contains a non-terminal for this split
val existingNT = findNonterminalWithProductions(splitProds)
if (existingNT.isDefined && existingNT.get != refineRule.leftSide) //note: we do not want to add the same rule again
(existingNT.get, Seq())
else {
var foundReplacement: Option[Nonterminal] = None
if (opctx.enableExpensiveRepair) {
//check here if any other non-terminal in the grammar can be used in place of refineNT
        //note that such a non-terminal should have the same form as splitProds i.e.,
// it cannot have any more or any less productions, it should also agree on all terminals
//TODO: this has connections to minimizing a grammar
val candidates = g.nontermToRules.foldLeft(List[Nonterminal]()) {
case (acc, (nt, rules)) =>
if (rules.size == splitProds.size) {
if (splitProds.exists { sp =>
val foundMatch = rules.exists(rl => (sp zip rl.rightSide).forall {
case (t1: Terminal[T], t2: Terminal[T]) if (t1 == t2) => true
case (nt1: Nonterminal, nt2: Nonterminal) => true
case _ => false
})
!foundMatch
}) {
//here not every rule has a match, so skip this non-terminal
acc
} else {
//here every rule has match (though it may not be one-to-one which is not checked)
acc :+ nt
}
} else acc //here there cannot be a one-to-one match for every rule
}
if (opctx.debugSupersetRepair) {
println("Candidates chosen: " + candidates.mkString(","))
}
//try each candidate to replace the non-terminal at the 'index' of the right-side of the
//context rule
val ruleWOCtx = g.rules.filterNot(_ == context)
foundReplacement = candidates.find { cand =>
val newContextRule = Rule(context.leftSide, context.rightSide.zipWithIndex.map {
case (sym, `index`) => cand
case (sym, _) => sym
})
val candidateCNF = Grammar[T](g.start, ruleWOCtx :+ newContextRule).cnfGrammar
val candParser = new CYKParser(candidateCNF)
//check if candidateCNF does not accept the ungeneratable word
//Here, we are using CYK parser, hence, this could be expensive
if (!candParser.parse(ungenWord)) {
//check if the acceptance of the grammar is preserved
val ctex = acceptedWords.find(x => !candParser.parse(x))
if (!ctex.isDefined) {
//TODO: is this necessary ?
//check if the new grammar is a subset of the old
/* val ctex2 = (new EquivalenceChecker(g)).counterExampleForInclusion(candidateCNF)
if (ctex2.isEmpty) {*/
if (opctx.debugSupersetRepair) {
println("Found a replacement: " + cand)
}
true
//} else false
} else false
} else false
}
}
if (foundReplacement.isEmpty) {
//here, we need to create a new non-terminal as nothing in the grammar can be reused
val newnt = copy(refineRule.leftSide)
(newnt, splitProds.map(prod => Rule(newnt, prod)).toList)
} else
(foundReplacement.get, Seq())
}
}
/**
* Splits a nonterminal into several nonterminals
*/
def splitNonterminal(oldSym: Nonterminal, newSyms: Set[Nonterminal], rules: Set[Rule[T]]): Set[Rule[T]] = {
    //first blow up all rights that contain 'oldSym'
val newrules = Util.fixpoint((rls: Set[Rule[T]]) => rls.flatMap[Rule[T], Set[Rule[T]]] {
case Rule(lside, rside) if rside.contains(oldSym) =>
val firstIndex = rside.indexOf(oldSym)
val prefix = rside.take(firstIndex) //this excludes 'oldSym'
val suffix = rside.drop(firstIndex + 1) //this includes 'oldSym'
newSyms.map(sym => (prefix :+ sym) ++ suffix).map(newRight => Rule(lside, newRight))
case other @ _ => Set(other)
})(rules)
//Now, blow up all lefts containing 'oldSym'
newrules.flatMap[Rule[T], Set[Rule[T]]] {
case Rule(lside, rside) if lside == oldSym =>
newSyms.map(sym => Rule(sym, rside))
case other @ _ => Set(other)
}
}
} | epfl-lara/GrammarComparison | src/main/scala/grammarcomp/repair/ContextBasedSuperSetRepair.scala | Scala | mit | 15,978 |
package browser
import helpers.TestDataPerTest
import org.scalatestplus.play.{HtmlUnitFactory, OneBrowserPerTest, OneServerPerTest, PlaySpec}
class ListPacksPageTests extends PlaySpec with TestDataPerTest with OneServerPerTest with OneBrowserPerTest with HtmlUnitFactory {
"The list packs page" should {
"display all packs" in {
go to listPacksPage
find("packs").get.text must include(pack.name)
}
"not display packs after they are deleted" in {
go to listPacksPage
click on className("delete-pack")
find("packs").get.text must not include pack.name
}
"display packs after they are created" in {
go to listPacksPage
textField("name").value = "test-name"
click on "add-pack"
find("packs").get.text must include("test-name")
}
"displays an error when pack name is not provided" in {
go to listPacksPage
click on "add-pack"
find("add-pack-form").get.text must include("This field is required")
}
"links to the view pack page" in {
go to listPacksPage
click on linkText(pack.name)
find("links").get.text must include(pack.name)
}
}
private def listPacksPage = {
s"http://localhost:$port/"
}
}
| notclive/backpack | test/browser/ListPacksPageTests.scala | Scala | mit | 1,249 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.columnar.compression
import java.nio.ByteBuffer
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
import org.apache.spark.sql.execution.columnar._
import org.apache.spark.sql.execution.columnar.ColumnarTestUtils._
import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector
import org.apache.spark.sql.types.AtomicType
class DictionaryEncodingSuite extends SparkFunSuite {
val nullValue = -1
testDictionaryEncoding(new IntColumnStats, INT)
testDictionaryEncoding(new LongColumnStats, LONG)
testDictionaryEncoding(new StringColumnStats, STRING, false)
def testDictionaryEncoding[T <: AtomicType](
columnStats: ColumnStats,
columnType: NativeColumnType[T],
testDecompress: Boolean = true): Unit = {
val typeName = columnType.getClass.getSimpleName.stripSuffix("$")
def buildDictionary(buffer: ByteBuffer) = {
(0 until buffer.getInt()).map(columnType.extract(buffer) -> _.toShort).toMap
}
def stableDistinct(seq: Seq[Int]): Seq[Int] = if (seq.isEmpty) {
Seq.empty
} else {
seq.head +: seq.tail.filterNot(_ == seq.head)
}
def skeleton(uniqueValueCount: Int, inputSeq: Seq[Int]): Unit = {
// -------------
// Tests encoder
// -------------
val builder = TestCompressibleColumnBuilder(columnStats, columnType, DictionaryEncoding)
val (values, rows) = makeUniqueValuesAndSingleValueRows(columnType, uniqueValueCount)
val dictValues = stableDistinct(inputSeq)
inputSeq.foreach(i => builder.appendFrom(rows(i), 0))
if (dictValues.length > DictionaryEncoding.MAX_DICT_SIZE) {
withClue("Dictionary overflowed, compression should fail") {
intercept[Throwable] {
builder.build()
}
}
} else {
val buffer = builder.build()
val headerSize = CompressionScheme.columnHeaderSize(buffer)
// 4 extra bytes for dictionary size
val dictionarySize = 4 + rows.map(columnType.actualSize(_, 0)).sum
// 2 bytes for each `Short`
val compressedSize = 4 + dictionarySize + 2 * inputSeq.length
// 4 extra bytes for compression scheme type ID
assertResult(headerSize + compressedSize, "Wrong buffer capacity")(buffer.capacity)
// Skips column header
buffer.position(headerSize)
assertResult(DictionaryEncoding.typeId, "Wrong compression scheme ID")(buffer.getInt())
val dictionary = buildDictionary(buffer).toMap
dictValues.foreach { i =>
assertResult(i, "Wrong dictionary entry") {
dictionary(values(i))
}
}
inputSeq.foreach { i =>
assertResult(i.toShort, "Wrong column element value")(buffer.getShort())
}
// -------------
// Tests decoder
// -------------
// Rewinds, skips column header and 4 more bytes for compression scheme ID
buffer.rewind().position(headerSize + 4)
val decoder = DictionaryEncoding.decoder(buffer, columnType)
val mutableRow = new GenericInternalRow(1)
if (inputSeq.nonEmpty) {
inputSeq.foreach { i =>
assert(decoder.hasNext)
assertResult(values(i), "Wrong decoded value") {
decoder.next(mutableRow, 0)
columnType.getField(mutableRow, 0)
}
}
}
assert(!decoder.hasNext)
}
}
def skeletonForDecompress(uniqueValueCount: Int, inputSeq: Seq[Int]): Unit = {
if (!testDecompress) return
val builder = TestCompressibleColumnBuilder(columnStats, columnType, DictionaryEncoding)
val (values, rows) = makeUniqueValuesAndSingleValueRows(columnType, uniqueValueCount)
val dictValues = stableDistinct(inputSeq)
val nullRow = new GenericInternalRow(1)
nullRow.setNullAt(0)
inputSeq.foreach { i =>
if (i == nullValue) {
builder.appendFrom(nullRow, 0)
} else {
builder.appendFrom(rows(i), 0)
}
}
val buffer = builder.build()
// ----------------
// Tests decompress
// ----------------
// Rewinds, skips column header and 4 more bytes for compression scheme ID
val headerSize = CompressionScheme.columnHeaderSize(buffer)
buffer.position(headerSize)
assertResult(DictionaryEncoding.typeId, "Wrong compression scheme ID")(buffer.getInt())
val decoder = DictionaryEncoding.decoder(buffer, columnType)
val columnVector = new OnHeapColumnVector(inputSeq.length, columnType.dataType)
decoder.decompress(columnVector, inputSeq.length)
if (inputSeq.nonEmpty) {
inputSeq.zipWithIndex.foreach { case (i: Any, index: Int) =>
if (i == nullValue) {
assertResult(true, s"Wrong null ${index}-th position") {
columnVector.isNullAt(index)
}
} else {
columnType match {
case INT =>
assertResult(values(i), s"Wrong ${index}-th decoded int value") {
columnVector.getInt(index)
}
case LONG =>
assertResult(values(i), s"Wrong ${index}-th decoded long value") {
columnVector.getLong(index)
}
case _ => fail("Unsupported type")
}
}
}
}
}
test(s"$DictionaryEncoding with $typeName: empty") {
skeleton(0, Seq.empty)
}
test(s"$DictionaryEncoding with $typeName: simple case") {
skeleton(2, Seq(0, 1, 0, 1))
}
test(s"$DictionaryEncoding with $typeName: dictionary overflow") {
skeleton(DictionaryEncoding.MAX_DICT_SIZE + 1, 0 to DictionaryEncoding.MAX_DICT_SIZE)
}
test(s"$DictionaryEncoding with $typeName: empty for decompress()") {
skeletonForDecompress(0, Seq.empty)
}
test(s"$DictionaryEncoding with $typeName: simple case for decompress()") {
skeletonForDecompress(2, Seq(0, nullValue, 0, nullValue))
}
test(s"$DictionaryEncoding with $typeName: dictionary overflow for decompress()") {
skeletonForDecompress(DictionaryEncoding.MAX_DICT_SIZE + 2,
Seq(nullValue) ++ (0 to DictionaryEncoding.MAX_DICT_SIZE - 1) ++ Seq(nullValue))
}
}
}
| goldmedal/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/DictionaryEncodingSuite.scala | Scala | apache-2.0 | 7,165 |
package controllers
/**
* Created by Jörg Amelunxen on 20.11.14.
*/
import javax.inject.Singleton
import org.slf4j.{Logger, LoggerFactory}
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.json._
import play.api.mvc._
import play.modules.reactivemongo.MongoController
import play.modules.reactivemongo.json.collection.JSONCollection
import reactivemongo.api.Cursor
import scala.concurrent.Future
@Singleton
class RoleController extends Controller with MongoController {
private final val logger: Logger = LoggerFactory.getLogger(classOf[RoleController])
def collection: JSONCollection = db.collection[JSONCollection]("usersAdd")
import models.JsonFormats._
import models._
/**
   * Returns the user/role entries in the db for the given userid
   *
   * @return a JSON array containing the role entries that match the given userid
*/
def getRole(userid: String) = Action.async {
val cursor: Cursor[UserAddModel] = collection.find(Json.obj("userid" -> userid)).cursor[UserAddModel]
val futureRolesList: Future[List[UserAddModel]] = cursor.collect[List]()
val futureRolesJsonArray: Future[JsArray] = futureRolesList.map { roles =>
Json.arr(roles)
}
futureRolesJsonArray.map {
roles =>
Ok(roles(0))
}
}
/**
* Returns every user/role in the db
*
* @return a list that contains every role as a JSON object
*/
def getRoles = Action.async {
val cursor: Cursor[UserAddModel] = collection.find(Json.obj()).cursor[UserAddModel]
val futureRolesList: Future[List[UserAddModel]] = cursor.collect[List]()
val futureRolesJsonArray: Future[JsArray] = futureRolesList.map { roles =>
Json.arr(roles)
}
futureRolesJsonArray.map {
roles =>
Ok(roles(0))
}
}
/**
   * Updates the role of the given user
   *
   * @return Created if the role was updated, BadRequest if the request body is not valid JSON
*/
def updateRole = Action.async(parse.json) {
request =>
request.body.validate[UserAddModel].map {
userAdd =>
          val modifier = Json.obj("$set" -> Json.obj(
            "role" -> userAdd.role,
            "master" -> userAdd.master,
            "admin" -> userAdd.admin))
// update entry
collection.update(Json.obj("userid" -> userAdd.userid),modifier,upsert = true).map {
lastError =>
logger.debug(s"Successfully inserted with LastError: $lastError")
Created(s"Role updated")
}
}.getOrElse(Future.successful(BadRequest("invalid json")))
}
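  // Usage sketch: updateRole expects a JSON body that deserializes to UserAddModel, e.g.
  // (field values are made up and the exact field types are assumptions based on the usage above):
  //   { "userid": "u123", "role": "student", "master": false, "admin": false }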
}
| HiP-App/HiPBackend | app/controllers/RoleController.scala | Scala | apache-2.0 | 2,575 |
package org.scalatra
import javax.servlet.ServletContext
import javax.servlet.http.{ HttpServletRequest, HttpServletResponse }
import org.scalatra.servlet.ServletApiImplicits
object ScalatraContext {
private class StableValuesContext(
implicit val request: HttpServletRequest,
val response: HttpServletResponse,
val servletContext: ServletContext) extends ScalatraContext
}
trait ScalatraContext
extends ServletApiImplicits
with SessionSupport
with CookieContext {
import org.scalatra.ScalatraContext.StableValuesContext
implicit def request: HttpServletRequest
implicit def response: HttpServletResponse
def servletContext: ServletContext
/**
* Gets the content type of the current response.
*/
def contentType: String = response.contentType getOrElse null
/**
* Gets the status code of the current response.
*/
def status: Int = response.status.code
/**
* Sets the content type of the current response.
*/
def contentType_=(contentType: String): Unit = {
response.contentType = Option(contentType)
}
@deprecated("Use status_=(Int) instead", "2.1.0")
def status(code: Int): Unit = { status_=(code) }
/**
* Sets the status code of the current response.
*/
def status_=(code: Int): Unit = { response.status = ResponseStatus(code) }
/**
* Explicitly sets the request-scoped format. This takes precedence over
* whatever was inferred from the request.
*/
def format_=(formatValue: Symbol): Unit = {
request(ApiFormats.FormatKey) = formatValue.name
}
/**
* Explicitly sets the request-scoped format. This takes precedence over
* whatever was inferred from the request.
*/
def format_=(formatValue: String): Unit = {
request(ApiFormats.FormatKey) = formatValue
}
protected[this] implicit def scalatraContext: ScalatraContext = {
new StableValuesContext()(request, response, servletContext)
}
} | Alefas/Scalatra | core/src/main/scala/org/scalatra/ScalatraContext.scala | Scala | bsd-2-clause | 1,938 |
/**
* Copyright (C) 2019 Inera AB (http://www.inera.se)
*
* This file is part of statistik (https://github.com/sklintyg/statistik).
*
* statistik is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* statistik is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package se.inera.statistics.gatling
import io.gatling.core.Predef._
object NationellDiagnosavsnitt {
def exec(grupp: String) = RestCall.get(
s"getDiagnosavsnittstatistik",
s"${Conf.uri}/api/getDiagnosavsnittstatistik/${grupp}")
}
| sklintyg/statistik | gatling/src/test/scala/se/inera/statistics/gatling/NationellDiagnosavsnitt.scala | Scala | lgpl-3.0 | 1,028 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package sbt.testing
import scala.scalajs.reflect.annotation._
/** Interface implemented by test frameworks. */
@EnableReflectiveInstantiation
trait Framework {
/** A human-friendly name of the test framework that this object represents.
*/
def name(): String
/** An array of <a href="Fingerprint.html"><code>Fingerprint</code></a>s
* that specify how to identify test classes during discovery.
*/
def fingerprints(): Array[Fingerprint]
/** Initiates a run.
*
* If a client invokes this method before a previously initiated run has
* completed, the test framework may throw
* <code>IllegalStateException</code> to indicate it cannot perform the two
* runs concurrently.
*
* @param args the test-framework-specific arguments for the new run
* @param remoteArgs the test-framework-specific remote arguments for the run in a forked JVM
* @param testClassLoader a class loader to use when loading test classes during the run
*
* @return a <code>Runner</code> representing the newly started run.
* @throws java.lang.IllegalStateException if the test framework is unable to
* initiate a run because it is already performing a previously initiated
* run that has not yet completed.
*/
def runner(args: Array[String], remoteArgs: Array[String],
testClassLoader: ClassLoader): Runner
/** Scala.js specific: Creates a slave runner for a given run.
*
* The slave may send a message to the master runner by calling `send`.
*/
def slaveRunner(args: Array[String], remoteArgs: Array[String],
testClassLoader: ClassLoader, send: String => Unit): Runner
}
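/** Implementation sketch: a minimal framework that discovers subclasses of a hypothetical base
 *  class. The `SubclassFingerprint` members are assumed from the standard sbt test interface,
 *  and runner creation is left unimplemented because a real `Runner` is framework specific.
 */
object SketchFramework extends Framework {
  def name(): String = "sketch"

  def fingerprints(): Array[Fingerprint] = Array(
    new SubclassFingerprint {
      def superclassName(): String = "example.SketchSuite" // hypothetical base class
      def isModule(): Boolean = false
      def requireNoArgConstructor(): Boolean = true
    }
  )

  def runner(args: Array[String], remoteArgs: Array[String],
      testClassLoader: ClassLoader): Runner = ???

  def slaveRunner(args: Array[String], remoteArgs: Array[String],
      testClassLoader: ClassLoader, send: String => Unit): Runner = ???
}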
| gzm0/scala-js | test-interface/src/main/scala/sbt/testing/Framework.scala | Scala | apache-2.0 | 1,931 |
/*
* Copyright 2020 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.jdbc.sharded
import java.sql.ResultSet
import com.spotify.scio.jdbc.JdbcConnectionOptions
/**
* A bag of options for the JDBC sharded read.
*
* @param connectionOptions
* Connection options
* @param tableName
* Name of a table or materialized view to read from
* @param shardColumn
* Column to shard by. Must be of integer/long type ideally with evenly distributed values
* @param rowMapper
* Function to map from a SQL [[java.sql.ResultSet]] to `T`
* @param fetchSize
* Amount of rows fetched per [[java.sql.ResultSet]]. Default value is 100000. To apply an
* unbounded fetch size set this parameter to -1
* @param numShards
* Number of shards to split the table into for reading. There is no guarantee that Beam will
* actually execute reads in parallel. It is up to Beam auto scaler to decide the level of
* parallelism to use (number of workers and threads per worker). But the behavior could be
* controlled with maxNumWorkers and numberOfWorkerHarnessThreads parameters (see more details
* about these parameters here). Defaults to 4
* @param shard
* An implementation of the [[com.spotify.scio.jdbc.sharded.Shard]] trait which knows how to shard
* a column of a type S. Example of sharding by a column of type Long:
* {{{
* sc.jdbcShardedSelect(getShardedReadOptions(opts), Shard.range[Long])
* }}}
*/
final case class JdbcShardedReadOptions[T, S](
connectionOptions: JdbcConnectionOptions,
tableName: String,
shardColumn: String,
shard: Shard[S],
rowMapper: ResultSet => T,
fetchSize: Int = JdbcShardedReadOptions.DefaultFetchSize,
numShards: Int = JdbcShardedReadOptions.DefaultNumShards
)
object JdbcShardedReadOptions {
val DefaultFetchSize: Int = 100000
val UnboundedFetchSize: Int = -1
val DefaultNumShards: Int = 4
}
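/**
 * Construction sketch: the table name, shard column and row mapping below are assumptions made
 * up for illustration, and the connection options are taken as a parameter since their fields
 * are defined elsewhere.
 */
object JdbcShardedReadOptionsSketch {
  def forUsers(connectionOptions: JdbcConnectionOptions): JdbcShardedReadOptions[String, Long] =
    JdbcShardedReadOptions[String, Long](
      connectionOptions = connectionOptions,
      tableName = "users",                    // hypothetical table
      shardColumn = "id",                     // integer/long column with evenly distributed values
      shard = Shard.range[Long],              // shard by splitting the id range, as in the scaladoc above
      rowMapper = rs => rs.getString("name"), // map each ResultSet row to a String
      numShards = 8)
}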
| spotify/scio | scio-jdbc/src/main/scala/com/spotify/scio/jdbc/sharded/JdbcShardedReadOptions.scala | Scala | apache-2.0 | 2,451 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.internal.cypher.acceptance
import org.neo4j.cypher.ExecutionEngineFunSuite
class RemoveAcceptanceTest extends ExecutionEngineFunSuite {
test("should ignore nulls") {
val n = createNode("apa" -> 42)
val result = execute("MATCH n OPTIONAL MATCH n-[r]->() REMOVE r.apa RETURN n")
result.toList should equal(List(Map("n" -> n)))
}
}
| HuangLS/neo4j | community/cypher/acceptance/src/test/scala/org/neo4j/internal/cypher/acceptance/RemoveAcceptanceTest.scala | Scala | apache-2.0 | 1,162 |
package org.elasticsearch.spark.cfg
import org.elasticsearch.spark.serialization.ReflectionUtils._
import org.junit.Test
import org.junit.Assert._
import org.hamcrest.Matchers._
import org.apache.spark.SparkConf
import org.elasticsearch.hadoop.cfg.PropertiesSettings
class SparkConfigTest {
@Test
def testProperties() {
val cfg = new SparkConf().set("type", "onegative")
val settings = new SparkSettingsManager().load(cfg)
val props = new PropertiesSettings().load(settings.save())
assertEquals("onegative", props.getProperty("type"))
}
@Test
def testSparkProperties() {
val cfg = new SparkConf().set("spark.type", "onegative")
val settings = new SparkSettingsManager().load(cfg)
val props = new PropertiesSettings().load(settings.save())
assertEquals("onegative", props.getProperty("type"))
}
@Test
def testSparkPropertiesOverride() {
val cfg = new SparkConf().set("spark.type", "fail").set("type", "win")
val settings = new SparkSettingsManager().load(cfg)
val props = new PropertiesSettings().load(settings.save())
assertEquals("win", props.getProperty("type"))
}
} | costin/elasticsearch-hadoop | spark/core/test/scala/org/elasticsearch/spark/cfg/SparkConfigTest.scala | Scala | apache-2.0 | 1,141 |
package org.ai4fm.proofprocess.core.analysis
import org.eclipse.emf.ecore.{EObject, EStructuralFeature}
import org.eclipse.emf.ecore.util.EcoreUtil.EqualityHelper
/**
* A helper for EMF equality matching. Allows using only a subset of EObject features
* for equality.
*
* @author Andrius Velykis
*/
class FeatureEqualityHelper(eqFeatures: Set[EStructuralFeature]) extends EqualityHelper {
override def haveEqualFeature(eObj1: EObject, eObj2: EObject, feature: EStructuralFeature): Boolean =
if (eqFeatures.contains(feature)) {
super.haveEqualFeature(eObj1, eObj2, feature)
} else {
// this feature is not included in equality matching, so take it as equal without checking
true
}
}
object FeatureEqualityHelper {
def apply(eqFeatures: EStructuralFeature*): FeatureEqualityHelper =
new FeatureEqualityHelper(eqFeatures.toSet)
}
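/**
 * Usage sketch: `EqualityHelper` exposes `equals(eObject1, eObject2)`, so two EObjects can be
 * compared on a chosen subset of features. The feature and the compared objects below are
 * hypothetical.
 *
 * {{{
 *   val nameFeature: EStructuralFeature = ??? // e.g. a package literal for a "name" feature
 *   val helper = FeatureEqualityHelper(nameFeature)
 *   val sameName = helper.equals(proofStepA, proofStepB)
 * }}}
 */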
| andriusvelykis/proofprocess | org.ai4fm.proofprocess.core/src/org/ai4fm/proofprocess/core/analysis/FeatureEqualityHelper.scala | Scala | epl-1.0 | 883 |
/*
* Copyright (c) 2014-2014 Erik van Oosten All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.grons.otagolog.client
import nl.grons.otagolog.client.net.SimpleSocketClient
import java.net.InetSocketAddress
import nl.grons.otagolog.shared.config.{ConfigurationDefaults, InetSocketAddressParser}
object OtagologClient {
def main(args: Array[String]) {
sendTestData()
}
def sendTestData() {
val client = new SimpleSocketClient(new InetSocketAddress(ConfigurationDefaults.DefaultServerPort))
try {
val messageCount = 2000000
val buffer = Array.tabulate[Byte](1024) { i => ('a' + i % 26).toByte }
buffer(0) = 0.toByte
buffer(1) = 0.toByte
buffer(2) = 0.toByte
(0 to messageCount).foreach { i =>
val len = i % 256
buffer(3) = len.toByte
client.postNoReply(buffer, 0, len)
}
} finally {
client.halt()
}
}
} | erikvanoosten/otagolog | src/main/scala/nl/grons/otagolog/client/OtagologClient.scala | Scala | apache-2.0 | 1,456 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.xASSEMBLEx.util
import scala.annotation.tailrec
object NucleotideSequenceHash {
/**
* Hashes a char.
*
* @param c Char to hash.
* @return Returns a long.
*
* @throws IllegalArgumentException Exception thrown if letter is not in {A,C,G,T,U}.
*/
def hashChar(c: Char): Long = c match {
case 'A' => 0L
case 'C' => 1L
case 'G' => 2L
case 'T' | 'U' => 3L
case _ => throw new IllegalArgumentException("Saw non-nucleotide letter (" + c + ").")
}
/**
* Hashes a sequence of nucleotides into a 64 bit long.
*
* @param sequence Nucleotide sequence to hash.
* @return Returns a long hash.
*
* @throws IllegalArgumentException Throws an exception if the string contains
* non-nucleotide (ACTUG) letters.
*/
def hashSequence(sequence: String): Long = {
@tailrec def hashHelper(iter: Iterator[Char], hash: Long): Long = {
if (!iter.hasNext) {
hash
} else {
val newHash = 4L * hash + hashChar(iter.next)
hashHelper(iter, newHash)
}
}
// we store a hash at 2 bits per nucleotide in a 64 bit long
val seq = if (sequence.length > 32) {
sequence.take(31)
} else {
sequence
}
    // call tail-recursive hash computing function on the (possibly truncated) sequence
    hashHelper(seq.toIterator, 0L)
}
}
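/**
 * Worked example of the 2-bits-per-letter encoding above: A=0, C=1, G=2, T=3, so
 * hashSequence("ACGT") = ((0 * 4 + 1) * 4 + 2) * 4 + 3 = 27. The example object name is
 * arbitrary and only exists for illustration.
 */
object NucleotideSequenceHashExample extends App {
  assert(NucleotideSequenceHash.hashChar('G') == 2L)
  assert(NucleotideSequenceHash.hashSequence("ACGT") == 27L)
  println("hash(ACGT) = " + NucleotideSequenceHash.hashSequence("ACGT"))
}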
| fnothaft/xASSEMBLEx | xASSEMBLEx-core/src/main/scala/org/bdgenomics/xASSEMBLEx/util/NucleotideSequenceHash.scala | Scala | apache-2.0 | 2,156 |
/*
* Copyright 2015 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.core.helper
import java.io.{FileOutputStream, OutputStreamWriter, BufferedWriter, File}
import simplex3d.math.double._
import simplex3d.math.floatx.{ConstVec3f, Mat4f, ConstMat4f}
import simx.core.components.renderer.setup._
import simx.core.ontology._
import simx.core.ontology.referencesystems.CoordinateSystemConverter
import com.thoughtworks.xstream.XStream
import simx.core.ontology.types.OntologySymbol
import scala.xml.{XML, Node}
/**
* Created by martin
* on 14/08/15.
*/
object Calibration {
def apply(xmlFile: File): Calibration = {
Symbols.vrpn
Symbols.kinect
val calibrationXml = XML.loadFile(xmlFile)
val xStream = new XStream()
val data = xStream.fromXML(calibrationXml.toString()).asInstanceOf[CalibrationData]
val coordinateSystemName =
OntologySymbol.lookup(Symbol(data.coordinateSystemName)).getOrElse(
throw new Exception("Could not look up " + data.coordinateSystemName))
new Calibration(
data.screenTransformation,
data.screenSize.toTuple,
data.screenResolution.toTuple,
coordinateSystemName,
data.display
)
}
abstract sealed class StereoType
case object FrameSequential extends StereoType
case object TwoScreens extends StereoType
}
/**
*
* Created by martin on 12.08.15.
*/
class Calibration(
val screenTransformation: ConstMat4,
screenSize: (Double, Double),
screenResolution: (Int, Int) = (1920, 1080),
coordinateSystemName: GroundedSymbol = Symbols.kinect,
display: Int = 0
) {
val converter =
new CoordinateSystemConverter[ConstMat4](coordinateSystemName, screenTransformation)
def targetToScreenCoordinates(m : ConstMat4f) = ConstMat4f(converter.toRefCoords(m))
def targetToScreenCoordinates(v : ConstVec3): ConstVec3f =
ConstVec3f(converter.toRefCoords(ConstMat4f(Mat4x3.translate(v)))(3).xyz)
def createDisplaySetupDescription(fullscreen : Boolean = false, stereo: Option[Calibration.StereoType] = None): DisplaySetupDesc = {
//val widthOfScreenInMeters: Double = widthInPx / dpi * 0.0254
val transformation = Mat4f.Identity
val eyeSeparation = Some( 0.065f )
val resolution = if (fullscreen) None else Some(screenResolution)
val size = screenSize //widthOfScreenInMeters * heightInPx / widthInPx
stereo match {
case None =>
val displayDesc = new DisplayDesc(resolution, size, transformation, new CamDesc(0, Eye.RightEye, Some( 0.0f )))
new DisplaySetupDesc().addDevice(new DisplayDevice(Some((display, 0, 0)), displayDesc :: Nil, LinkType.SingleDisplay), 0)
case Some(_: Calibration.TwoScreens.type) =>
val rightEyeDisplay = new DisplayDesc(resolution, size, transformation, new CamDesc(0, Eye.RightEye, eyeSeparation))
val leftEyeDisplay = new DisplayDesc(resolution, size, transformation, new CamDesc(0, Eye.LeftEye, eyeSeparation))
new DisplaySetupDesc().
addDevice(new DisplayDevice(Some((display, 1, 0)), leftEyeDisplay :: Nil, LinkType.SingleDisplay), 0).
addDevice(new DisplayDevice(Some((display, 0, 0)), rightEyeDisplay :: Nil, LinkType.SingleDisplay), 0)
case Some(_: Calibration.FrameSequential.type) =>
val rightEyeDisplay = new DisplayDesc(resolution, size, transformation, new CamDesc(0, Eye.RightEye, eyeSeparation))
val leftEyeDisplay = new DisplayDesc(resolution, size, transformation, new CamDesc(0, Eye.LeftEye, eyeSeparation))
new DisplaySetupDesc().
addDevice(new DisplayDevice(None , rightEyeDisplay :: leftEyeDisplay :: Nil, LinkType.FrameSequential), 0)
}
}
def saveTo(xmlFile: File): Unit = {
val serializer = new XStream()
val data = CalibrationData(coordinateSystemName.toString, Size(screenSize), Size(screenResolution), display, screenTransformation)
val xml = scala.xml.Unparsed(serializer.toXML(data))
save(xml, xmlFile, prettyPrint = true, addXmlDeclaration = true)
}
private def pretty(xml: Node) = {
val prettyPrinter = new scala.xml.PrettyPrinter(80, 2)
prettyPrinter.format(xml)
}
private def save(xml: Node, file: File, prettyPrint: Boolean = false, addXmlDeclaration: Boolean = false): Unit = {
var xmlString = if(prettyPrint) pretty(xml) else xml.toString()
if(addXmlDeclaration)
xmlString = """<?xml version='1.0' encoding='UTF-8'?>""" + "\\n" + xmlString
val out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "UTF-8"))
try {out.write(xmlString)} finally {out.close()}
}
}
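/**
 * Usage sketch: load a calibration from disk and derive a display setup from it. The XML file
 * path and the choice of frame-sequential stereo are assumptions made up for illustration.
 */
object CalibrationUsageSketch {
  def loadAndCreateSetup(): DisplaySetupDesc = {
    val calibration = Calibration(new File("config/calibration.xml")) // hypothetical path
    calibration.createDisplaySetupDescription(
      fullscreen = true,
      stereo = Some(Calibration.FrameSequential))
  }
}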
private case class CalibrationData(
coordinateSystemName: String,
screenSize: Size[Double],
screenResolution: Size[Int],
display: Int = 0,
screenTransformation: ConstMat4
)
private object Size {
def apply[T](size: (T, T)) = new Size(size._1, size._2)
}
private class Size[T](val width: T, val height: T) {
def toTuple = (width, height)
} | simulator-x/core | src/simx/core/helper/Calibration.scala | Scala | apache-2.0 | 5,807 |
import scalaj.http._
import scala.collection.JavaConversions._
import java.io.File
import scala.io.Source
case class HttpException(msg: String) extends Throwable
/*
In order to use this profiling script, you should install the latest version of scala and sbt (sbt will install everything else for you).
The following commands may help you get it running.
Problem: trailing whitespace in a query file may cause the script to crash. The problem is on line 74: getLines() will fail if there is trailing whitespace.
For simplicity and readability, this issue is left in the program. Trailing whitespace should not appear in the query files anyway.
cd bin
sbt run
*/
object util{
  //get the current directory
def getCurrentDirectory = new java.io.File( "." ).getCanonicalPath
/*
This is a timer for timing
*/
def time[R](block: => R): R = {
val t0 = System.nanoTime()
val result = block // call-by-name
val t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
result
}
/**
*
   * @param url url address
   * @param cookies list of cookie pairs: (name, value)
   * @return the response body as a String
*/
def get(url:String, cookies: List[(String,String)]): String ={
val request = Http(url)
cookies.foreach((tuple_string) => request.cookie(tuple_string._1, tuple_string._2)) //insert cookies
request.header("User-Agent","Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.4; en-US; rv:1.9.2.2) Gecko/20100316 Firefox/3.6.2")
val response = try{
request.asString
} catch {
case _:Throwable => throw new HttpException("error in get data and translate to String, maybe it's bad connection")
}
val reg_200 = """.*(200).*""".r
if(response.isSuccess && reg_200.findFirstIn(response.statusLine).isDefined){
try{
response.body
}catch{
case throwable: Throwable => throw new HttpException("fail to get the content")
}
}
else{
throw new HttpException("bad request" + response.statusLine)
}
}
}
//main func
object profile extends App{
  //two regexes: one to locate the project root (the parent of /bin), one to match the query files (*NN.txt)
val main_dic_r = """(.*)/bin""".r
val queries_r = """(.*\d\d.txt)""".r
val main_dic_r(main_dic) = util.getCurrentDirectory
val test_dic = main_dic + "/test/queries"
//get the absolute path of all query files
val file_list = new java.io.File(test_dic).listFiles.map{
file => file.getName match{
case queries_r(name) => test_dic + "/" + name
case _ => null
}
}.filterNot(_==null)
//
val all_queries = file_list.map(fileName => Source.fromFile(fileName).getLines().toList).flatten
println("Total queries count:" + all_queries.length)
println("Start firing queries to localhost...")
  //don't change this to a remote server; the remote server may blacklist you
  val baseURL = "http://localhost:9000/api/search.json?q="
  //a util function for making queries; returns 0 on failure and 1 on success
def make_query(word:String):Int = {
//for accuracy, you should remove the line below
println("Query:" + word)
try{
util.get(baseURL+word, Nil)
1
}catch{
case _:Throwable => 0
}
}
  //time is just a generic stop-watch wrapper, one of the advantages of scala.
  //the code is parallelized to saturate the I/O; move it out for accuracy
val par_queries = all_queries.par
  val parlevel = 15 //parallelization level (number of threads); don't set it too high. Higher numbers put more pressure on the server.
import scala.collection.parallel._
par_queries.tasksupport = new ForkJoinTaskSupport(new scala.concurrent.forkjoin.ForkJoinPool(parlevel))
val good_unsumed = util.time{
par_queries.map(make_query)
}
//move out for accuracy
val good = good_unsumed.toList.sum
println("Succeeded:" + good.toString)
println("Failed:" + (all_queries.length - good).toString)
}
| YagoGG/loklak_server | bin/profile.scala | Scala | lgpl-2.1 | 3,786 |
package org.jetbrains.plugins.scala
package debugger.evaluation
import com.intellij.debugger.SourcePosition
import com.intellij.debugger.engine.evaluation.CodeFragmentFactoryContextWrapper
import com.intellij.debugger.engine.evaluation.expression._
import com.intellij.debugger.engine.{JVMName, JVMNameUtil}
import com.intellij.lang.java.JavaLanguage
import com.intellij.psi._
import com.intellij.psi.search.LocalSearchScope
import com.intellij.psi.search.searches.ReferencesSearch
import com.intellij.psi.util.CachedValueProvider.Result
import com.intellij.psi.util.{CachedValueProvider, CachedValuesManager, PsiTreeUtil}
import org.jetbrains.plugins.scala.debugger.ScalaPositionManager
import org.jetbrains.plugins.scala.debugger.evaluation.evaluator._
import org.jetbrains.plugins.scala.debugger.evaluation.util.DebuggerUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base._
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.expr.xml.ScXmlPattern
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScClassParameter, ScParameter, ScParameterClause}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportStmt
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.{ScClassParents, ScTemplateBody}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScEarlyDefinitions, ScModifierListOwner, ScNamedElement, ScTypedDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.{ImplicitParametersOwner, ScPackage}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticFunction
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.Parameter
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
import scala.annotation.tailrec
import scala.collection.mutable.ArrayBuffer
import scala.reflect.NameTransformer
/**
* Nikolay.Tropin
* 2014-09-28
*/
private[evaluation] trait ScalaEvaluatorBuilderUtil {
this: ScalaEvaluatorBuilder =>
import org.jetbrains.plugins.scala.debugger.evaluation.ScalaEvaluatorBuilderUtil._
def fileName = contextClass.toOption.flatMap(_.getContainingFile.toOption).map(_.name).orNull
def importedQualifierEvaluator(ref: ScReferenceElement, resolveResult: ScalaResolveResult): Evaluator = {
val message = ScalaBundle.message("cannot.evaluate.imported.reference")
resolveResult.fromType match {
case Some(ScDesignatorType(element)) =>
element match {
case obj: ScObject => stableObjectEvaluator(obj)
case cl: PsiClass if cl.getLanguage.isInstanceOf[JavaLanguage] =>
new TypeEvaluator(JVMNameUtil.getJVMQualifiedName(cl))
case _ =>
val expr = ScalaPsiElementFactory.createExpressionWithContextFromText(element.name, ref.getContext, ref)
evaluatorFor(expr)
}
case Some(p: ScProjectionType) =>
def exprToEvaluate(p: ScProjectionType): String = p.projected match {
case ScDesignatorType(elem) => elem.name + "." + p.actualElement.name
case projected: ScProjectionType => exprToEvaluate(projected) + "." + projected.actualElement.name
case ScThisType(cl) if contextClass == cl => s"this.${p.actualElement.name}"
case ScThisType(cl) => s"${cl.name}.this.${p.actualElement.name}"
case _ => throw EvaluationException(message)
}
val expr = ScalaPsiElementFactory.createExpressionWithContextFromText(exprToEvaluate(p), ref.getContext, ref)
evaluatorFor(expr)
case _ => throw EvaluationException(message)
}
}
def thisOrImportedQualifierEvaluator(ref: ScReferenceElement): Evaluator = {
ref.bind() match {
case Some(resolveResult: ScalaResolveResult) =>
if (resolveResult.importsUsed.nonEmpty) importedQualifierEvaluator(ref, resolveResult)
else thisEvaluator(resolveResult)
case None => new ScalaThisEvaluator()
}
}
def thisEvaluator(resolveResult: ScalaResolveResult): Evaluator = {
//this reference
val elem = resolveResult.element
val containingClass = resolveResult.fromType match {
case Some(ScThisType(clazz)) => clazz
case Some(tp) => ScType.extractClass(tp, Some(elem.getProject)) match {
case Some(x) => x
case None => getContextClass(elem)
}
case _ => getContextClass(elem)
}
containingClass match {
case o: ScObject if isStable(o) =>
return stableObjectEvaluator(o)
case _ =>
}
val (outerClass, iterationCount) = findContextClass(e => e == null || e == containingClass)
if (outerClass != null)
new ScalaThisEvaluator(iterationCount)
else new ScalaThisEvaluator()
}
def thisOrSuperEvaluator(refOpt: Option[ScStableCodeReferenceElement], isSuper: Boolean): Evaluator = {
def thisEval(i: Int) = if (isSuper) new ScalaSuperEvaluator(i) else new ScalaThisEvaluator(i)
def stableEvaluator(e: Evaluator) = if (isSuper) new ScalaSuperDelegate(e) else e
def default: Evaluator = {
val (result, iters) = findContextClass(e => e == null || e.isInstanceOf[PsiClass])
if (result == null) thisEval(0)
else thisEval(iters)
}
refOpt match {
case Some(ResolvesTo(clazz: PsiClass)) =>
clazz match {
case o: ScObject if isStable(o) => stableEvaluator(stableObjectEvaluator(o))
case _ =>
val (result, iters) = findContextClass(e => e == null || e == clazz)
if (result == null) thisEval(0)
else thisEval(iters)
}
case Some(ref) =>
val refName = ref.refName
val (result, iters) = findContextClass {
case null => true
case cl: PsiClass if cl.name != null && cl.name == refName => true
case _ => false
}
result match {
case o: ScObject if isStable(o) => stableEvaluator(stableObjectEvaluator(o))
case null => default
case _ => thisEval(iters)
}
case _ => default
}
}
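// Walks outward from contextClass until stopCondition holds, summing generated anonymous-class hops; returns the class found (possibly null) and the iteration count used by ScalaThisEvaluator.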
def findContextClass(stopCondition: PsiElement => Boolean): (PsiElement, Int) = {
var current: PsiElement = contextClass
var iterations = 0
while (!stopCondition(current)) {
iterations += anonClassCount(current)
current = getContextClass(current)
}
(current, iterations)
}
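// Calls a local (nested) function: resolves the owning instance, appends captured locals to the arguments and uses the compiler-generated name (with index, and a "$macro" suffix inside async blocks).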
def localMethodEvaluator(fun: ScFunctionDefinition, argEvaluators: Seq[Evaluator]): Evaluator = {
def localFunName() = {
val transformed = NameTransformer.encode(fun.name)
fun match {
case ScalaPositionManager.InsideAsync(call) =>
val containingFun = PsiTreeUtil.getParentOfType(fun, classOf[ScFunctionDefinition], true)
if (containingFun != null && call.isAncestorOf(containingFun))
transformed
else
transformed + "$macro"
case _ => transformed
}
}
val name = localFunName()
val containingClass = if (fun.isSynthetic) fun.containingClass else getContextClass(fun)
val message = ScalaBundle.message("cannot.evaluate.local.method")
if (contextClass == null) {
throw EvaluationException(message)
}
val thisEvaluator: Evaluator = containingClass match {
case obj: ScObject if isStable(obj) =>
stableObjectEvaluator(obj)
case t: ScTrait =>
thisOrSuperEvaluator(None, isSuper = true)
case _ =>
val (outerClass, iters) = findContextClass(e => e == null || e == containingClass)
if (outerClass != null) new ScalaThisEvaluator(iters)
else null
}
if (thisEvaluator != null) {
val locals = DebuggerUtil.localParamsForFunDef(fun)
val evaluators = argEvaluators ++ locals.map(fromLocalArgEvaluator)
val signature = DebuggerUtil.getFunctionJVMSignature(fun)
val positions = DebuggerUtil.getSourcePositions(fun.getNavigationElement)
val idx = localFunctionIndex(fun)
new ScalaMethodEvaluator(thisEvaluator, name, signature, evaluators, traitImplementation(fun), positions, idx)
}
else throw EvaluationException(message)
}
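// Stable objects are singletons exposed through the static MODULE$ field of the generated "<qualified name>$" class; package objects additionally get a ".package" segment.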
def stableObjectEvaluator(qual: String): ScalaFieldEvaluator = {
val jvm = JVMNameUtil.getJVMRawText(qual)
new ScalaFieldEvaluator(new TypeEvaluator(jvm), "MODULE$")
}
def stableObjectEvaluator(obj: ScObject): Evaluator = {
val qualName =
if (obj.isPackageObject)
obj.qualifiedName + ".package"
else obj.getQualifiedNameForDebugger
val qual = qualName.split('.').map(NameTransformer.encode).mkString(".") + "$"
stableObjectEvaluator(qual)
}
def objectEvaluator(obj: ScObject, qualEvaluator: () => Evaluator): Evaluator = {
if (isStable(obj)) stableObjectEvaluator(obj)
else {
val objName = NameTransformer.encode(obj.name)
new ScalaMethodEvaluator(qualEvaluator(), objName, null /* todo? */, Seq.empty,
traitImplementation(obj), DebuggerUtil.getSourcePositions(obj.getNavigationElement))
}
}
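// Synthetic methods: string "+", isInstanceOf/asInstanceOf, ##, comparison/arithmetic/bitwise operators, && and ||, and numeric conversions, mostly mapped to scala.runtime.BoxesRunTime helpers.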
def syntheticFunctionEvaluator(synth: ScSyntheticFunction,
qualOpt: Option[ScExpression],
ref: ScReferenceExpression,
arguments: Seq[ScExpression]): Evaluator = {
if (synth.isStringPlusMethod && arguments.length == 1) {
val qualText = qualOpt.fold("this")(_.getText)
val exprText = s"($qualText).concat(_root_.java.lang.String.valueOf(${arguments.head.getText}))"
val expr = ScalaPsiElementFactory.createExpressionWithContextFromText(exprText, ref.getContext, ref)
return evaluatorFor(expr)
}
val name = synth.name
val argEvaluators = arguments.map(evaluatorFor(_))
def unaryEval(operatorName: String, function: Evaluator => Evaluator): Evaluator = {
if (argEvaluators.isEmpty) {
val eval = qualOpt match {
case None => new ScalaThisEvaluator()
case Some(qual) => evaluatorFor(qual)
}
function(eval)
} else throw EvaluationException(ScalaBundle.message("wrong.number.of.arguments", operatorName))
}
def unaryEvalForBoxes(operatorName: String, boxesName: String): Evaluator = {
unaryEval(operatorName, unaryEvaluator(_, boxesName))
}
def binaryEval(operatorName: String, function: (Evaluator, Evaluator) => Evaluator): Evaluator = {
if (argEvaluators.length == 1) {
val eval = qualOpt match {
case None => new ScalaThisEvaluator()
case Some(qual) => evaluatorFor(qual)
}
function(eval, argEvaluators.head)
} else throw EvaluationException(ScalaBundle.message("wrong.number.of.arguments", operatorName))
}
def binaryEvalForBoxes(operatorName: String, boxesName: String): Evaluator = {
binaryEval(operatorName, binaryEvaluator(_, _, boxesName))
}
def equalsEval(opName: String): Evaluator = {
val rawText = JVMNameUtil.getJVMRawText("(Ljava/lang/Object;Ljava/lang/Object;)Z")
binaryEval(name, (l, r) => new ScalaMethodEvaluator(BOXES_RUN_TIME, "equals", rawText, boxed(l, r)))
}
def isInstanceOfEval: Evaluator = {
unaryEval("isInstanceOf", eval => {
import org.jetbrains.plugins.scala.lang.psi.types.Nothing
val tp = ref.getParent match {
case gen: ScGenericCall => gen.typeArgs match {
case Some(args) => args.typeArgs match {
case Seq(arg) => arg.calcType
case _ => Nothing
}
case None => Nothing
}
case _ => Nothing
}
val jvmName: JVMName = DebuggerUtil.getJVMQualifiedName(tp)
new ScalaInstanceofEvaluator(eval, new TypeEvaluator(jvmName))
})
}
def trueEval = expressionFromTextEvaluator("true", ref)
def falseEval = expressionFromTextEvaluator("false", ref)
def conditionalOr = binaryEval("||", (first, second) => new ScalaIfEvaluator(first, trueEval, Some(second)))
def conditionalAnd = binaryEval("&&", (first, second) => new ScalaIfEvaluator(first, second, Some(falseEval)))
name match {
case "isInstanceOf" => isInstanceOfEval
case "asInstanceOf" => unaryEval(name, identity) //todo: primitive type casting?
case "##" => unaryEval(name, eval => new ScalaMethodEvaluator(BOXES_RUN_TIME, "hashFromObject",
JVMNameUtil.getJVMRawText("(Ljava/lang/Object;)I"), Seq(boxEvaluator(eval))))
case "==" => equalsEval("==")
case "!=" => unaryEvaluator(equalsEval("!="), "takeNot")
case "unary_!" => unaryEvalForBoxes("!", "takeNot")
case "unary_~" => unaryEvalForBoxes("~", "complement")
case "unary_+" => unaryEvalForBoxes("+", "positive")
case "unary_-" => unaryEvalForBoxes("-", "negate")
case "eq" => binaryEval(name, eqEvaluator)
case "ne" => binaryEval(name, neEvaluator)
case "<" => binaryEvalForBoxes(name, "testLessThan")
case ">" => binaryEvalForBoxes(name, "testGreaterThan")
case ">=" => binaryEvalForBoxes(name, "testGreaterOrEqualThan")
case "<=" => binaryEvalForBoxes(name, "testLessOrEqualThan")
case "+" => binaryEvalForBoxes(name, "add")
case "-" => binaryEvalForBoxes(name, "subtract")
case "*" => binaryEvalForBoxes(name, "multiply")
case "/" => binaryEvalForBoxes(name, "divide")
case "%" => binaryEvalForBoxes(name, "takeModulo")
case ">>" => binaryEvalForBoxes(name, "shiftSignedRight")
case "<<" => binaryEvalForBoxes(name, "shiftSignedLeft")
case ">>>" => binaryEvalForBoxes(name, "shiftLogicalRight")
case "&" => binaryEvalForBoxes(name, "takeAnd")
case "|" => binaryEvalForBoxes(name, "takeOr")
case "^" => binaryEvalForBoxes(name, "takeXor")
case "&&" => conditionalAnd
case "||" => conditionalOr
case "toInt" => unaryEvalForBoxes(name, "toInteger")
case "toChar" => unaryEvalForBoxes(name, "toCharacter")
case "toShort" => unaryEvalForBoxes(name, "toShort")
case "toByte" => unaryEvalForBoxes(name, "toByte")
case "toDouble" => unaryEvalForBoxes(name, "toDouble")
case "toLong" => unaryEvalForBoxes(name, "toLong")
case "toFloat" => unaryEvalForBoxes(name, "toFloat")
case "synchronized" =>
throw EvaluationException("synchronized statement is not supported")
case _ =>
throw EvaluationException("Cannot evaluate synthetic method: " + name)
}
}
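// Array methods are compiler intrinsics: apply/update become element access, length reads the array field, clone and toString are invoked as JVM methods.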
def arrayMethodEvaluator(name: String, qual: Option[ScExpression], argEvaluators: Seq[Evaluator]): Evaluator = {
val qualEval = qual match {
case Some(q) => evaluatorFor(q)
case None => throw EvaluationException(ScalaBundle.message("array.instance.is.not.found", name))
}
val message = ScalaBundle.message("wrong.number.of.arguments", s"Array.$name")
name match {
case "apply" =>
if (argEvaluators.length == 1) new ScalaArrayAccessEvaluator(qualEval, argEvaluators.head)
else throw EvaluationException(message)
case "length" =>
if (argEvaluators.isEmpty) new ScalaFieldEvaluator(qualEval, "length")
else throw EvaluationException(message)
case "clone" =>
if (argEvaluators.isEmpty) new ScalaMethodEvaluator(qualEval, "clone", null/*todo*/, Nil)
else throw EvaluationException(message)
case "update" =>
if (argEvaluators.length == 2) {
val leftEval = new ScalaArrayAccessEvaluator(qualEval, argEvaluators.head)
new AssignmentEvaluator(leftEval, unboxEvaluator(argEvaluators(1)))
} else throw EvaluationException(message)
case "toString" =>
if (argEvaluators.isEmpty) new ScalaMethodEvaluator(qualEval, "toString", null/*todo*/, Nil)
else throw EvaluationException(message)
case _ =>
throw EvaluationException(ScalaBundle.message("array.method.not.supported"))
}
}
def isArrayFunction(fun: ScFunction): Boolean = {
fun.getContext match {
case tb: ScTemplateBody =>
fun.containingClass match {
case clazz: ScClass if clazz.qualifiedName == "scala.Array" => true
case _ => false
}
case _ => false
}
}
def isClassOfFunction(fun: ScFunction): Boolean = {
if (fun.name != "classOf") return false
fun.getContext match {
case tb: ScTemplateBody =>
fun.containingClass match {
case clazz: PsiClass if clazz.qualifiedName == "scala.Predef" => true
case _ => false
}
case _ => false
}
}
def classOfFunctionEvaluator(ref: ScReferenceExpression) = {
val clazzJVMName = ref.getContext match {
case gen: ScGenericCall =>
gen.arguments.head.getType(TypingContext.empty).map(tp => {
ScType.extractClass(tp, Some(ref.getProject)) match {
case Some(clazz) =>
DebuggerUtil.getClassJVMName(clazz)
case None => null
}
}).getOrElse(null)
case _ => null
}
import org.jetbrains.plugins.scala.lang.psi.types.Null
if (clazzJVMName != null) new ClassObjectEvaluator(new TypeEvaluator(clazzJVMName))
else new ScalaLiteralEvaluator(null, Null)
}
def valueClassInstanceEvaluator(value: Evaluator, innerType: ScType, classType: ScType): Evaluator = {
val valueClassType = new TypeEvaluator(DebuggerUtil.getJVMQualifiedName(classType))
val innerJvmName = DebuggerUtil.getJVMStringForType(innerType, isParam = true)
val signature = JVMNameUtil.getJVMRawText(s"($innerJvmName)V")
new ScalaDuplexEvaluator(new ScalaNewClassInstanceEvaluator(valueClassType, signature, Array(value)), value)
}
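// Packs arguments bound to a repeated (vararg) parameter into a Seq by evaluating a generated "Seq.newBuilder[T].+=(...).result()" expression; a single "xs: _*" argument is passed through as is.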
def repeatedArgEvaluator(exprsForP: Seq[ScExpression], expectedType: ScType, context: PsiElement): Evaluator = {
def seqEvaluator: Evaluator = {
val argTypes = exprsForP.map(_.getType().getOrAny)
val argTypeText =
if (argTypes.isEmpty) expectedType.canonicalText
else Bounds.lub(argTypes).canonicalText
val argsText = if (exprsForP.nonEmpty) exprsForP.sortBy(_.getTextRange.getStartOffset).map(_.getText).mkString(".+=(", ").+=(", ").result()") else ""
val exprText = s"_root_.scala.collection.Seq.newBuilder[$argTypeText]$argsText"
val newExpr = ScalaPsiElementFactory.createExpressionWithContextFromText(exprText, context, context)
evaluatorFor(newExpr)
}
if (exprsForP.length == 1) {
exprsForP.head match {
case t: ScTypedStmt if t.isSequenceArg => evaluatorFor(t.expr)
case _ => seqEvaluator
}
} else seqEvaluator
}
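// Reconstructs the implicit argument the compiler would pass for the parameter: ClassManifest/ClassTag instances are synthesized from the type argument, other implicits are re-resolved by name (stable objects or local scope only).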
def implicitArgEvaluator(fun: ScMethodLike, param: ScParameter, owner: ImplicitParametersOwner): Evaluator = {
assert(param.owner == fun)
val implicitParameters = fun.effectiveParameterClauses.lastOption match {
case Some(clause) if clause.isImplicit => clause.effectiveParameters
case _ => Seq.empty
}
val i = implicitParameters.indexOf(param)
val cannotFindMessage = ScalaBundle.message("cannot.find.implicit.parameters")
owner.findImplicitParameters match {
case Some(resolveResults) if resolveResults.length == implicitParameters.length =>
if (resolveResults(i) == null) throw EvaluationException(cannotFindMessage)
val exprText = resolveResults(i) match {
case ScalaResolveResult(clazz: ScTrait, substitutor) if clazz.qualifiedName == "scala.reflect.ClassManifest" =>
val argType = substitutor.subst(clazz.getType(TypingContext.empty).get)
argType match {
case ScParameterizedType(tp, Seq(paramType)) => classManifestText(paramType)
case _ =>
throw EvaluationException(cannotFindMessage)
}
case ScalaResolveResult(clazz: ScTrait, substitutor) if clazz.qualifiedName == "scala.reflect.ClassTag" =>
val argType = substitutor.subst(clazz.getType(TypingContext.empty).get)
argType match {
case ScParameterizedType(tp, Seq(arg)) => classTagText(arg)
case _ =>
throw EvaluationException(cannotFindMessage)
}
case ScalaResolveResult(elem, _) =>
val context = ScalaPsiUtil.nameContext(elem)
val clazz = context.getContext match {
case _: ScTemplateBody | _: ScEarlyDefinitions =>
ScalaPsiUtil.getContextOfType(context, true, classOf[PsiClass])
case _ if context.isInstanceOf[ScClassParameter] =>
ScalaPsiUtil.getContextOfType(context, true, classOf[PsiClass])
case _ => null
}
clazz match {
case o: ScObject if isStable(o) => o.qualifiedName + "." + elem.name
case o: ScObject => //todo: It can cover many cases!
throw EvaluationException(ScalaBundle.message("implicit.parameters.from.dependent.objects"))
case _ => elem.name //from scope
}
}
val newExpr = ScalaPsiElementFactory.createExpressionWithContextFromText(exprText, owner.getContext, owner)
evaluatorFor(newExpr)
case None =>
throw EvaluationException(cannotFindMessage)
}
}
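// Evaluates a parameter of a function definition or function literal as a stack-frame local, recording the parameter index and the owning method name ("apply" for function literals).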
def parameterEvaluator(fun: PsiElement, resolve: PsiElement): Evaluator = {
val name = NameTransformer.encode(resolve.asInstanceOf[PsiNamedElement].name)
val evaluator = new ScalaLocalVariableEvaluator(name, fileName)
fun match {
case funDef: ScFunctionDefinition =>
def paramIndex(fun: ScFunctionDefinition, context: PsiElement, elem: PsiElement): Int = {
val locIndex = DebuggerUtil.localParamsForFunDef(fun).indexOf(elem)
val funParams = fun.effectiveParameterClauses.flatMap(_.effectiveParameters)
if (locIndex < 0) funParams.indexOf(elem)
else locIndex + funParams.size
}
val pIndex = paramIndex(funDef, getContextClass(fun), resolve)
evaluator.setParameterIndex(pIndex)
evaluator.setMethodName(funDef.name)
case funExpr: ScFunctionExpr =>
evaluator.setParameterIndex(funExpr.parameters.indexOf(resolve))
evaluator.setMethodName("apply")
case _ => throw EvaluationException(ScalaBundle.message("cannot.evaluate.parameter", name))
}
evaluator
}
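// Java interop: static members are accessed through their declaring class, instance members through the evaluated qualifier (or the implied this/imported qualifier); primitive Scala qualifiers are boxed before the call.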
def javaFieldEvaluator(field: PsiField, ref: ScReferenceExpression): Evaluator = {
ref.qualifier match {
case Some(qual) =>
if (field.hasModifierPropertyScala("static")) {
val eval = new TypeEvaluator(JVMNameUtil.getContextClassJVMQualifiedName(SourcePosition.createFromElement(field)))
val name = field.name
new ScalaFieldEvaluator(eval, name)
} else {
val qualEvaluator = evaluatorFor(qual)
new ScalaFieldEvaluator(qualEvaluator, field.name)
}
case None =>
val evaluator = thisOrImportedQualifierEvaluator(ref)
new ScalaFieldEvaluator(evaluator, field.name)
}
}
def javaMethodEvaluator(method: PsiMethod, ref: ScReferenceExpression, arguments: Seq[ScExpression]): Evaluator = {
def boxArguments(arguments: Seq[Evaluator], method: PsiElement): Seq[Evaluator] = {
val params = method match {
case fun: ScMethodLike => fun.effectiveParameterClauses.flatMap(_.parameters)
case m: PsiMethod => m.getParameterList.getParameters.toSeq
case _ => return arguments
}
arguments.zipWithIndex.map {
case (arg, i) =>
if (params.length <= i || isOfPrimitiveType(params(i))) arg
else boxEvaluator(arg)
}
}
val argEvals = boxArguments(arguments.map(evaluatorFor(_)), method)
val methodPosition = DebuggerUtil.getSourcePositions(method.getNavigationElement)
val signature = JVMNameUtil.getJVMSignature(method)
ref.qualifier match {
case Some(qual @ ExpressionType(tp)) if isPrimitiveScType(tp) =>
val boxEval = boxEvaluator(evaluatorFor(qual))
ScalaMethodEvaluator(boxEval, method.name, signature, argEvals, None, methodPosition)
case Some(q) if method.hasModifierPropertyScala("static") =>
val eval = new TypeEvaluator(JVMNameUtil.getContextClassJVMQualifiedName(SourcePosition.createFromElement(method)))
val name = method.name
ScalaMethodEvaluator(eval, name, signature, argEvals, None, methodPosition)
case Some(q) =>
val name = method.name
new ScalaMethodEvaluator(evaluatorFor(q), name, signature, argEvals, None, methodPosition)
case _ =>
val evaluator = thisOrImportedQualifierEvaluator(ref)
val name = method.name
new ScalaMethodEvaluator(evaluator, name, signature, argEvals, None, methodPosition)
}
}
def unresolvedMethodEvaluator(ref: ScReferenceExpression, args: Seq[ScExpression]): Evaluator = {
val argEvals = args.map(evaluatorFor(_))
val name = NameTransformer.encode(ref.refName)
ref.qualifier match {
case Some(q) => new ScalaMethodEvaluator(evaluatorFor(q), name, null, argEvals)
case _ => new ScalaMethodEvaluator(thisOrImportedQualifierEvaluator(ref), name, null, argEvals)
}
}
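// Produces one evaluator per formal parameter over all clauses: handles repeated, implicit and default parameters, boxes non-primitive values, and gives up on by-name parameters (they need compilation).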
def argumentEvaluators(fun: ScMethodLike, matchedParameters: Map[Parameter, Seq[ScExpression]],
call: ScExpression, ref: ScReferenceExpression, arguments: Seq[ScExpression]): Seq[Evaluator] = {
val clauses = fun.effectiveParameterClauses
val parameters = clauses.flatMap(_.effectiveParameters).map(new Parameter(_))
def addForNextClause(previousClausesEvaluators: Seq[Evaluator], clause: ScParameterClause): Seq[Evaluator] = {
def isDefaultExpr(expr: ScExpression) = expr match {
case ChildOf(p: ScParameter) => p.isDefaultParam
case _ => false
}
previousClausesEvaluators ++ clause.effectiveParameters.map {
case param =>
val p = new Parameter(param)
val exprsForP = matchedParameters.find(_._1.name == p.name).map(_._2).getOrElse(Seq.empty).filter(_ != null)
if (p.isByName) throw new NeedCompilationException(ScalaBundle.message("method.with.by-name.parameters"))
val evaluator =
if (p.isRepeated) repeatedArgEvaluator(exprsForP, p.expectedType, call)
else if (exprsForP.size > 1) throw EvaluationException(ScalaBundle.message("wrong.number.of.expressions"))
else if (exprsForP.length == 1 && !isDefaultExpr(exprsForP.head)) evaluatorFor(exprsForP.head)
else if (param.isImplicitParameter) implicitArgEvaluator(fun, param, call)
else if (p.isDefault) {
val paramIndex = parameters.indexOf(p) + 1
val methodName = defaultParameterMethodName(fun, paramIndex)
val localParams = p.paramInCode.toSeq.flatMap(DebuggerUtil.localParamsForDefaultParam(_))
val localParamRefs =
localParams.map(td => ScalaPsiElementFactory.createExpressionWithContextFromText(td.name, call.getContext, call))
val localEvals = localParamRefs.map(evaluatorFor(_))
functionEvaluator(ref.qualifier, ref, methodName, previousClausesEvaluators ++ localEvals)
}
else throw EvaluationException(ScalaBundle.message("cannot.evaluate.parameter", p.name))
if (!isOfPrimitiveType(param)) boxEvaluator(evaluator)
else evaluator
}
}
val argEvaluators: Seq[Evaluator] = clauses.foldLeft(Seq.empty[Evaluator])(addForNextClause)
if (argEvaluators.contains(null)) arguments.map(arg => evaluatorFor(arg))
else argEvaluators
}
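// Invokes a resolved function: private trait methods go through the trait implementation class, "apply" resolved on an object targets the object instance, otherwise a method evaluator with the function's JVM signature is used.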
def functionEvaluator(qualOption: Option[ScExpression], ref: ScReferenceExpression,
funName: String, argEvaluators: Seq[Evaluator]): Evaluator = {
def qualEvaluator(r: ScalaResolveResult) = {
def defaultQualEvaluator = qualifierEvaluator(qualOption, ref)
r.getActualElement match {
case o: ScObject if funName == "apply" => objectEvaluator(o, defaultQualEvaluator _)
case _ => defaultQualEvaluator
}
}
val name = NameTransformer.encode(funName)
ref.bind() match {
case Some(r) if r.tuplingUsed => throw EvaluationException(ScalaBundle.message("tupling.not.supported"))
case None => throw EvaluationException(ScalaBundle.message("cannot.evaluate.method", funName))
case Some(r @ privateTraitMethod(tr, fun)) =>
val traitTypeEval = new TypeEvaluator(DebuggerUtil.getClassJVMName(tr, withPostfix = true))
val qualEval = qualEvaluator(r)
new ScalaMethodEvaluator(traitTypeEval, name, null, qualEval +: argEvaluators)
case Some(r) =>
val resolve = r.element
val qualEval = qualEvaluator(r)
val signature = resolve match {
case fun: ScFunction => DebuggerUtil.getFunctionJVMSignature(fun)
case _ => null
}
new ScalaMethodEvaluator(qualEval, name, signature, argEvaluators,
traitImplementation(resolve), DebuggerUtil.getSourcePositions(resolve.getNavigationElement))
}
}
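// Entry point for calls: dispatches to local functions, Predef.classOf, synthetic (primitive) methods, Array intrinsics, value-class methods, ordinary Scala functions, Java methods, or an unresolved-method fallback.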
def methodCallEvaluator(call: ScExpression, arguments: Seq[ScExpression], matchedParameters: Map[Parameter, Seq[ScExpression]]): Evaluator = {
val ref = call match {
case hasDeepestInvokedReference(r) => r
case _ => throw EvaluationException(ScalaBundle.message("cannot.evaluate.method", call.getText))
}
val qualOption = ref.qualifier
val resolve = ref.resolve()
resolve match {
case fun: ScFunctionDefinition if isLocalFunction(fun) =>
val args = argumentEvaluators(fun, matchedParameters, call, ref, arguments)
localMethodEvaluator(fun, args)
case fun: ScFunction if isClassOfFunction(fun) =>
classOfFunctionEvaluator(ref)
case synth: ScSyntheticFunction =>
syntheticFunctionEvaluator(synth, qualOption, ref, arguments) //todo: use matched parameters
case fun: ScFunction if isArrayFunction(fun) =>
val args = argumentEvaluators(fun, matchedParameters, call, ref, arguments)
arrayMethodEvaluator(fun.name, qualOption, args)
case fun: ScFunction =>
ref match {
case isInsideValueClass(c) if qualOption.isEmpty =>
val clName = c.name
val paramName = c.allClauses.flatMap(_.parameters).map(_.name).headOption.getOrElse("$this")
val text = s"new $clName($paramName).${call.getText}"
val expr = ScalaPsiElementFactory.createExpressionFromText(text, call.getContext)
evaluatorFor(expr)
case _ =>
val args: Seq[Evaluator] = argumentEvaluators(fun, matchedParameters, call, ref, arguments)
functionEvaluator(qualOption, ref, fun.name, args)
}
case method: PsiMethod =>
javaMethodEvaluator(method, ref, arguments)
case _ =>
unresolvedMethodEvaluator(ref, arguments)
}
}
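// References without arguments: local values/objects (possibly captured as outer fields), by-name and class parameters, Scala "fields" (accessor methods), Java fields, packages (via the package object) and dynamically resolved names.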
def evaluatorForReferenceWithoutParameters(qualifier: Option[ScExpression],
resolve: PsiElement,
ref: ScReferenceExpression): Evaluator = {
def withOuterFieldEvaluator(containingClass: PsiElement, name: String, message: String) = {
val (innerClass, iterationCount) = findContextClass { e =>
e == null || {val nextClass = getContextClass(e); nextClass == null || nextClass == containingClass}
}
if (innerClass == null) throw EvaluationException(message)
val thisEval = new ScalaThisEvaluator(iterationCount)
new ScalaFieldEvaluator(thisEval, name)
}
def calcLocal(named: PsiNamedElement): Evaluator = {
val name = NameTransformer.encode(named.name)
val containingClass = getContextClass(named)
val localVariableEvaluator: Evaluator = ScalaPsiUtil.nameContext(named) match {
case param: ScParameter =>
param.owner match {
case fun@(_: ScFunction | _: ScFunctionExpr) => parameterEvaluator(fun, param)
case _ => throw EvaluationException(ScalaBundle.message("cannot.evaluate.parameter", param.name))
}
case caseCl: ScCaseClause => patternEvaluator(caseCl, named)
case _: ScGenerator | _: ScEnumerator if position != null && isNotUsedEnumerator(named, position.getElementAt) =>
throw EvaluationException(ScalaBundle.message("not.used.from.for.statement", name))
case LazyVal(_) => localLazyValEvaluator(named)
case ScalaPositionManager.InsideAsync(_) =>
val simpleLocal = new ScalaLocalVariableEvaluator(name, fileName)
val fieldMacro = new ScalaFieldEvaluator(new ScalaThisEvaluator(), name + "$macro")
new ScalaDuplexEvaluator(simpleLocal, fieldMacro)
case _ => new ScalaLocalVariableEvaluator(name, fileName)
}
containingClass match {
case `contextClass` | _: ScGenerator | _: ScEnumerator => localVariableEvaluator
case _ if contextClass == null => localVariableEvaluator
case _ =>
val fieldEval = withOuterFieldEvaluator(containingClass, name, ScalaBundle.message("cannot.evaluate.local.variable", name))
new ScalaDuplexEvaluator(fieldEval, localVariableEvaluator)
}
}
def calcLocalObject(obj: ScObject) = {
def fromVolatileObjectReference(eval: Evaluator) = new ScalaFieldEvaluator(eval, "elem")
val containingClass = getContextClass(obj)
val name = NameTransformer.encode(obj.name) + "$module"
if (containingClass == contextClass) {
fromVolatileObjectReference(new ScalaLocalVariableEvaluator(name, fileName))
} else {
val fieldEval = withOuterFieldEvaluator(containingClass, name, ScalaBundle.message("cannot.evaluate.local.object", name))
fromVolatileObjectReference(fieldEval)
}
}
val labeledOrSynthetic = labeledOrSyntheticEvaluator(ref, resolve)
if (labeledOrSynthetic.isDefined) return labeledOrSynthetic.get
val isLocalValue = DebuggerUtil.isLocalV(resolve)
resolve match {
case Both(isInsideLocalFunction(fun), named: PsiNamedElement) if isLocalValue =>
new ScalaDuplexEvaluator(calcLocal(named), parameterEvaluator(fun, resolve))
case p: ScParameter if p.isCallByNameParameter && isLocalValue =>
val localEval = calcLocal(p)
new ScalaMethodEvaluator(localEval, "apply", null, Nil)
case obj: ScObject if isLocalValue => calcLocalObject(obj)
case named: PsiNamedElement if isLocalValue =>
calcLocal(named)
case obj: ScObject =>
objectEvaluator(obj, () => qualifierEvaluator(qualifier, ref))
case _: PsiMethod | _: ScSyntheticFunction =>
methodCallEvaluator(ref, Nil, Map.empty)
case cp: ScClassParameter if cp.isCallByNameParameter =>
val qualEval = qualifierEvaluator(qualifier, ref)
val name = NameTransformer.encode(cp.name)
val fieldEval = new ScalaFieldEvaluator(qualEval, name, true)
new ScalaMethodEvaluator(fieldEval, "apply", null, Nil)
case privateThisField(named) =>
val named = resolve.asInstanceOf[ScNamedElement]
val qualEval = qualifierEvaluator(qualifier, ref)
val name = NameTransformer.encode(named.name)
new ScalaFieldEvaluator(qualEval, name, true)
case cp: ScClassParameter if qualifier.isEmpty && ValueClassType.isValueClass(cp.containingClass) =>
//methods of a value class take a hidden argument holding the underlying value
new ScalaLocalVariableEvaluator("$this", fileName)
case _: ScClassParameter | _: ScBindingPattern =>
//this is a Scala "field", i.e. an accessor method
val named = resolve.asInstanceOf[ScNamedElement]
val name = NameTransformer.encode(named.name)
val qualEval = qualifierEvaluator(qualifier, ref)
val withSimpleNameEval = new ScalaMethodEvaluator(qualEval, name, null /* todo */, Seq.empty,
traitImplementation(resolve), DebuggerUtil.getSourcePositions(resolve.getNavigationElement))
getContextClass(named) match {
//in some cases the compiler uses fully qualified names for fields and methods
case clazz: ScTemplateDefinition if ScalaPsiUtil.hasStablePath(clazz)
&& clazz.members.contains(ScalaPsiUtil.nameContext(named)) =>
val qualName = clazz.qualifiedName
val newName = qualName.split('.').map(NameTransformer.encode).mkString("$") + "$$" + name
val reserveEval = new ScalaMethodEvaluator(qualEval, newName, null/* todo */, Seq.empty,
traitImplementation(resolve), DebuggerUtil.getSourcePositions(resolve.getNavigationElement))
new ScalaDuplexEvaluator(withSimpleNameEval, reserveEval)
case _ => withSimpleNameEval
}
case field: PsiField => javaFieldEvaluator(field, ref)
case pack: ScPackage =>
//let's try to find package object:
val qual = (pack.getQualifiedName + ".package$").split('.').map(NameTransformer.encode).mkString(".")
stableObjectEvaluator(qual)
case _ =>
//unresolved symbol => try to resolve it dynamically
val name = NameTransformer.encode(ref.refName)
val fieldOrVarEval = qualifier match {
case Some(qual) => new ScalaFieldEvaluator(evaluatorFor(qual), name)
case None => new ScalaLocalVariableEvaluator(name, fileName)
}
new ScalaDuplexEvaluator(fieldOrVarEval, unresolvedMethodEvaluator(ref, Seq.empty))
}
}
def labeledOrSyntheticEvaluator(ref: ScReferenceExpression, resolve: PsiElement): Option[Evaluator] = {
if (resolve == null) return None
val labeledValue = resolve.getUserData(CodeFragmentFactoryContextWrapper.LABEL_VARIABLE_VALUE_KEY)
if (labeledValue != null)
return Some(new IdentityEvaluator(labeledValue))
val isSynthetic = codeFragment.isAncestorOf(resolve)
if (isSynthetic && ref.qualifier.isEmpty)
Some(syntheticVariableEvaluator(ref.refName))
else None
}
def qualifierEvaluator(qualifier: Option[ScExpression], ref: ScReferenceExpression): Evaluator = qualifier match {
case Some(q) => evaluatorFor(q)
case _ => thisOrImportedQualifierEvaluator(ref)
}
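// A name bound in a case clause is looked up as a local variable and, as a fallback, re-extracted from the matched expression (or the anonymous function's argument) through the pattern structure.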
def patternEvaluator(caseCl: ScCaseClause, namedElement: PsiNamedElement): Evaluator = {
val name = namedElement.name
if (caseCl.getParent != null) {
val pattern = caseCl.pattern
if (pattern.isEmpty) throw EvaluationException(ScalaBundle.message("cannot.find.pattern"))
caseCl.getParent.getParent match {
case matchStmt: ScMatchStmt if namedElement.isInstanceOf[ScPattern] =>
val expr = matchStmt.expr
if (expr.isEmpty) throw EvaluationException(ScalaBundle.message("cannot.find.expression.of.match"))
val exprEval = evaluatorFor(expr.get)
val fromPatternEvaluator = evaluateSubpatternFromPattern(exprEval, pattern.get, namedElement.asInstanceOf[ScPattern])
new ScalaDuplexEvaluator(new ScalaLocalVariableEvaluator(name, fileName), fromPatternEvaluator)
case block: ScBlockExpr => //it is an anonymous function
val argEvaluator = new ScalaLocalVariableEvaluator("", fileName)
argEvaluator.setMethodName("apply")
argEvaluator.setParameterIndex(0)
val fromPatternEvaluator = evaluateSubpatternFromPattern(argEvaluator, pattern.get, namedElement.asInstanceOf[ScPattern])
new ScalaDuplexEvaluator(new ScalaLocalVariableEvaluator(name, fileName), fromPatternEvaluator)
case _ => new ScalaLocalVariableEvaluator(name, fileName)
}
} else throw EvaluationException(ScalaBundle.message("invalid.case.clause"))
}
def assignmentEvaluator(stmt: ScAssignStmt): Evaluator = {
val message = ScalaBundle.message("assignent.without.expression")
if (stmt.isNamedParameter) {
stmt.getRExpression match {
case Some(expr) => evaluatorFor(expr)
case _ => throw EvaluationException(message)
}
} else {
stmt.getLExpression match {
case call: ScMethodCall =>
val invokedText = call.getInvokedExpr.getText
val rExprText = stmt.getRExpression.fold("null")(_.getText)
val args = (call.args.exprs.map(_.getText) :+ rExprText).mkString("(", ", ", ")")
val exprText = s"($invokedText).update$args"
val expr = ScalaPsiElementFactory.createExpressionWithContextFromText(exprText, stmt.getContext, stmt)
evaluatorFor(expr)
case _ =>
val leftEvaluator = evaluatorFor(stmt.getLExpression)
val rightEvaluator = stmt.getRExpression match {
case Some(expr) => evaluatorFor(expr)
case _ => throw EvaluationException(message)
}
def createAssignEvaluator(leftEvaluator: Evaluator): Option[Evaluator] = {
leftEvaluator match {
case m: ScalaMethodEvaluator =>
Some(m.copy(_methodName = m.methodName + "_$eq", argumentEvaluators = Seq(rightEvaluator))) //todo: signature?
case ScalaDuplexEvaluator(first, second) =>
createAssignEvaluator(first) orElse createAssignEvaluator(second)
case _ => None
}
}
createAssignEvaluator(leftEvaluator).getOrElse(new AssignmentEvaluator(leftEvaluator, rightEvaluator))
}
}
}
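// Recursively narrows the evaluator of the matched value down to the given subpattern, using _N accessors for tuples and unapply/unapplySeq calls for extractor and infix patterns.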
def evaluateSubpatternFromPattern(exprEval: Evaluator, pattern: ScPattern, subPattern: ScPattern): Evaluator = {
def evaluateConstructorOrInfix(exprEval: Evaluator, ref: ScStableCodeReferenceElement, pattern: ScPattern, nextPatternIndex: Int): Evaluator = {
ref.resolve() match {
case fun: ScFunctionDefinition =>
val elem = ref.bind().get.getActualElement //object or case class
val qual = ref.qualifier.map(q => ScalaPsiElementFactory.createExpressionWithContextFromText(q.getText, q.getContext, q))
val refExpr = ScalaPsiElementFactory.createExpressionWithContextFromText(ref.getText, ref.getContext, ref)
val refEvaluator = evaluatorForReferenceWithoutParameters(qual, elem, refExpr.asInstanceOf[ScReferenceExpression])
val funName = fun.name
val newEval =
if (funName == "unapply") {
val extractEval = new ScalaMethodEvaluator(refEvaluator, funName, DebuggerUtil.getFunctionJVMSignature(fun), Seq(exprEval))
if (pattern.subpatterns.length == 1)
new ScalaMethodEvaluator(extractEval, "get", null, Seq.empty)
else if (pattern.subpatterns.length > 1) {
val getEval = new ScalaMethodEvaluator(extractEval, "get", null, Seq.empty)
new ScalaFieldEvaluator(getEval, s"_${nextPatternIndex + 1}")
}
else throw EvaluationException(ScalaBundle.message("unapply.without.arguments"))
} else if (funName == "unapplySeq") {
val extractEval = new ScalaMethodEvaluator(refEvaluator, funName, DebuggerUtil.getFunctionJVMSignature(fun), Seq(exprEval))
val getEval = new ScalaMethodEvaluator(extractEval, "get", null, Seq.empty)
val indexExpr = ScalaPsiElementFactory.createExpressionFromText("" + nextPatternIndex, pattern.getManager)
val indexEval = evaluatorFor(indexExpr)
new ScalaMethodEvaluator(getEval, "apply", null, Seq(indexEval))
} else throw EvaluationException(ScalaBundle.message("pattern.doesnot.resolves.to.unapply", ref.refName))
val nextPattern = pattern.subpatterns(nextPatternIndex)
evaluateSubpatternFromPattern(newEval, nextPattern, subPattern)
case _ => throw EvaluationException(ScalaBundle.message("pattern.doesnot.resolves.to.unapply", ref.refName))
}
}
if (pattern == null || subPattern == null)
throw new IllegalArgumentException("Patterns should not be null")
val nextPatternIndex: Int = pattern.subpatterns.indexWhere(next => next == subPattern || subPattern.parents.contains(next))
if (pattern == subPattern) exprEval
else if (nextPatternIndex < 0) throw new IllegalArgumentException("Pattern is not ancestor of subpattern")
else {
pattern match {
case naming: ScNamingPattern => evaluateSubpatternFromPattern(exprEval, naming.named, subPattern)
case typed: ScTypedPattern => evaluateSubpatternFromPattern(exprEval, pattern.subpatterns.head, subPattern)
case par: ScParenthesisedPattern =>
val withoutPars = par.subpattern.getOrElse(throw new IllegalStateException("Empty parentheses pattern"))
evaluateSubpatternFromPattern(exprEval, withoutPars, subPattern)
case tuple: ScTuplePattern =>
val nextPattern = tuple.subpatterns(nextPatternIndex)
val newEval = new ScalaFieldEvaluator(exprEval, s"_${nextPatternIndex + 1}")
evaluateSubpatternFromPattern(newEval, nextPattern, subPattern)
case constr: ScConstructorPattern =>
val ref: ScStableCodeReferenceElement = constr.ref
evaluateConstructorOrInfix(exprEval, ref, constr, nextPatternIndex)
case infix: ScInfixPattern =>
val ref: ScStableCodeReferenceElement = infix.reference
evaluateConstructorOrInfix(exprEval, ref, infix, nextPatternIndex)
//todo: handle infix with tuple right pattern
case _: ScCompositePattern => throw EvaluationException(ScalaBundle.message("pattern.alternatives.cannot.bind.vars"))
case _: ScXmlPattern => throw EvaluationException(ScalaBundle.message("xml.patterns.not.supported")) //todo: xml patterns
case _ => throw EvaluationException(ScalaBundle.message("kind.of.patterns.not.supported", pattern.getText)) //todo: xml patterns
}
}
}
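// "new" expressions: anonymous classes are rejected (require compilation), "new Array[...]" is rewritten to Array.ofDim, other classes call <init> with explicit, implicit and captured outer/local arguments.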
def newTemplateDefinitionEvaluator(templ: ScNewTemplateDefinition): Evaluator = {
templ.extendsBlock.templateParents match {
case Some(parents: ScClassParents) =>
if (parents.typeElements.length != 1) {
throw new NeedCompilationException(ScalaBundle.message("anon.classes.not.supported"))
}
parents.constructor match {
case Some(constr) =>
val tp = constr.typeElement.calcType
ScType.extractClass(tp, Some(templ.getProject)) match {
case Some(clazz) if clazz.qualifiedName == "scala.Array" =>
val typeArgs = constr.typeArgList.fold("")(_.getText)
val args = constr.args.fold("(0)")(_.getText)
val exprText = s"_root_.scala.Array.ofDim$typeArgs$args"
val expr = ScalaPsiElementFactory.createExpressionWithContextFromText(exprText, templ.getContext, templ)
evaluatorFor(expr)
case Some(clazz) =>
val jvmName = DebuggerUtil.getClassJVMName(clazz)
val typeEvaluator = new TypeEvaluator(jvmName)
val argumentEvaluators = constructorArgumentsEvaluators(templ, constr, clazz)
constr.reference.map(_.resolve()) match {
case Some(named: PsiNamedElement) =>
val signature = DebuggerUtil.constructorSignature(named)
new ScalaMethodEvaluator(typeEvaluator, "<init>", signature, argumentEvaluators)
case _ =>
new ScalaMethodEvaluator(typeEvaluator, "<init>", null, argumentEvaluators)
}
case _ => throw EvaluationException(ScalaBundle.message("new.expression.without.class.reference"))
}
case None => throw EvaluationException(ScalaBundle.message("new.expression.without.constructor.call"))
}
case _ => throw EvaluationException(ScalaBundle.message("new.expression.without.template.parents"))
}
}
def constructorArgumentsEvaluators(newTd: ScNewTemplateDefinition,
constr: ScConstructor,
clazz: PsiClass): Seq[Evaluator] = {
val constrDef = constr.reference match {
case Some(ResolvesTo(elem)) => elem
case _ => throw EvaluationException(ScalaBundle.message("could.not.resolve.constructor"))
}
val explicitArgs = constr.arguments.flatMap(_.exprs)
val explEvaluators =
for {
arg <- explicitArgs
} yield {
val eval = evaluatorFor(arg)
val param = ScalaPsiUtil.parameterOf(arg).flatMap(_.psiParam)
if (param.exists(!isOfPrimitiveType(_))) boxEvaluator(eval)
else eval
}
constrDef match {
case scMethod: ScMethodLike =>
val scClass = scMethod.containingClass.asInstanceOf[ScClass]
val containingClass = getContextClass(scClass)
val implicitParams = scMethod.parameterList.params.filter(_.isImplicitParameter)
val implicitsEvals =
for {
typeElem <- constr.simpleTypeElement.toSeq
p <- implicitParams
} yield {
val eval = implicitArgEvaluator(scMethod, p, typeElem)
if (isOfPrimitiveType(p)) eval
else boxEvaluator(eval)
}
val (outerClass, iters) = findContextClass(e => e == null || e == containingClass)
val outerThis = outerClass match {
case obj: ScObject if isStable(obj) => None
case null => None
case _ => Some(new ScalaThisEvaluator(iters))
}
val locals = DebuggerUtil.localParamsForConstructor(scClass)
outerThis ++: explEvaluators ++: implicitsEvals ++: locals.map(fromLocalArgEvaluator)
case _ => explEvaluators
}
}
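// Re-evaluates a captured local by name; captured local objects are additionally wrapped in a scala.runtime.VolatileObjectRef instance.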
def fromLocalArgEvaluator(local: ScTypedDefinition): Evaluator = {
val name = local.asInstanceOf[PsiNamedElement].name
val elemAt = position.getElementAt
val ref = ScalaPsiElementFactory.createExpressionWithContextFromText(name, elemAt, elemAt)
val refEval = evaluatorFor(ref)
if (local.isInstanceOf[ScObject]) {
val qual = "scala.runtime.VolatileObjectRef"
val typeEvaluator = new TypeEvaluator(JVMNameUtil.getJVMRawText(qual))
val signature = JVMNameUtil.getJVMRawText("(Ljava/lang/Object;)V")
new ScalaNewClassInstanceEvaluator(typeEvaluator, signature, Array(refEval))
}
else FromLocalArgEvaluator(refEval)
}
def expressionFromTextEvaluator(string: String, context: PsiElement): Evaluator = {
val expr = ScalaPsiElementFactory.createExpressionWithContextFromText(string, context.getContext, context)
evaluatorFor(expr)
}
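// A local lazy val is read through its compiler-generated "name$N" method, passing the "name$lzy" holder and the matching bitmap$ flag.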
def localLazyValEvaluator(named: PsiNamedElement): Evaluator = {
val name = named.name
val localRefName = s"$name$$lzy"
val localRefEval = new ScalaLocalVariableEvaluator(localRefName, fileName)
val lzyIndex = lazyValIndex(named)
val bitmapName = "bitmap$" + (lzyIndex / 8)
val bitmapEval = new ScalaLocalVariableEvaluator(bitmapName, fileName)
val localFunIndex = localFunctionIndex(named)
val methodName = s"$name$$$localFunIndex"
new ScalaMethodEvaluator(new ScalaThisEvaluator(), methodName, null, Seq(localRefEval, bitmapEval))
}
def ifStmtEvaluator(stmt: ScIfStmt): Evaluator = {
val condEvaluator = stmt.condition match {
case Some(cond) => evaluatorFor(cond)
case None => throw EvaluationException(ScalaBundle.message("if.statement.without.condition"))
}
val ifBranch = stmt.thenBranch match {
case Some(th) => evaluatorFor(th)
case None => throw EvaluationException(ScalaBundle.message("if.statement.without.if.branch"))
}
val elseBranch = stmt.elseBranch.map(evaluatorFor(_))
new ScalaIfEvaluator(condEvaluator, ifBranch, elseBranch)
}
def literalEvaluator(l: ScLiteral): Evaluator = {
l match {
case interpolated: ScInterpolatedStringLiteral =>
val evaluatorOpt = interpolated.getStringContextExpression.map(evaluatorFor(_))
evaluatorOpt.getOrElse(ScalaLiteralEvaluator(l))
case _ if l.isSymbol =>
val value = l.getValue.asInstanceOf[Symbol].name
val expr = ScalaPsiElementFactory.createExpressionFromText( s"""Symbol("$value")""", l.getContext)
evaluatorFor(expr)
case _ => ScalaLiteralEvaluator(l)
}
}
def whileStmtEvaluator(ws: ScWhileStmt): Evaluator = {
val condEvaluator = ws.condition match {
case Some(cond) => evaluatorFor(cond)
case None => throw EvaluationException(ScalaBundle.message("while.statement.without.condition"))
}
val iterationEvaluator = ws.body match {
case Some(body) => evaluatorFor(body)
case None => throw EvaluationException(ScalaBundle.message("while.statement.without.body"))
}
new WhileStatementEvaluator(condEvaluator, iterationEvaluator, null)
}
def doStmtEvaluator(doSt: ScDoStmt): Evaluator = {
val condEvaluator = doSt.condition match {
case Some(cond) => evaluatorFor(cond)
case None =>
throw EvaluationException(ScalaBundle.message("do.statement.without.condition"))
}
val iterationEvaluator = doSt.getExprBody match {
case Some(body) => evaluatorFor(body)
case None =>
throw EvaluationException(ScalaBundle.message("do.statement.without.body"))
}
new ScalaDoStmtEvaluator(condEvaluator, iterationEvaluator)
}
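// Method calls: flattens multiple argument lists, rewrites apply/update sugar to explicit ".apply(...)" calls, and replaces implicitly converted qualifiers with the conversion result before evaluating.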
def scMethodCallEvaluator(methodCall: ScMethodCall): Evaluator = {
def applyCall(invokedText: String, argsText: String) = {
val newExprText = s"($invokedText).apply$argsText"
ScalaPsiElementFactory.createExpressionWithContextFromText(newExprText, methodCall.getContext, methodCall)
}
@tailrec
def collectArgumentsAndBuildEvaluator(call: ScMethodCall,
collected: Seq[ScExpression] = Seq.empty,
tailString: String = "",
matchedParameters: Map[Parameter, Seq[ScExpression]] = Map.empty): Evaluator = {
if (call.isApplyOrUpdateCall) {
if (!call.isUpdateCall) {
val expr = applyCall(call.getInvokedExpr.getText, call.args.getText + tailString)
return evaluatorFor(expr)
} else {
//should be handled on assignment
throw new NeedCompilationException("Update method is not supported")
}
}
val message = ScalaBundle.message("cannot.evaluate.method", call.getText)
call.getInvokedExpr match {
case ref: ScReferenceExpression =>
methodCallEvaluator(methodCall, call.argumentExpressions ++ collected, matchedParameters ++ call.matchedParametersMap)
case newCall: ScMethodCall =>
collectArgumentsAndBuildEvaluator(newCall, call.argumentExpressions ++ collected, call.args.getText + tailString,
matchedParameters ++ call.matchedParametersMap)
case gen: ScGenericCall =>
gen.referencedExpr match {
case ref: ScReferenceExpression if ref.resolve().isInstanceOf[PsiMethod] =>
methodCallEvaluator(methodCall, call.argumentExpressions ++ collected, matchedParameters ++ call.matchedParametersMap)
case ref: ScReferenceExpression =>
ref.getType().getOrAny match {
//isApplyOrUpdateCall does not work for generic calls
case ScType.ExtractClass(psiClass) if psiClass.findMethodsByName("apply", true).nonEmpty =>
val typeArgsText = gen.typeArgs.fold("")(_.getText)
val expr = applyCall(ref.getText, s"$typeArgsText${call.args.getText}$tailString")
evaluatorFor(expr)
case _ => throw EvaluationException(message)
}
case _ =>
throw EvaluationException(message)
}
case _ => throw EvaluationException(message)
}
}
methodCall match {
case hasDeepestInvokedReference(ScReferenceExpression.withQualifier(implicitlyConvertedTo(expr))) =>
val copy = methodCall.copy().asInstanceOf[ScMethodCall]
copy match {
case hasDeepestInvokedReference(ScReferenceExpression.withQualifier(q)) =>
q.replaceExpression(expr, removeParenthesis = false)
evaluatorFor(copy)
case _ =>
val message = ScalaBundle.message("method.call.implicitly.converted.qualifier", methodCall.getText)
throw EvaluationException(message)
}
case _ =>
//todo: handle partially applied functions
collectArgumentsAndBuildEvaluator(methodCall)
}
}
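// Infix expressions: "a op= b" without a real "op=" member is desugared to "a = a op b"; everything else is evaluated through the equivalent plain method call.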
def infixExpressionEvaluator(infix: ScInfixExpr): Evaluator = {
val operation = infix.operation
def isUpdate(ref: ScReferenceExpression): Boolean = {
ref.refName.endsWith("=") &&
(ref.resolve() match {
case n: PsiNamedElement if n.name + "=" == ref.refName => true
case _ => false
})
}
if (isUpdate(operation)) {
val baseExprText = infix.getBaseExpr.getText
val operationText = operation.refName.dropRight(1)
val argText = infix.getArgExpr.getText
val exprText = s"$baseExprText = $baseExprText $operationText $argText"
val expr = ScalaPsiElementFactory.createExpressionWithContextFromText(exprText, infix.getContext, infix)
evaluatorFor(expr)
}
else {
val equivCall = ScalaPsiElementFactory.createEquivMethodCall(infix)
evaluatorFor(equivCall)
}
}
def blockExprEvaluator(block: ScBlock): Evaluator = {
withNewSyntheticVariablesHolder {
val evaluators = block.statements.filter(!_.isInstanceOf[ScImportStmt]).map(evaluatorFor)
new ScalaBlockExpressionEvaluator(evaluators.toSeq)
}
}
def postfixExprEvaluator(p: ScPostfixExpr): Evaluator = {
val equivRef = ScalaPsiElementFactory.createEquivQualifiedReference(p)
evaluatorFor(equivRef)
}
def prefixExprEvaluator(p: ScPrefixExpr): Evaluator = {
val newExprText = s"(${p.operand.getText}).unary_${p.operation.refName}"
val newExpr = ScalaPsiElementFactory.createExpressionWithContextFromText(newExprText, p.getContext, p)
evaluatorFor(newExpr)
}
def refExpressionEvaluator(ref: ScReferenceExpression): Evaluator = {
ref.qualifier match {
case Some(implicitlyConvertedTo(e)) =>
val copy = ref.copy().asInstanceOf[ScReferenceExpression]
copy.qualifier.get.replaceExpression(e, removeParenthesis = false)
evaluatorFor(copy)
case _ =>
val resolve: PsiElement = ref.resolve()
evaluatorForReferenceWithoutParameters(ref.qualifier, resolve, ref)
}
}
def tupleEvaluator(tuple: ScTuple): Evaluator = {
val exprText = "_root_.scala.Tuple" + tuple.exprs.length + tuple.exprs.map(_.getText).mkString("(", ", ", ")")
val expr = ScalaPsiElementFactory.createExpressionWithContextFromText(exprText, tuple.getContext, tuple)
evaluatorFor(expr)
}
def valOrVarDefinitionEvaluator(pList: ScPatternList, expr: ScExpression) = {
val evaluators = ArrayBuffer[Evaluator]()
val exprEval = new ScalaCachingEvaluator(evaluatorFor(expr))
evaluators += exprEval
for {
pattern <- pList.patterns
binding <- pattern.bindings
} {
val name = binding.name
createSyntheticVariable(name)
val leftEval = syntheticVariableEvaluator(name)
val rightEval = evaluateSubpatternFromPattern(exprEval, pattern, binding)
evaluators += new AssignmentEvaluator(leftEval, rightEval)
}
new ScalaBlockExpressionEvaluator(evaluators)
}
def variableDefinitionEvaluator(vd: ScVariableDefinition): Evaluator = {
vd.expr match {
case None => throw EvaluationException(s"Variable definition needs right hand side: ${vd.getText}")
case Some(e) => valOrVarDefinitionEvaluator(vd.pList, e)
}
}
def patternDefinitionEvaluator(pd: ScPatternDefinition): Evaluator = {
pd.expr match {
case None => throw EvaluationException(s"Value definition needs right hand side: ${pd.getText}")
case Some(e) => valOrVarDefinitionEvaluator(pd.pList, e)
}
}
def postProcessExpressionEvaluator(expr: ScExpression, evaluator: Evaluator): Evaluator = {
//boxing and unboxing actions
def unbox(typeTo: String) = unaryEvaluator(unboxEvaluator(evaluator), typeTo)
def box() = boxEvaluator(evaluator)
def valueClassInstance(eval: Evaluator) = {
expr match {
case _: ScNewTemplateDefinition => eval
case ExpressionType(_: ValType) => eval
case ExpressionType(tp @ ValueClassType(inner)) =>
valueClassInstanceEvaluator(eval, inner, tp)
case _ => eval
}
}
import org.jetbrains.plugins.scala.lang.psi.types._
val unboxed = expr.smartExpectedType() match {
case Some(Int) => unbox("toInteger")
case Some(Byte) => unbox("toByte")
case Some(Long) => unbox("toLong")
case Some(Boolean) => unboxEvaluator(evaluator)
case Some(Float) => unbox("toFloat")
case Some(Short) => unbox("toShort")
case Some(Double) => unbox("toDouble")
case Some(Char) => unbox("toCharacter")
case Some(Unit) => new BlockStatementEvaluator(Array(evaluator, unitEvaluator()))
case None => evaluator
case _ => box()
}
valueClassInstance(unboxed)
}
}
object ScalaEvaluatorBuilderUtil {
private val BOXES_RUN_TIME = new TypeEvaluator(JVMNameUtil.getJVMRawText("scala.runtime.BoxesRunTime"))
private val BOXED_UNIT = new TypeEvaluator(JVMNameUtil.getJVMRawText("scala.runtime.BoxedUnit"))
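// Primitive operations are routed through scala.runtime.BoxesRunTime: operands are boxed, the named helper is called, and the result is unboxed, e.g. binaryEvaluator(l, r, "add") yields unbox(BoxesRunTime.add(box(l), box(r))).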
def boxEvaluator(eval: Evaluator): Evaluator = new ScalaBoxingEvaluator(eval)
def boxed(evaluators: Evaluator*): Seq[Evaluator] = evaluators.map(boxEvaluator)
def unboxEvaluator(eval: Evaluator): Evaluator = new UnBoxingEvaluator(eval)
def notEvaluator(eval: Evaluator): Evaluator = {
val rawText = JVMNameUtil.getJVMRawText("(Ljava/lang/Object;)Ljava/lang/Object;")
unboxEvaluator(new ScalaMethodEvaluator(BOXES_RUN_TIME, "takeNot", rawText, boxed(eval)))
}
def eqEvaluator(left: Evaluator, right: Evaluator): Evaluator = {
new ScalaEqEvaluator(left, right)
}
def neEvaluator(left: Evaluator, right: Evaluator): Evaluator = {
notEvaluator(eqEvaluator(left, right))
}
def unitEvaluator(): Evaluator = {
new ScalaFieldEvaluator(BOXED_UNIT, "UNIT")
}
def unaryEvaluator(eval: Evaluator, boxesRunTimeName: String): Evaluator = {
val rawText = JVMNameUtil.getJVMRawText("(Ljava/lang/Object;)Ljava/lang/Object;")
unboxEvaluator(new ScalaMethodEvaluator(BOXES_RUN_TIME, boxesRunTimeName, rawText, boxed(eval)))
}
def binaryEvaluator(left: Evaluator, right: Evaluator, boxesRunTimeName: String): Evaluator = {
val rawText = JVMNameUtil.getJVMRawText("(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;")
unboxEvaluator(new ScalaMethodEvaluator(BOXES_RUN_TIME, boxesRunTimeName, rawText, boxed(left, right)))
}
object hasDeepestInvokedReference {
@tailrec
final def unapply(expr: ScExpression): Option[ScReferenceExpression] = {
expr match {
case call: ScMethodCall => unapply(call.deepestInvokedExpr)
case genCall: ScGenericCall => unapply(genCall.referencedExpr)
case ref: ScReferenceExpression => Some(ref)
case _ => None
}
}
}
def classTagText(arg: ScType): String = {
import org.jetbrains.plugins.scala.lang.psi.types._
arg match {
case Short => "_root_.scala.reflect.ClassTag.Short"
case Byte => "_root_.scala.reflect.ClassTag.Byte"
case Char => "_root_.scala.reflect.ClassTag.Char"
case Int => "_root_.scala.reflect.ClassTag.Int"
case Long => "_root_.scala.reflect.ClassTag.Long"
case Float => "_root_.scala.reflect.ClassTag.Float"
case Double => "_root_.scala.reflect.ClassTag.Double"
case Boolean => "_root_.scala.reflect.ClassTag.Boolean"
case Unit => "_root_.scala.reflect.ClassTag.Unit"
case Any => "_root_.scala.reflect.ClassTag.Any"
case AnyVal => "_root_.scala.reflect.ClassTag.AnyVal"
case Nothing => "_root_.scala.reflect.ClassTag.Nothing"
case Null => "_root_.scala.reflect.ClassTag.Null"
case Singleton => "_root_.scala.reflect.ClassTag.Object"
//todo:
case _ => "_root_.scala.reflect.ClassTag.apply(classOf[_root_.java.lang.Object])"
}
}
def classManifestText(scType: ScType): String = {
import org.jetbrains.plugins.scala.lang.psi.types._
scType match {
case Short => "_root_.scala.reflect.ClassManifest.Short"
case Byte => "_root_.scala.reflect.ClassManifest.Byte"
case Char => "_root_.scala.reflect.ClassManifest.Char"
case Int => "_root_.scala.reflect.ClassManifest.Int"
case Long => "_root_.scala.reflect.ClassManifest.Long"
case Float => "_root_.scala.reflect.ClassManifest.Float"
case Double => "_root_.scala.reflect.ClassManifest.Double"
case Boolean => "_root_.scala.reflect.ClassManifest.Boolean"
case Unit => "_root_.scala.reflect.ClassManifest.Unit"
case Any => "_root_.scala.reflect.ClassManifest.Any"
case AnyVal => "_root_.scala.reflect.ClassManifest.AnyVal"
case Nothing => "_root_.scala.reflect.ClassManifest.Nothing"
case Null => "_root_.scala.reflect.ClassManifest.Null"
case Singleton => "_root_.scala.reflect.ClassManifest.Object"
case JavaArrayType(arg) =>
"_root_.scala.reflect.ClassManifest.arrayType(" + classManifestText(arg) + ")"
case ScParameterizedType(ScDesignatorType(clazz: ScClass), Seq(arg))
if clazz.qualifiedName == "scala.Array" =>
"_root_.scala.reflect.ClassManifest.arrayType(" + classManifestText(arg) + ")"
/*case ScParameterizedType(des, args) =>
ScType.extractClass(des, Option(expr.getProject)) match {
case Some(clazz) =>
"_root_.scala.reflect.ClassManifest.classType(" +
case _ => "null"
}*/ //todo:
case _ => ScType.extractClass(scType) match {
case Some(clss) => "_root_.scala.reflect.ClassManifest.classType(classOf[_root_." +
clss.qualifiedName + "])"
case _ => "_root_.scala.reflect.ClassManifest.classType(classOf[_root_.java.lang." +
"Object])"
}
}
}
def isOfPrimitiveType(param: PsiParameter) = param match { //todo specialized type parameters
case p: ScParameter =>
val tp: ScType = p.getType(TypingContext.empty).getOrAny
isPrimitiveScType(tp)
case p: PsiParameter =>
val tp = param.getType
import com.intellij.psi.PsiType._
Set[PsiType](BOOLEAN, INT, CHAR, DOUBLE, FLOAT, LONG, BYTE, SHORT).contains(tp)
case _ => false
}
def isPrimitiveScType(tp: ScType) = {
import org.jetbrains.plugins.scala.lang.psi.types._
Set[ScType](Boolean, Int, Char, Double, Float, Long, Byte, Short).contains(tp)
}
object implicitlyConvertedTo {
def unapply(expr: ScExpression): Option[ScExpression] = {
val implicits = expr.getImplicitConversions(fromUnder = true)
implicits._2 match {
case Some(fun: ScFunction) =>
val exprText = expr.getText
val callText = s"${fun.name}($exprText)"
val newExprText = fun.containingClass match {
case o: ScObject if isStable(o) => s"${o.qualifiedName}.$callText"
case o: ScObject => //todo: It can cover many cases!
throw EvaluationException(ScalaBundle.message("implicit.conversions.from.dependent.objects"))
case _ => callText //from scope
}
Some(ScalaPsiElementFactory.createExpressionWithContextFromText(newExprText, expr.getContext, expr))
case _ => None
}
}
}
@tailrec
final def isStable(o: ScObject): Boolean = {
val context = PsiTreeUtil.getParentOfType(o, classOf[ScTemplateDefinition], classOf[ScExpression])
if (context == null) return true
context match {
case o: ScObject => isStable(o)
case _ => false
}
}
def getContextClass(elem: PsiElement, strict: Boolean = true): PsiElement = {
if (!strict && isGenerateClass(elem)) elem
else elem.contexts.find(isGenerateClass).orNull
}
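// A "generated class" is a PSI element the compiler turns into a separate JVM class: real classes/objects plus anonymous-function bodies (lambdas, by-name arguments, for-comprehension closures); with indy lambdas only anonymous functions inside super-constructor calls still get their own class.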
def isGenerateClass(elem: PsiElement): Boolean = {
if (ScalaPositionManager.isCompiledWithIndyLambdas(elem.getContainingFile))
isGenerateNonAnonfunClass(elem) || isAnonfunInsideSuperCall(elem)
else isGenerateNonAnonfunClass(elem) || isGenerateAnonfun(elem)
}
def isGenerateNonAnonfunClass(elem: PsiElement): Boolean = {
elem match {
case newTd: ScNewTemplateDefinition if !DebuggerUtil.generatesAnonClass(newTd) => false
case clazz: PsiClass => true
case _ => false
}
}
def isAnonfunInsideSuperCall(elem: PsiElement) = {
def isInsideSuperCall(td: ScTypeDefinition) = {
val extBlock = td.extendsBlock
PsiTreeUtil.getParentOfType(elem, classOf[ScEarlyDefinitions], classOf[ScConstructor]) match {
case ed: ScEarlyDefinitions if ed.getParent == extBlock => true
case c: ScConstructor if c.getParent.getParent == extBlock => true
case _ => false
}
}
val containingClass = PsiTreeUtil.getParentOfType(elem, classOf[ScTypeDefinition])
isGenerateAnonfun(elem) && isInsideSuperCall(containingClass)
}
def isGenerateAnonfun(elem: PsiElement): Boolean = {
def isGenerateAnonfunWithCache: Boolean = {
def computation = elem match {
case e: ScExpression if ScUnderScoreSectionUtil.underscores(e).nonEmpty => true
case e: ScExpression if ScalaPsiUtil.isByNameArgument(e) || ScalaPsiUtil.isArgumentOfFunctionType(e) => true
case ScalaPsiUtil.MethodValue(_) => true
case Both(ChildOf(argExprs: ScArgumentExprList), ScalaPositionManager.InsideAsync(call))
if call.args == argExprs => true
case _ => false
}
def cacheProvider = new CachedValueProvider[Boolean] {
override def compute(): Result[Boolean] = Result.create(computation, elem)
}
if (elem == null) false
else CachedValuesManager.getCachedValue(elem, cacheProvider)
}
def isGenerateAnonfunSimple: Boolean = {
elem match {
case f: ScFunctionExpr => true
case (_: ScExpression) childOf (_: ScForStatement) => true
case (cc: ScCaseClauses) childOf (b: ScBlockExpr) if b.isAnonymousFunction => true
case (g: ScGuard) childOf (_: ScEnumerators) => true
case (g: ScGenerator) childOf (enums: ScEnumerators) if !enums.generators.headOption.contains(g) => true
case e: ScEnumerator => true
case _ => false
}
}
isGenerateAnonfunSimple || isGenerateAnonfunWithCache
}
def anonClassCount(elem: PsiElement): Int = { //todo: non irrefutable patterns?
elem match {
case (e: ScExpression) childOf (f: ScForStatement) =>
f.enumerators.fold(1)(e => e.generators.length)
case (e @ (_: ScEnumerator | _: ScGenerator | _: ScGuard)) childOf (enums: ScEnumerators) =>
enums.children.takeWhile(_ != e).count(_.isInstanceOf[ScGenerator])
case _ => 1
}
}
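// The compiler disambiguates same-named local functions and local lazy vals with a numeric suffix; these helpers recompute that 1-based index by walking the containing class in declaration order.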
def localFunctionIndex(named: PsiNamedElement): Int = {
elementsWithSameNameIndex(named, {
case f: ScFunction if f.isLocal && f.name == named.name => true
case Both(ScalaPsiUtil.inNameContext(LazyVal(_)), lzy: ScBindingPattern) if lzy.name == named.name => true
case _ => false
})
}
def lazyValIndex(named: PsiNamedElement): Int = {
elementsWithSameNameIndex(named, {
case Both(ScalaPsiUtil.inNameContext(LazyVal(_)), lzy: ScBindingPattern) if lzy.name == named.name => true
case _ => false
})
}
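// Default argument getters are compiled to "name$default$N" methods ("$lessinit$greater$default$N" for constructors), with a local-function suffix when the owner is a local function.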
def defaultParameterMethodName(method: ScMethodLike, paramIndex: Int): String = {
method match {
case fun: ScFunction if !fun.isConstructor =>
val suffix: String = if (!fun.isLocal) "" else "$" + localFunctionIndex(fun)
fun.name + "$default$" + paramIndex + suffix
case _ if method.isConstructor => "$lessinit$greater$default$" + paramIndex + "()"
}
}
def elementsWithSameNameIndex(named: PsiNamedElement, condition: PsiElement => Boolean): Int = {
val containingClass = getContextClass(named)
if (containingClass == null) return -1
val depthFirstIterator = containingClass.depthFirst {
case `containingClass` => true
case elem if isGenerateClass(elem) => false
case _ => true
}
val sameNameElements = depthFirstIterator.filter(condition).toList
sameNameElements.indexOf(named) + 1
}
def traitImplementation(elem: PsiElement): Option[JVMName] = {
val clazz = getContextClass(elem)
clazz match {
case t: ScTrait =>
Some(DebuggerUtil.getClassJVMName(t, withPostfix = true))
case _ => None
}
}
def isLocalFunction(fun: ScFunction): Boolean = {
!fun.getContext.isInstanceOf[ScTemplateBody]
}
def isNotUsedEnumerator(named: PsiNamedElement, place: PsiElement): Boolean = {
named match {
case ScalaPsiUtil.inNameContext(enum @ (_: ScEnumerator | _: ScGenerator)) =>
enum.getParent.getParent match {
case ScForStatement(enums, body) =>
enums.namings.map(_.pattern) match {
case Seq(refPattern: ScReferencePattern) => return false //can always evaluate from single simple generator
case _ =>
}
def insideBody = PsiTreeUtil.isAncestor(body, place, false)
def isNotUsed = ReferencesSearch.search(named, new LocalSearchScope(body)).findFirst() == null
insideBody && isNotUsed
case _ => false
}
case _ => false
}
}
object isInsideValueClass {
def unapply(elem: PsiElement): Option[ScClass] = {
getContextClass(elem) match {
case c: ScClass if ValueClassType.isValueClass(c) => Some(c)
case _ => None
}
}
}
object isInsideLocalFunction {
def unapply(elem: PsiElement): Option[ScFunction] = {
@tailrec
def inner(element: PsiElement): Option[ScFunction] = {
element match {
case null => None
case fun: ScFunction if isLocalFunction(fun) &&
!fun.parameters.exists(param => PsiTreeUtil.isAncestor(param, elem, false)) =>
Some(fun)
case other if other.getContext != null => inner(other.getContext)
case _ => None
}
}
inner(elem)
}
}
object privateTraitMethod {
def unapply(r: ScalaResolveResult): Option[(ScTrait, ScFunctionDefinition)] = {
r.getElement match {
case Both(fun: ScFunctionDefinition, ContainingClass(tr: ScTrait)) if fun.isPrivate => Some(tr, fun)
case _ => None
}
}
}
object privateThisField {
def unapply(elem: PsiElement): Option[ScNamedElement] = {
elem match {
case c: ScClassParameter if c.isPrivateThis => Some(c)
case Both(bp: ScBindingPattern, ScalaPsiUtil.inNameContext(v @ (_: ScVariable | _: ScValue))) =>
v match {
case mo: ScModifierListOwner if mo.getModifierList.accessModifier.exists(am => am.isPrivate && am.isThis) => Some(bp)
case _ => None
}
case _ => None
}
}
}
}
| advancedxy/intellij-scala | src/org/jetbrains/plugins/scala/debugger/evaluation/ScalaEvaluatorBuilderUtil.scala | Scala | apache-2.0 | 75,445 |
package ohnosequences.datasets.illumina
import ohnosequences.datasets._
/*
### Illumina reads
*/
trait AnyReadsData extends AnyData {
type InsertSize <: AnyInsertSize
val insertSize: InsertSize
type EndType <: AnyEndType
val endType: EndType
type Length <: AnyLength
val length: Length
lazy val labelPrefix: String = s"Illumina.${endType}.${length.toInt}"
}
abstract class ReadsData[
InsrtSz <: AnyInsertSize,
EndTp <: AnyEndType,
Lngth <: AnyLength
](val endType: EndTp,
val length: Lngth,
val insertSize: InsrtSz
) extends AnyReadsData {
type InsertSize = InsrtSz
type Length = Lngth
type EndType = EndTp
}
// Illumina insert size
sealed trait AnyInsertSize { val size: Option[Int] }
case object unknownInsertSize extends AnyInsertSize { val size = None }
case class InsertSize(val s: Int) extends AnyInsertSize { val size = Some(s) }
// Illumina (runs?) are either single-end or paired-end
sealed trait AnyEndType
case object singleEndType extends AnyEndType
case object pairedEndType extends AnyEndType
sealed trait AnyLength { val toInt: Int }
class Length(val toInt: Int) extends AnyLength
case object bp300 extends Length(300)
case object bp250 extends Length(250)
case object bp150 extends Length(150)
case object bp100 extends Length(100)
case object bp75 extends Length(75)
case object bp50 extends Length(50)
class SingleEndReads[
InsrtSz <: AnyInsertSize,
Lngth <: AnyLength
](val l: Lngth, val is: InsrtSz)(val label: String)
extends ReadsData(singleEndType, l, is)
class PairedEndReads1[
InsrtSz <: AnyInsertSize,
Lngth <: AnyLength
](val l: Lngth, val is: InsrtSz)(val label: String)
extends ReadsData(pairedEndType, l, is)
class PairedEndReads2[
InsrtSz <: AnyInsertSize,
Lngth <: AnyLength
](val l: Lngth, val is: InsrtSz)(val label: String)
extends ReadsData(pairedEndType, l, is)
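/* Editor's usage sketch (hedged, not part of the original file): a single-end, 150 bp reads
 * datum with an unknown insert size. Assumes the inherited AnyData contract is satisfied once a
 * `label` is supplied.
 *
 *   val sample1 = new SingleEndReads(bp150, unknownInsertSize)("sample1")
 */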
// TODO if there's any use of this, it should go into some AnyDataType
trait AnySequencingTechnology
case object Illumina extends AnySequencingTechnology
sealed trait AnyIlluminaMachine
case object MiSeq extends AnyIlluminaMachine
case object MiSeqDx extends AnyIlluminaMachine
case object MiSeqFGx extends AnyIlluminaMachine
case object NextSeq500 extends AnyIlluminaMachine
case object HiSeq2500 extends AnyIlluminaMachine
case object HiSeq3000 extends AnyIlluminaMachine
case object HiSeq4000 extends AnyIlluminaMachine
case object HiSeqXFive extends AnyIlluminaMachine
case object HiSeqXTen extends AnyIlluminaMachine
// useless as of now
trait AnySequencingRun {
type Technology <: AnySequencingTechnology
val technology: Technology
// TODO provider, whatever
}
| ohnosequences/datasets.illumina | src/main/scala/reads.scala | Scala | agpl-3.0 | 2,688 |
package wvlet.airframe.codec
import wvlet.airframe.surface.Surface
import wvlet.log.LogSupport
/**
*/
case class MessageCodecFactory(codecFinder: MessageCodecFinder = Compat.messageCodecFinder, mapOutput: Boolean = false)
extends ScalaCompat.MessageCodecFactoryBase
with LogSupport {
private[this] var cache = Map.empty[Surface, MessageCodec[_]]
def withCodecs(additionalCodecs: Map[Surface, MessageCodec[_]]): MessageCodecFactory = {
this.copy(codecFinder = codecFinder orElse MessageCodecFinder.newCodecFinder(additionalCodecs))
}
// Generate a codec that outputs objects as Map type. This should be enabled for generating JSON data
def withMapOutput: MessageCodecFactory = {
    if (mapOutput) {
this
} else {
this.copy(mapOutput = true)
}
}
def noMapOutput: MessageCodecFactory = {
this.copy(mapOutput = false)
}
@deprecated(message = "use withMapOutput ", since = "19.11.0")
def withObjectMapCodec: MessageCodecFactory = withMapOutput
def orElse(other: MessageCodecFactory): MessageCodecFactory = {
this.copy(codecFinder = codecFinder.orElse(other.codecFinder))
}
private def generateObjectSurface(seenSet: Set[Surface]): PartialFunction[Surface, MessageCodec[_]] = {
case surface: Surface =>
val codecs = for (p <- surface.params) yield {
ofSurface(p.surface, seenSet)
}
if (mapOutput) {
ObjectMapCodec(surface, codecs.toIndexedSeq)
} else {
ObjectCodec(surface, codecs.toIndexedSeq)
}
}
def of(surface: Surface): MessageCodec[_] = ofSurface(surface)
def ofSurface(surface: Surface, seen: Set[Surface] = Set.empty): MessageCodec[_] = {
// TODO Create a fast object codec with code generation (e.g., Scala macros)
if (cache.contains(surface)) {
cache(surface)
} else if (seen.contains(surface)) {
LazyCodec(surface, this)
} else {
val seenSet = seen + surface
val codec =
codecFinder
.findCodec(this, seenSet)
.orElse {
// fallback
generateObjectSurface(seenSet)
}
.apply(surface)
cache += surface -> codec
codec
}
}
}
object MessageCodecFactory {
val defaultFactory: MessageCodecFactory = new MessageCodecFactory()
def defaultFactoryForJSON: MessageCodecFactory = defaultFactory.withMapOutput
def defaultFactoryForMapOutput: MessageCodecFactory = defaultFactoryForJSON
/**
* Create a custom MessageCodecFactory from a partial mapping
*/
def newFactory(pf: PartialFunction[Surface, MessageCodec[_]]): MessageCodecFactory = {
new MessageCodecFactory(codecFinder = new MessageCodecFinder {
override def findCodec(
factory: MessageCodecFactory,
seenSet: Set[Surface]
): PartialFunction[Surface, MessageCodec[_]] = {
pf
}
})
}
}
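/**
 * Editor's usage sketch (hedged, not part of the original file): resolving a codec for a simple
 * case class through the factory. Assumes `Surface.of[A]` from airframe-surface is available.
 */
object MessageCodecFactoryUsageSketch {
  case class Person(id: Int, name: String)
  // The JSON-oriented factory renders objects as Map values when packing.
  val personCodec: MessageCodec[_] = MessageCodecFactory.defaultFactoryForJSON.of(Surface.of[Person])
}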
| wvlet/airframe | airframe-codec/src/main/scala/wvlet/airframe/codec/MessageCodecFactory.scala | Scala | apache-2.0 | 2,904 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.hadoop.fs.{Path, PathFilter}
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants._
import org.apache.hadoop.hive.ql.exec.Utilities
import org.apache.hadoop.hive.ql.metadata.{Partition => HivePartition, Table => HiveTable}
import org.apache.hadoop.hive.ql.plan.{PlanUtils, TableDesc}
import org.apache.hadoop.hive.serde2.Deserializer
import org.apache.hadoop.hive.serde2.objectinspector.primitive._
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorConverters, StructObjectInspector}
import org.apache.hadoop.io.Writable
import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf}
import org.apache.spark.Logging
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.{EmptyRDD, HadoopRDD, RDD, UnionRDD}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.{SerializableConfiguration, Utils}
/**
* A trait for subclasses that handle table scans.
*/
private[hive] sealed trait TableReader {
def makeRDDForTable(hiveTable: HiveTable): RDD[InternalRow]
def makeRDDForPartitionedTable(partitions: Seq[HivePartition]): RDD[InternalRow]
}
/**
* Helper class for scanning tables stored in Hadoop - e.g., to read Hive tables that reside in the
* data warehouse directory.
*/
private[hive]
class HadoopTableReader(
@transient attributes: Seq[Attribute],
@transient relation: MetastoreRelation,
@transient sc: HiveContext,
@transient hiveExtraConf: HiveConf)
extends TableReader with Logging {
  // Hadoop honors "mapred.map.tasks" as a hint, but will ignore it when mapred.job.tracker is "local".
// https://hadoop.apache.org/docs/r1.0.4/mapred-default.html
//
  // In order to keep consistency with Hive, we will let it be 0 in local mode also.
private val _minSplitsPerRDD = if (sc.sparkContext.isLocal) {
    0 // will be split based on blocks by default.
} else {
math.max(sc.hiveconf.getInt("mapred.map.tasks", 1), sc.sparkContext.defaultMinPartitions)
}
// TODO: set aws s3 credentials.
private val _broadcastedHiveConf =
sc.sparkContext.broadcast(new SerializableConfiguration(hiveExtraConf))
override def makeRDDForTable(hiveTable: HiveTable): RDD[InternalRow] =
makeRDDForTable(
hiveTable,
Utils.classForName(relation.tableDesc.getSerdeClassName).asInstanceOf[Class[Deserializer]],
filterOpt = None)
/**
* Creates a Hadoop RDD to read data from the target table's data directory. Returns a transformed
* RDD that contains deserialized rows.
*
   * @param hiveTable Hive metadata for the table being scanned.
* @param deserializerClass Class of the SerDe used to deserialize Writables read from Hadoop.
* @param filterOpt If defined, then the filter is used to reject files contained in the data
* directory being read. If None, then all files are accepted.
*/
def makeRDDForTable(
hiveTable: HiveTable,
deserializerClass: Class[_ <: Deserializer],
filterOpt: Option[PathFilter]): RDD[InternalRow] = {
assert(!hiveTable.isPartitioned, """makeRDDForTable() cannot be called on a partitioned table,
      since input formats may differ across partitions. Use makeRDDForPartitionedTable() instead.""")
// Create local references to member variables, so that the entire `this` object won't be
// serialized in the closure below.
val tableDesc = relation.tableDesc
val broadcastedHiveConf = _broadcastedHiveConf
val tablePath = hiveTable.getPath
val inputPathStr = applyFilterIfNeeded(tablePath, filterOpt)
// logDebug("Table input: %s".format(tablePath))
val ifc = hiveTable.getInputFormatClass
.asInstanceOf[java.lang.Class[InputFormat[Writable, Writable]]]
val hadoopRDD = createHadoopRdd(tableDesc, inputPathStr, ifc)
val attrsWithIndex = attributes.zipWithIndex
val mutableRow = new SpecificMutableRow(attributes.map(_.dataType))
val deserializedHadoopRDD = hadoopRDD.mapPartitions { iter =>
val hconf = broadcastedHiveConf.value.value
val deserializer = deserializerClass.newInstance()
deserializer.initialize(hconf, tableDesc.getProperties)
HadoopTableReader.fillObject(iter, deserializer, attrsWithIndex, mutableRow, deserializer)
}
deserializedHadoopRDD
}
override def makeRDDForPartitionedTable(partitions: Seq[HivePartition]): RDD[InternalRow] = {
val partitionToDeserializer = partitions.map(part =>
(part, part.getDeserializer.getClass.asInstanceOf[Class[Deserializer]])).toMap
makeRDDForPartitionedTable(partitionToDeserializer, filterOpt = None)
}
/**
* Create a HadoopRDD for every partition key specified in the query. Note that for on-disk Hive
* tables, a data directory is created for each partition corresponding to keys specified using
* 'PARTITION BY'.
*
* @param partitionToDeserializer Mapping from a Hive Partition metadata object to the SerDe
* class to use to deserialize input Writables from the corresponding partition.
* @param filterOpt If defined, then the filter is used to reject files contained in the data
* subdirectory of each partition being read. If None, then all files are accepted.
*/
def makeRDDForPartitionedTable(
partitionToDeserializer: Map[HivePartition,
Class[_ <: Deserializer]],
filterOpt: Option[PathFilter]): RDD[InternalRow] = {
    // SPARK-5068: get FileStatus and do the filtering locally when the path does not exist
def verifyPartitionPath(
partitionToDeserializer: Map[HivePartition, Class[_ <: Deserializer]]):
Map[HivePartition, Class[_ <: Deserializer]] = {
if (!sc.conf.verifyPartitionPath) {
partitionToDeserializer
} else {
var existPathSet = collection.mutable.Set[String]()
var pathPatternSet = collection.mutable.Set[String]()
partitionToDeserializer.filter {
case (partition, partDeserializer) =>
def updateExistPathSetByPathPattern(pathPatternStr: String) {
val pathPattern = new Path(pathPatternStr)
val fs = pathPattern.getFileSystem(sc.hiveconf)
val matches = fs.globStatus(pathPattern)
matches.foreach(fileStatus => existPathSet += fileStatus.getPath.toString)
}
// convert /demo/data/year/month/day to /demo/data/*/*/*/
def getPathPatternByPath(parNum: Int, tempPath: Path): String = {
var path = tempPath
for (i <- (1 to parNum)) path = path.getParent
val tails = (1 to parNum).map(_ => "*").mkString("/", "/", "/")
path.toString + tails
}
val partPath = partition.getDataLocation
val partNum = Utilities.getPartitionDesc(partition).getPartSpec.size();
var pathPatternStr = getPathPatternByPath(partNum, partPath)
if (!pathPatternSet.contains(pathPatternStr)) {
pathPatternSet += pathPatternStr
updateExistPathSetByPathPattern(pathPatternStr)
}
existPathSet.contains(partPath.toString)
}
}
}
val hivePartitionRDDs = verifyPartitionPath(partitionToDeserializer)
.map { case (partition, partDeserializer) =>
val partDesc = Utilities.getPartitionDesc(partition)
val partPath = partition.getDataLocation
val inputPathStr = applyFilterIfNeeded(partPath, filterOpt)
val ifc = partDesc.getInputFileFormatClass
.asInstanceOf[java.lang.Class[InputFormat[Writable, Writable]]]
// Get partition field info
val partSpec = partDesc.getPartSpec
val partProps = partDesc.getProperties
val partColsDelimited: String = partProps.getProperty(META_TABLE_PARTITION_COLUMNS)
// Partitioning columns are delimited by "/"
val partCols = partColsDelimited.trim().split("/").toSeq
// 'partValues[i]' contains the value for the partitioning column at 'partCols[i]'.
val partValues = if (partSpec == null) {
Array.fill(partCols.size)(new String)
} else {
partCols.map(col => new String(partSpec.get(col))).toArray
}
// Create local references so that the outer object isn't serialized.
val tableDesc = relation.tableDesc
val broadcastedHiveConf = _broadcastedHiveConf
val localDeserializer = partDeserializer
val mutableRow = new SpecificMutableRow(attributes.map(_.dataType))
// Splits all attributes into two groups, partition key attributes and those that are not.
// Attached indices indicate the position of each attribute in the output schema.
val (partitionKeyAttrs, nonPartitionKeyAttrs) =
attributes.zipWithIndex.partition { case (attr, _) =>
relation.partitionKeys.contains(attr)
}
def fillPartitionKeys(rawPartValues: Array[String], row: MutableRow): Unit = {
partitionKeyAttrs.foreach { case (attr, ordinal) =>
val partOrdinal = relation.partitionKeys.indexOf(attr)
row(ordinal) = Cast(Literal(rawPartValues(partOrdinal)), attr.dataType).eval(null)
}
}
// Fill all partition keys to the given MutableRow object
fillPartitionKeys(partValues, mutableRow)
createHadoopRdd(tableDesc, inputPathStr, ifc).mapPartitions { iter =>
val hconf = broadcastedHiveConf.value.value
val deserializer = localDeserializer.newInstance()
deserializer.initialize(hconf, partProps)
// get the table deserializer
val tableSerDe = tableDesc.getDeserializerClass.newInstance()
tableSerDe.initialize(hconf, tableDesc.getProperties)
// fill the non partition key attributes
HadoopTableReader.fillObject(iter, deserializer, nonPartitionKeyAttrs,
mutableRow, tableSerDe)
}
}.toSeq
// Even if we don't use any partitions, we still need an empty RDD
if (hivePartitionRDDs.size == 0) {
new EmptyRDD[InternalRow](sc.sparkContext)
} else {
new UnionRDD(hivePartitionRDDs(0).context, hivePartitionRDDs)
}
}
/**
* If `filterOpt` is defined, then it will be used to filter files from `path`. These files are
* returned in a single, comma-separated string.
*/
private def applyFilterIfNeeded(path: Path, filterOpt: Option[PathFilter]): String = {
filterOpt match {
case Some(filter) =>
val fs = path.getFileSystem(sc.hiveconf)
val filteredFiles = fs.listStatus(path, filter).map(_.getPath.toString)
filteredFiles.mkString(",")
case None => path.toString
}
}
/**
* Creates a HadoopRDD based on the broadcasted HiveConf and other job properties that will be
* applied locally on each slave.
*/
private def createHadoopRdd(
tableDesc: TableDesc,
path: String,
inputFormatClass: Class[InputFormat[Writable, Writable]]): RDD[Writable] = {
val initializeJobConfFunc = HadoopTableReader.initializeLocalJobConfFunc(path, tableDesc) _
val rdd = new HadoopRDD(
sc.sparkContext,
_broadcastedHiveConf.asInstanceOf[Broadcast[SerializableConfiguration]],
Some(initializeJobConfFunc),
inputFormatClass,
classOf[Writable],
classOf[Writable],
_minSplitsPerRDD)
// Only take the value (skip the key) because Hive works only with values.
rdd.map(_._2)
}
}
private[hive] object HadoopTableReader extends HiveInspectors with Logging {
/**
   * Curried. After being given an argument for 'path', the resulting JobConf => Unit closure is used to
* instantiate a HadoopRDD.
*/
def initializeLocalJobConfFunc(path: String, tableDesc: TableDesc)(jobConf: JobConf) {
FileInputFormat.setInputPaths(jobConf, Seq[Path](new Path(path)): _*)
if (tableDesc != null) {
PlanUtils.configureInputJobPropertiesForStorageHandler(tableDesc)
Utilities.copyTableJobPropertiesToConf(tableDesc, jobConf)
}
val bufferSize = System.getProperty("spark.buffer.size", "65536")
jobConf.set("io.file.buffer.size", bufferSize)
}
/**
* Transform all given raw `Writable`s into `Row`s.
* @param iterator Iterator of all `Writable`s to be transformed
* @param rawDeser The `Deserializer` associated with the input `Writable`
* @param nonPartitionKeyAttrs Attributes that should be filled together with their corresponding
* positions in the output schema
* @param mutableRow A reusable `MutableRow` that should be filled
* @param tableDeser Table Deserializer
* @return An `Iterator[Row]` transformed from `iterator`
*/
def fillObject(
iterator: Iterator[Writable],
rawDeser: Deserializer,
nonPartitionKeyAttrs: Seq[(Attribute, Int)],
mutableRow: MutableRow,
tableDeser: Deserializer): Iterator[InternalRow] = {
val soi = if (rawDeser.getObjectInspector.equals(tableDeser.getObjectInspector)) {
rawDeser.getObjectInspector.asInstanceOf[StructObjectInspector]
} else {
ObjectInspectorConverters.getConvertedOI(
rawDeser.getObjectInspector,
tableDeser.getObjectInspector).asInstanceOf[StructObjectInspector]
}
logDebug(soi.toString)
val (fieldRefs, fieldOrdinals) = nonPartitionKeyAttrs.map { case (attr, ordinal) =>
soi.getStructFieldRef(attr.name) -> ordinal
}.unzip
/**
* Builds specific unwrappers ahead of time according to object inspector
* types to avoid pattern matching and branching costs per row.
*/
val unwrappers: Seq[(Any, MutableRow, Int) => Unit] = fieldRefs.map {
_.getFieldObjectInspector match {
case oi: BooleanObjectInspector =>
(value: Any, row: MutableRow, ordinal: Int) => row.setBoolean(ordinal, oi.get(value))
case oi: ByteObjectInspector =>
(value: Any, row: MutableRow, ordinal: Int) => row.setByte(ordinal, oi.get(value))
case oi: ShortObjectInspector =>
(value: Any, row: MutableRow, ordinal: Int) => row.setShort(ordinal, oi.get(value))
case oi: IntObjectInspector =>
(value: Any, row: MutableRow, ordinal: Int) => row.setInt(ordinal, oi.get(value))
case oi: LongObjectInspector =>
(value: Any, row: MutableRow, ordinal: Int) => row.setLong(ordinal, oi.get(value))
case oi: FloatObjectInspector =>
(value: Any, row: MutableRow, ordinal: Int) => row.setFloat(ordinal, oi.get(value))
case oi: DoubleObjectInspector =>
(value: Any, row: MutableRow, ordinal: Int) => row.setDouble(ordinal, oi.get(value))
case oi: HiveVarcharObjectInspector =>
(value: Any, row: MutableRow, ordinal: Int) =>
row.update(ordinal, UTF8String.fromString(oi.getPrimitiveJavaObject(value).getValue))
case oi: HiveDecimalObjectInspector =>
(value: Any, row: MutableRow, ordinal: Int) =>
row.update(ordinal, HiveShim.toCatalystDecimal(oi, value))
case oi: TimestampObjectInspector =>
(value: Any, row: MutableRow, ordinal: Int) =>
row.setLong(ordinal, DateTimeUtils.fromJavaTimestamp(oi.getPrimitiveJavaObject(value)))
case oi: DateObjectInspector =>
(value: Any, row: MutableRow, ordinal: Int) =>
row.setInt(ordinal, DateTimeUtils.fromJavaDate(oi.getPrimitiveJavaObject(value)))
case oi: BinaryObjectInspector =>
(value: Any, row: MutableRow, ordinal: Int) =>
row.update(ordinal, oi.getPrimitiveJavaObject(value))
case oi =>
(value: Any, row: MutableRow, ordinal: Int) => row(ordinal) = unwrap(value, oi)
}
}
val converter = ObjectInspectorConverters.getConverter(rawDeser.getObjectInspector, soi)
// Map each tuple to a row object
iterator.map { value =>
val raw = converter.convert(rawDeser.deserialize(value))
var i = 0
while (i < fieldRefs.length) {
val fieldValue = soi.getStructFieldData(raw, fieldRefs(i))
if (fieldValue == null) {
mutableRow.setNullAt(fieldOrdinals(i))
} else {
unwrappers(i)(fieldValue, mutableRow, fieldOrdinals(i))
}
i += 1
}
mutableRow: InternalRow
}
}
}
| tophua/spark1.52 | sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala | Scala | apache-2.0 | 18,757 |
package io.github.daviddenton.finagle.aws
import io.github.daviddenton.finagle.aws.AwsHmacSha256.{hex, hmacSHA256}
import org.scalatest.{FunSpec, Matchers}
class AwsHmacSha256Test extends FunSpec with Matchers {
describe("AwsHmacSha256") {
it("hash") {
AwsHmacSha256.hash("test string") shouldBe "d5579c46dfcc7f18207013e65b44e4cb4e2c2298f4ac457ba8f82743f31e930b"
}
it("hex") {
hex("test string".getBytes) shouldBe "7465737420737472696e67"
}
it("encrypt") {
hex(hmacSHA256("test key".getBytes, "test string")) shouldBe "6864a9fdc9bc77190c4bc6d1d875a0afe19461907f486f4ba5213a1f15b71cc9"
}
}
}
| daviddenton/finagle-aws | src/test/scala/io/github/daviddenton/finagle/aws/AwsHmacSha256Test.scala | Scala | apache-2.0 | 644 |
package chapter.two
import ExerciseSixThroughNine.product
import ExerciseSixThroughNine.product2
import ExerciseSixThroughNine.product3
import ExerciseSixThroughNine.productRecursive
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ExerciseSixThroughNineSpec extends FlatSpec with Matchers {
"product" should "return the product of the Unicode codes of all letters when given a string" in {
product("Hello") shouldBe 9415087488L
product2("Hello") shouldBe 9415087488L
product3("Hello") shouldBe 9415087488L
productRecursive("Hello") shouldBe 9415087488L
}
} | deekim/impatient-scala | src/test/scala/chapter/two/ExerciseSixThroughNineSpec.scala | Scala | apache-2.0 | 671 |
package pl.touk.nussknacker.ui.db.entity
import slick.jdbc.JdbcProfile
import slick.lifted.{ProvenShape, TableQuery => LTableQuery}
//TODO: Remove it in next release
trait EnvironmentsEntityFactory {
protected val profile: JdbcProfile
import profile.api._
val environmentsTable: LTableQuery[EnvironmentsEntityFactory#EnvironmentsEntity] = LTableQuery(new EnvironmentsEntity(_))
class EnvironmentsEntity(tag: Tag) extends Table[EnvironmentsEntityData](tag, "environments") {
def name: Rep[String] = column[String]("name", O.PrimaryKey)
def * : ProvenShape[EnvironmentsEntityData] = name <> (EnvironmentsEntityData.apply, EnvironmentsEntityData.unapply)
}
}
case class EnvironmentsEntityData(name: String)
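// Editor's usage sketch (hedged, not part of the original file): with `profile.api._` in scope,
// rows can be queried through Slick as usual, e.g.
//   environmentsTable.filter(_.name === "prod").result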
| TouK/nussknacker | ui/server/src/main/scala/pl/touk/nussknacker/ui/db/entity/EnvironmentsEntityFactory.scala | Scala | apache-2.0 | 728 |
package lila.tournament
import akka.actor._
import akka.pattern.ask
import com.typesafe.config.Config
import lila.common.PimpedConfig._
import lila.hub.actorApi.map.Ask
import lila.hub.{ ActorMap, Sequencer }
import lila.socket.actorApi.GetVersion
import lila.socket.History
import makeTimeout.short
final class Env(
config: Config,
system: ActorSystem,
db: lila.db.Env,
mongoCache: lila.memo.MongoCache.Builder,
flood: lila.security.Flood,
hub: lila.hub.Env,
roundMap: ActorRef,
roundSocketHub: ActorSelection,
lightUser: String => Option[lila.common.LightUser],
isOnline: String => Boolean,
onStart: String => Unit,
secondsToMove: Int,
trophyApi: lila.user.TrophyApi,
scheduler: lila.common.Scheduler) {
private val settings = new {
val CollectionTournament = config getString "collection.tournament"
val CollectionPlayer = config getString "collection.player"
val CollectionPairing = config getString "collection.pairing"
val HistoryMessageTtl = config duration "history.message.ttl"
val CreatedCacheTtl = config duration "created.cache.ttl"
val LeaderboardCacheTtl = config duration "leaderboard.cache.ttl"
val UidTimeout = config duration "uid.timeout"
val SocketTimeout = config duration "socket.timeout"
val SocketName = config getString "socket.name"
val OrganizerName = config getString "organizer.name"
val ReminderName = config getString "reminder.name"
val SequencerTimeout = config duration "sequencer.timeout"
val SequencerMapName = config getString "sequencer.map_name"
val NetDomain = config getString "net.domain"
}
import settings._
lazy val forms = new DataForm
lazy val cached = new Cached(CreatedCacheTtl)
lazy val api = new TournamentApi(
cached = cached,
    scheduleJsonView = scheduleJsonView,
system = system,
sequencers = sequencerMap,
autoPairing = autoPairing,
clearJsonViewCache = jsonView.clearCache,
router = hub.actor.router,
renderer = hub.actor.renderer,
timeline = hub.actor.timeline,
socketHub = socketHub,
site = hub.socket.site,
lobby = hub.socket.lobby,
trophyApi = trophyApi,
roundMap = roundMap,
roundSocketHub = roundSocketHub)
lazy val socketHandler = new SocketHandler(
hub = hub,
socketHub = socketHub,
chat = hub.actor.chat,
flood = flood)
lazy val winners = new Winners(
mongoCache = mongoCache,
ttl = LeaderboardCacheTtl)
lazy val jsonView = new JsonView(lightUser)
lazy val scheduleJsonView = new ScheduleJsonView(lightUser)
private val socketHub = system.actorOf(
Props(new lila.socket.SocketHubActor.Default[Socket] {
def mkActor(tournamentId: String) = new Socket(
tournamentId = tournamentId,
history = new History(ttl = HistoryMessageTtl),
jsonView = jsonView,
uidTimeout = UidTimeout,
socketTimeout = SocketTimeout,
lightUser = lightUser)
}), name = SocketName)
private val sequencerMap = system.actorOf(Props(ActorMap { id =>
new Sequencer(SequencerTimeout)
}), name = SequencerMapName)
private val organizer = system.actorOf(Props(new Organizer(
api = api,
reminder = system.actorOf(Props(new Reminder(
renderer = hub.actor.renderer
)), name = ReminderName),
isOnline = isOnline,
socketHub = socketHub
)), name = OrganizerName)
private val tournamentScheduler = system.actorOf(Props(new Scheduler(api)))
def version(tourId: String): Fu[Int] =
socketHub ? Ask(tourId, GetVersion) mapTo manifest[Int]
private lazy val autoPairing = new AutoPairing(
roundMap = roundMap,
system = system,
onStart = onStart,
secondsToMove = secondsToMove)
{
import scala.concurrent.duration._
scheduler.message(2 seconds) {
organizer -> actorApi.AllCreatedTournaments
}
scheduler.message(3 seconds) {
organizer -> actorApi.StartedTournaments
}
scheduler.message(5 minutes) {
tournamentScheduler -> actorApi.ScheduleNow
}
}
private[tournament] lazy val tournamentColl = db(CollectionTournament)
private[tournament] lazy val pairingColl = db(CollectionPairing)
private[tournament] lazy val playerColl = db(CollectionPlayer)
}
object Env {
private def hub = lila.hub.Env.current
lazy val current = "[boot] tournament" describes new Env(
config = lila.common.PlayApp loadConfig "tournament",
system = lila.common.PlayApp.system,
db = lila.db.Env.current,
mongoCache = lila.memo.Env.current.mongoCache,
flood = lila.security.Env.current.flood,
hub = lila.hub.Env.current,
roundMap = lila.round.Env.current.roundMap,
roundSocketHub = lila.hub.Env.current.socket.round,
lightUser = lila.user.Env.current.lightUser,
isOnline = lila.user.Env.current.isOnline,
onStart = lila.game.Env.current.onStart,
secondsToMove = lila.game.Env.current.MandatorySecondsToMove,
trophyApi = lila.user.Env.current.trophyApi,
scheduler = lila.common.PlayApp.scheduler)
}
| Happy0/lila | modules/tournament/src/main/Env.scala | Scala | mit | 5,063 |
package com.mehmetakiftutuncu.classveobject
/**
* Created by akif on 05/01/16.
*/
object ScalaSingleton {
def selam(): Unit = {
println("Scala: Selam!")
}
}
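// Editor's note (hedged): being an `object`, the singleton is used without `new`,
// e.g. `ScalaSingleton.selam()` prints "Scala: Selam!".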
| mehmetakiftutuncu/ScalaBlogOrnekleri | ClassVeObject/src/com/mehmetakiftutuncu/classveobject/ScalaSingleton.scala | Scala | gpl-3.0 | 170 |
/**
* Copyright 2013-2014 Jorge Aliss (jaliss at gmail dot com) - twitter: @jaliss
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package securesocial.core.authenticator
import org.joda.time.DateTime
import scala.annotation.meta.getter
import scala.concurrent.{ExecutionContext, Future}
import play.api.mvc.SimpleResult
/**
* Base trait for the Cookie and Http Header based authenticators
*
* @tparam U the user object type
* @tparam T the authenticator type
*/
trait StoreBackedAuthenticator[U, T <: Authenticator[U]] extends Authenticator[U] {
@transient
protected val logger = play.api.Logger(this.getClass.getName)
@(transient @getter)
val store: AuthenticatorStore[T]
/**
* The time an authenticator is allowed to live in the store
*/
val absoluteTimeoutInSeconds: Int
/**
* The inactivity period after which an authenticator is considered invalid
*/
val idleTimeoutInMinutes: Int
/**
* Returns a copy of this authenticator with the given last used time
*
* @param time the new time
* @return the modified authenticator
*/
def withLastUsedTime(time: DateTime): T
/**
* Returns a copy of this Authenticator with the given user
*
* @param user the new user
* @return the modified authenticator
*/
def withUser(user: U): T
/**
* Updated the last used timestamp
*
* @return a future with the updated authenticator
*/
override def touch: Future[T] = {
val updated = withLastUsedTime(DateTime.now())
logger.debug(s"touched: lastUsed = $lastUsed")
store.save(updated, absoluteTimeoutInSeconds)
}
/**
* Updates the user information associated with this authenticator
*
* @param user the user object
* @return a future with the updated authenticator
*/
override def updateUser(user: U): Future[T] = {
val updated = withUser(user)
logger.debug(s"updated user: $updated")
store.save(updated, absoluteTimeoutInSeconds)
}
/**
* Checks if the authenticator has expired. This is an absolute timeout since the creation of
* the authenticator
*
* @return true if the authenticator has expired, false otherwise.
*/
def expired: Boolean = expirationDate.isBeforeNow
/**
* Checks if the time elapsed since the last time the authenticator was used is longer than
* the maximum idle timeout specified in the properties.
*
* @return true if the authenticator timed out, false otherwise.
*/
  def timedOut: Boolean = lastUsed.plusMinutes(idleTimeoutInMinutes).isBeforeNow
/**
* Checks if the authenticator is valid. For this implementation it means that the
* authenticator has not expired or timed out.
*
* @return true if the authenticator is valid, false otherwise.
*/
override def isValid: Boolean = !expired && !timedOut
/////// Result handling methods
/**
* Adds a touched authenticator to the result (for Scala). In this implementation there's no need
* to do anything with the result
*
   * @param result the result that is about to be sent to the client
   * @return a future with the unmodified result
*/
override def touching(result: SimpleResult): Future[SimpleResult] = {
Future.successful(result)
}
/**
* Adds a touched authenticator to the result(for Java). In this implementation there's no need
* to do anything with the result
*
* @param javaContext the current invocation context
*/
def touching(javaContext: play.mvc.Http.Context): Future[Unit] = {
Future.successful(())
}
/**
* Ends an authenticator session. This is invoked when the user logs out or if the
* authenticator becomes invalid (maybe due to a timeout)
*
* @param result the result that is about to be sent to the client.
* @return the result modified to signal the authenticator is no longer valid
*/
override def discarding(result: SimpleResult): Future[SimpleResult] = {
import ExecutionContext.Implicits.global
store.delete(id).map { _ => result }
}
/**
* Ends an authenticator session. This is invoked when the authenticator becomes invalid (for Java actions)
*
* @param javaContext the current http context
* @return the current http context modified to signal the authenticator is no longer valid
*/
override def discarding(javaContext: play.mvc.Http.Context): Future[Unit] = {
import ExecutionContext.Implicits.global
store.delete(id).map { _ => () }
}
} | matthewchartier/securesocial | module-code/app/securesocial/core/authenticator/StoreBackedAuthenticator.scala | Scala | apache-2.0 | 4,892 |
package org.genivi.sota.core.jsonrpc
/**
 * Trait including utilities for generating random JSON values
* Used by property-based tests
*/
trait JsonGen {
import io.circe.Json
import org.scalacheck.Arbitrary
import org.scalacheck.Gen
val JBooleanGen : Gen[Json] = Gen.oneOf(true, false).map( Json.fromBoolean )
val JStrGen : Gen[Json] = Gen.identifier.map( Json.fromString )
val JNumGen : Gen[Json] = Arbitrary.arbInt.arbitrary.map( Json.fromInt )
val JNullGen: Gen[Json] = Gen.const( Json.Null )
  val JsonGen : Gen[Json] = Gen.oneOf( JBooleanGen, JStrGen, JNumGen, JNullGen)
implicit val arbJson : Arbitrary[Json] = Arbitrary( JsonGen )
}
object JsonGen extends JsonGen
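// Editor's usage sketch (hedged, not part of the original file): with `arbJson` in scope,
// ScalaCheck properties can draw random JSON values directly, e.g.
//   forAll { (json: Json) => json.noSpaces.nonEmpty }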
| PDXostc/rvi_sota_server | core/src/test/scala/org/genivi/sota/core/jsonrpc/JsonGen.scala | Scala | mpl-2.0 | 696 |
/*--------------------------------------------------------------------------
* Copyright 2013 Taro L. Saito
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*--------------------------------------------------------------------------*/
//--------------------------------------
//
// LArrayLoaderTest.scala
// Since: 2013/03/29 9:56
//
//--------------------------------------
package xerial.larray.impl
import java.net.{URL, URLClassLoader}
import java.io.File
import xerial.larray.LArraySpec
/**
* @author Taro L. Saito
*/
class LArrayLoaderTest extends LArraySpec {
"LArrayLoader" should {
"use a different library name for each class loader" in {
val larrayNativeCls = "xerial.larray.impl.LArrayNative"
val cl = Thread.currentThread().getContextClassLoader
val parentCl = cl.getParent.getParent // Fix for sbt-0.13
debug(s"context cl: ${cl}, parent cl: $parentCl")
val classPath = Array(new File("larray-mmap/target/classes").toURI.toURL)
val cl1 = new URLClassLoader(classPath, parentCl)
val cl2 = new URLClassLoader(classPath, parentCl)
import java.{lang=>jl}
val nativeCls1 = cl1.loadClass(larrayNativeCls)
val ni1 = nativeCls1.newInstance()
val arr1 = Array.ofDim[Byte](100)
val m1 = nativeCls1.getDeclaredMethod("copyToArray", jl.Long.TYPE, classOf[AnyRef], jl.Integer.TYPE, jl.Integer.TYPE)
m1.invoke(ni1, Seq.apply[AnyRef](new jl.Long(0L), arr1, new jl.Integer(0), new jl.Integer(0)):_*)
val nativeCls1_2 = cl1.loadClass(larrayNativeCls)
val nativeCls2 = cl2.loadClass(larrayNativeCls)
val ni2 = nativeCls2.newInstance()
val arr2 = Array.ofDim[Byte](100)
val m2 = nativeCls2.getDeclaredMethod("copyToArray", jl.Long.TYPE, classOf[AnyRef], jl.Integer.TYPE, jl.Integer.TYPE)
      m2.invoke(ni2, Seq.apply[AnyRef](new jl.Long(0L), arr2, new jl.Integer(0), new jl.Integer(0)):_*)
nativeCls1 should not be (nativeCls2)
nativeCls1 should be (nativeCls1_2)
val arr3 = Array.ofDim[Byte](100)
LArrayNative.copyToArray(0, arr3, 0, 0)
}
}
} | xerial/larray | larray-mmap/src/test/scala/xerial/larray/impl/LArrayLoaderTest.scala | Scala | apache-2.0 | 2,693 |
import scala.tools.partest.SessionTest
object Test extends SessionTest {
override def stripMargins = false
def session =
"""
scala> object Y { def f[A](a: => A) = 1 ; def f[A](a: => Either[Exception, A]) = 2 }
<console>:11: error: double definition:
def f[A](a: => A): Int at line 11 and
def f[A](a: => Either[Exception,A]): Int at line 11
have same type after erasure: (a: Function0)Int
object Y { def f[A](a: => A) = 1 ; def f[A](a: => Either[Exception, A]) = 2 }
^
scala> object Y { def f[A](a: => A) = 1 ; def f[A](a: => Either[Exception, A]) = 2 }
<console>:11: error: double definition:
def f[A](a: => A): Int at line 11 and
def f[A](a: => Either[Exception,A]): Int at line 11
have same type after erasure: (a: Function0)Int
object Y { def f[A](a: => A) = 1 ; def f[A](a: => Either[Exception, A]) = 2 }
^
scala> object Y {
| def f[A](a: => A) = 1
| def f[A](a: => Either[Exception, A]) = 2
| }
<console>:13: error: double definition:
def f[A](a: => A): Int at line 12 and
def f[A](a: => Either[Exception,A]): Int at line 13
have same type after erasure: (a: Function0)Int
def f[A](a: => Either[Exception, A]) = 2
^
scala> :pa
// Entering paste mode (ctrl-D to finish)
object Y {
def f[A](a: => A) = 1
def f[A](a: => Either[Exception, A]) = 2
}
// Exiting paste mode, now interpreting.
<pastie>:13: error: double definition:
def f[A](a: => A): Int at line 12 and
def f[A](a: => Either[Exception,A]): Int at line 13
have same type after erasure: (a: Function0)Int
def f[A](a: => Either[Exception, A]) = 2
^
scala> :quit"""
}
| shimib/scala | test/files/run/t9170.scala | Scala | bsd-3-clause | 1,706 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
package collection
package generic
import scala.collection._
import mutable.Builder
/** @define coll collection
* @define Coll `Traversable`
* @define factoryInfo
* This object provides a set of operations to create `$Coll` values.
* @author Martin Odersky
* @version 2.8
* @define canBuildFromInfo
* The standard `CanBuildFrom` instance for $Coll objects.
* @see CanBuildFrom
* @define bitsetCanBuildFrom
* The standard `CanBuildFrom` instance for bitsets.
*/
trait BitSetFactory[Coll <: BitSet with BitSetLike[Coll]] {
def empty: Coll
def newBuilder: Builder[Int, Coll]
def apply(elems: Int*): Coll = (empty /: elems) (_ + _)
def bitsetCanBuildFrom = new CanBuildFrom[Coll, Int, Coll] {
def apply(from: Coll) = newBuilder
def apply() = newBuilder
}
}
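// Editor's note (hedged): concrete companions such as `scala.collection.immutable.BitSet` mix in
// this factory, so `BitSet(1, 2, 3)` resolves to `apply(elems: Int*)` above, folding each element
// into `empty` with `+`.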
| felixmulder/scala | src/library/scala/collection/generic/BitSetFactory.scala | Scala | bsd-3-clause | 1,349 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.lang.reflect.Modifier
import scala.reflect.{classTag, ClassTag}
import scala.reflect.runtime.universe.TypeTag
import org.apache.spark.annotation.Experimental
import org.apache.spark.sql.catalyst.analysis.GetColumnByOrdinal
import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder}
import org.apache.spark.sql.catalyst.expressions.{BoundReference, Cast}
import org.apache.spark.sql.catalyst.expressions.objects.{DecodeUsingSerializer, EncodeUsingSerializer}
import org.apache.spark.sql.types._
/**
* :: Experimental ::
* Methods for creating an [[Encoder]].
*
* @since 1.6.0
*/
@Experimental
object Encoders {
/**
* An encoder for nullable boolean type.
* The Scala primitive encoder is available as [[scalaBoolean]].
* @since 1.6.0
*/
def BOOLEAN: Encoder[java.lang.Boolean] = ExpressionEncoder()
/**
* An encoder for nullable byte type.
* The Scala primitive encoder is available as [[scalaByte]].
* @since 1.6.0
*/
def BYTE: Encoder[java.lang.Byte] = ExpressionEncoder()
/**
* An encoder for nullable short type.
* The Scala primitive encoder is available as [[scalaShort]].
* @since 1.6.0
*/
def SHORT: Encoder[java.lang.Short] = ExpressionEncoder()
/**
* An encoder for nullable int type.
* The Scala primitive encoder is available as [[scalaInt]].
* @since 1.6.0
*/
def INT: Encoder[java.lang.Integer] = ExpressionEncoder()
/**
* An encoder for nullable long type.
* The Scala primitive encoder is available as [[scalaLong]].
* @since 1.6.0
*/
def LONG: Encoder[java.lang.Long] = ExpressionEncoder()
/**
* An encoder for nullable float type.
* The Scala primitive encoder is available as [[scalaFloat]].
* @since 1.6.0
*/
def FLOAT: Encoder[java.lang.Float] = ExpressionEncoder()
/**
* An encoder for nullable double type.
* The Scala primitive encoder is available as [[scalaDouble]].
* @since 1.6.0
*/
def DOUBLE: Encoder[java.lang.Double] = ExpressionEncoder()
/**
* An encoder for nullable string type.
*
* @since 1.6.0
*/
def STRING: Encoder[java.lang.String] = ExpressionEncoder()
/**
* An encoder for nullable decimal type.
*
* @since 1.6.0
*/
def DECIMAL: Encoder[java.math.BigDecimal] = ExpressionEncoder()
/**
* An encoder for nullable date type.
*
* @since 1.6.0
*/
def DATE: Encoder[java.sql.Date] = ExpressionEncoder()
/**
* An encoder for nullable timestamp type.
*
* @since 1.6.0
*/
def TIMESTAMP: Encoder[java.sql.Timestamp] = ExpressionEncoder()
/**
* An encoder for arrays of bytes.
*
* @since 1.6.1
*/
def BINARY: Encoder[Array[Byte]] = ExpressionEncoder()
/**
* Creates an encoder for Java Bean of type T.
*
* T must be publicly accessible.
*
* supported types for java bean field:
* - primitive types: boolean, int, double, etc.
* - boxed types: Boolean, Integer, Double, etc.
* - String
* - java.math.BigDecimal
* - time related: java.sql.Date, java.sql.Timestamp
* - collection types: only array and java.util.List currently, map support is in progress
* - nested java bean.
*
* @since 1.6.0
*/
def bean[T](beanClass: Class[T]): Encoder[T] = ExpressionEncoder.javaBean(beanClass)
/**
* (Scala-specific) Creates an encoder that serializes objects of type T using Kryo.
* This encoder maps T into a single byte array (binary) field.
*
* T must be publicly accessible.
*
* @since 1.6.0
*/
def kryo[T: ClassTag]: Encoder[T] = genericSerializer(useKryo = true)
/**
* Creates an encoder that serializes objects of type T using Kryo.
* This encoder maps T into a single byte array (binary) field.
*
* T must be publicly accessible.
*
* @since 1.6.0
*/
def kryo[T](clazz: Class[T]): Encoder[T] = kryo(ClassTag[T](clazz))
/**
* (Scala-specific) Creates an encoder that serializes objects of type T using generic Java
* serialization. This encoder maps T into a single byte array (binary) field.
*
* Note that this is extremely inefficient and should only be used as the last resort.
*
* T must be publicly accessible.
*
* @since 1.6.0
*/
def javaSerialization[T: ClassTag]: Encoder[T] = genericSerializer(useKryo = false)
/**
* Creates an encoder that serializes objects of type T using generic Java serialization.
* This encoder maps T into a single byte array (binary) field.
*
* Note that this is extremely inefficient and should only be used as the last resort.
*
* T must be publicly accessible.
*
* @since 1.6.0
*/
def javaSerialization[T](clazz: Class[T]): Encoder[T] = javaSerialization(ClassTag[T](clazz))
/** Throws an exception if T is not a public class. */
private def validatePublicClass[T: ClassTag](): Unit = {
if (!Modifier.isPublic(classTag[T].runtimeClass.getModifiers)) {
throw new UnsupportedOperationException(
s"${classTag[T].runtimeClass.getName} is not a public class. " +
"Only public classes are supported.")
}
}
/** A way to construct encoders using generic serializers. */
private def genericSerializer[T: ClassTag](useKryo: Boolean): Encoder[T] = {
if (classTag[T].runtimeClass.isPrimitive) {
throw new UnsupportedOperationException("Primitive types are not supported.")
}
validatePublicClass[T]()
ExpressionEncoder[T](
schema = new StructType().add("value", BinaryType),
flat = true,
serializer = Seq(
EncodeUsingSerializer(
BoundReference(0, ObjectType(classOf[AnyRef]), nullable = true), kryo = useKryo)),
deserializer =
DecodeUsingSerializer[T](
Cast(GetColumnByOrdinal(0, BinaryType), BinaryType),
classTag[T],
kryo = useKryo),
clsTag = classTag[T]
)
}
/**
* An encoder for 2-ary tuples.
*
* @since 1.6.0
*/
def tuple[T1, T2](
e1: Encoder[T1],
e2: Encoder[T2]): Encoder[(T1, T2)] = {
ExpressionEncoder.tuple(encoderFor(e1), encoderFor(e2))
}
/**
* An encoder for 3-ary tuples.
*
* @since 1.6.0
*/
def tuple[T1, T2, T3](
e1: Encoder[T1],
e2: Encoder[T2],
e3: Encoder[T3]): Encoder[(T1, T2, T3)] = {
ExpressionEncoder.tuple(encoderFor(e1), encoderFor(e2), encoderFor(e3))
}
/**
* An encoder for 4-ary tuples.
*
* @since 1.6.0
*/
def tuple[T1, T2, T3, T4](
e1: Encoder[T1],
e2: Encoder[T2],
e3: Encoder[T3],
e4: Encoder[T4]): Encoder[(T1, T2, T3, T4)] = {
ExpressionEncoder.tuple(encoderFor(e1), encoderFor(e2), encoderFor(e3), encoderFor(e4))
}
/**
* An encoder for 5-ary tuples.
*
* @since 1.6.0
*/
def tuple[T1, T2, T3, T4, T5](
e1: Encoder[T1],
e2: Encoder[T2],
e3: Encoder[T3],
e4: Encoder[T4],
e5: Encoder[T5]): Encoder[(T1, T2, T3, T4, T5)] = {
ExpressionEncoder.tuple(
encoderFor(e1), encoderFor(e2), encoderFor(e3), encoderFor(e4), encoderFor(e5))
}
/**
* An encoder for Scala's product type (tuples, case classes, etc).
* @since 2.0.0
*/
def product[T <: Product : TypeTag]: Encoder[T] = ExpressionEncoder()
/**
* An encoder for Scala's primitive int type.
* @since 2.0.0
*/
def scalaInt: Encoder[Int] = ExpressionEncoder()
/**
* An encoder for Scala's primitive long type.
* @since 2.0.0
*/
def scalaLong: Encoder[Long] = ExpressionEncoder()
/**
* An encoder for Scala's primitive double type.
* @since 2.0.0
*/
def scalaDouble: Encoder[Double] = ExpressionEncoder()
/**
* An encoder for Scala's primitive float type.
* @since 2.0.0
*/
def scalaFloat: Encoder[Float] = ExpressionEncoder()
/**
* An encoder for Scala's primitive byte type.
* @since 2.0.0
*/
def scalaByte: Encoder[Byte] = ExpressionEncoder()
/**
* An encoder for Scala's primitive short type.
* @since 2.0.0
*/
def scalaShort: Encoder[Short] = ExpressionEncoder()
/**
* An encoder for Scala's primitive boolean type.
* @since 2.0.0
*/
def scalaBoolean: Encoder[Boolean] = ExpressionEncoder()
}
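/**
 * Editor's usage sketch (hedged, not part of the original Spark source): composing encoders from
 * the factory methods above.
 */
private[sql] object EncodersUsageSketch {
  // A tuple encoder built from two nullable primitive encoders.
  val pair: Encoder[(String, java.lang.Integer)] = Encoders.tuple(Encoders.STRING, Encoders.INT)
  // A Scala-primitive encoder and a Kryo-backed generic encoder.
  val ints: Encoder[Int] = Encoders.scalaInt
  val kryoSeq: Encoder[Seq[String]] = Encoders.kryo[Seq[String]]
}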
| gioenn/xSpark | sql/catalyst/src/main/scala/org/apache/spark/sql/Encoders.scala | Scala | apache-2.0 | 8,998 |
// // This test doesn't do anything. It is to test if the Pi Worker does what it is supposed to do.
// package pi
// import akka.actor.{ ActorSystem, Actor, Props, ActorRef }
// import akka.bita.RandomScheduleHelper
// import akka.bita.pattern.Patterns._
// import akka.util.duration._
// import org.scalatest._
// import akka.testkit.TestProbe
// import util.BitaTests
// class PiWorkerSpec extends BitaTests {
// // The name of this test battery
// override def name = "pi"
// // This will hold the actor/testcase/application under test
// def run {
// system = ActorSystem("ActorSystem")
// if (random) {
// RandomScheduleHelper.setMaxDelay(250) // Increase the delay between messages to 250 ms
// RandomScheduleHelper.setSystem(system)
// }
// try {
// val probe = new TestProbe(system) // Use a testprobe to represent the tests.
// val worker = system.actorOf(Worker())
// probe.send(worker, Work(2000, 1000)) // Ask the result
// val result = probe.expectMsgType[Result](timeout.duration)
// if (result == Result(1.6666664467593578E-4)) {
// println(Console.GREEN + Console.BOLD+"**SUCCESS**"+Console.RESET)
// bugDetected = false
// } else {
// println(Console.RED + Console.BOLD+"**FAILURE**"+Console.RESET)
// bugDetected = true
// }
// } catch {
// case e: AssertionError => {
// bugDetected = true
// println(Console.YELLOW + Console.BOLD+"**WARNING** %s".format(e.getMessage()) + Console.RESET)
// }
// case e: java.util.concurrent.TimeoutException => {
// bugDetected = true
// println(Console.YELLOW + Console.BOLD+"**WARNING** %s".format(e.getMessage()) + Console.RESET)
// }
// }
// }
// } | Tjoene/thesis | benchmark/src/test/scala/pi/PiWorkerSpec.scala | Scala | gpl-2.0 | 1,965 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.parser
import java.util.Locale
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, GlobalTempView, LocalTempView, PersistedView, UnresolvedAttribute, UnresolvedNamespace, UnresolvedRelation, UnresolvedStar, UnresolvedTable, UnresolvedTableOrView}
import org.apache.spark.sql.catalyst.catalog.{ArchiveResource, BucketSpec, FileResource, FunctionResource, FunctionResourceType, JarResource}
import org.apache.spark.sql.catalyst.expressions.{EqualTo, Literal}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.connector.catalog.TableChange.ColumnPosition.{after, first}
import org.apache.spark.sql.connector.expressions.{ApplyTransform, BucketTransform, DaysTransform, FieldReference, HoursTransform, IdentityTransform, LiteralValue, MonthsTransform, Transform, YearsTransform}
import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructType, TimestampType}
import org.apache.spark.unsafe.types.UTF8String
class DDLParserSuite extends AnalysisTest {
import CatalystSqlParser._
private def assertUnsupported(sql: String, containsThesePhrases: Seq[String] = Seq()): Unit = {
val e = intercept[ParseException] {
parsePlan(sql)
}
assert(e.getMessage.toLowerCase(Locale.ROOT).contains("operation not allowed"))
containsThesePhrases.foreach { p =>
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(p.toLowerCase(Locale.ROOT)))
}
}
private def intercept(sqlCommand: String, messages: String*): Unit =
interceptParseException(parsePlan)(sqlCommand, messages: _*)
private def parseCompare(sql: String, expected: LogicalPlan): Unit = {
comparePlans(parsePlan(sql), expected, checkAnalysis = false)
}
test("SPARK-30098: create table without provider should " +
"use default data source under non-legacy mode") {
val createSql = "CREATE TABLE my_tab(a INT COMMENT 'test', b STRING)"
val defaultProvider = conf.defaultDataSourceName
val expectedPlan = CreateTableStatement(
Seq("my_tab"),
new StructType()
.add("a", IntegerType, nullable = true, "test")
.add("b", StringType),
Seq.empty[Transform],
None,
Map.empty[String, String],
defaultProvider,
Map.empty[String, String],
None,
None,
false)
parseCompare(createSql, expectedPlan)
}
test("create/replace table using - schema") {
val createSql = "CREATE TABLE my_tab(a INT COMMENT 'test', b STRING NOT NULL) USING parquet"
val replaceSql = "REPLACE TABLE my_tab(a INT COMMENT 'test', b STRING NOT NULL) USING parquet"
val expectedTableSpec = TableSpec(
Seq("my_tab"),
Some(new StructType()
.add("a", IntegerType, nullable = true, "test")
.add("b", StringType, nullable = false)),
Seq.empty[Transform],
None,
Map.empty[String, String],
"parquet",
Map.empty[String, String],
None,
None)
Seq(createSql, replaceSql).foreach { sql =>
testCreateOrReplaceDdl(sql, expectedTableSpec, expectedIfNotExists = false)
}
intercept("CREATE TABLE my_tab(a: INT COMMENT 'test', b: STRING) USING parquet",
"no viable alternative at input")
}
test("create/replace table - with IF NOT EXISTS") {
val sql = "CREATE TABLE IF NOT EXISTS my_tab(a INT, b STRING) USING parquet"
testCreateOrReplaceDdl(
sql,
TableSpec(
Seq("my_tab"),
Some(new StructType().add("a", IntegerType).add("b", StringType)),
Seq.empty[Transform],
None,
Map.empty[String, String],
"parquet",
Map.empty[String, String],
None,
None),
expectedIfNotExists = true)
}
test("create/replace table - with partitioned by") {
val createSql = "CREATE TABLE my_tab(a INT comment 'test', b STRING) " +
"USING parquet PARTITIONED BY (a)"
val replaceSql = "REPLACE TABLE my_tab(a INT comment 'test', b STRING) " +
"USING parquet PARTITIONED BY (a)"
val expectedTableSpec = TableSpec(
Seq("my_tab"),
Some(new StructType()
.add("a", IntegerType, nullable = true, "test")
.add("b", StringType)),
Seq(IdentityTransform(FieldReference("a"))),
None,
Map.empty[String, String],
"parquet",
Map.empty[String, String],
None,
None)
Seq(createSql, replaceSql).foreach { sql =>
testCreateOrReplaceDdl(sql, expectedTableSpec, expectedIfNotExists = false)
}
}
test("create/replace table - partitioned by transforms") {
val createSql =
"""
|CREATE TABLE my_tab (a INT, b STRING, ts TIMESTAMP) USING parquet
|PARTITIONED BY (
| a,
| bucket(16, b),
| years(ts),
| months(ts),
| days(ts),
| hours(ts),
| foo(a, "bar", 34))
""".stripMargin
val replaceSql =
"""
|REPLACE TABLE my_tab (a INT, b STRING, ts TIMESTAMP) USING parquet
|PARTITIONED BY (
| a,
| bucket(16, b),
| years(ts),
| months(ts),
| days(ts),
| hours(ts),
| foo(a, "bar", 34))
""".stripMargin
val expectedTableSpec = TableSpec(
Seq("my_tab"),
Some(new StructType()
.add("a", IntegerType)
.add("b", StringType)
.add("ts", TimestampType)),
Seq(
IdentityTransform(FieldReference("a")),
BucketTransform(LiteralValue(16, IntegerType), Seq(FieldReference("b"))),
YearsTransform(FieldReference("ts")),
MonthsTransform(FieldReference("ts")),
DaysTransform(FieldReference("ts")),
HoursTransform(FieldReference("ts")),
ApplyTransform("foo", Seq(
FieldReference("a"),
LiteralValue(UTF8String.fromString("bar"), StringType),
LiteralValue(34, IntegerType)))),
None,
Map.empty[String, String],
"parquet",
Map.empty[String, String],
None,
None)
Seq(createSql, replaceSql).foreach { sql =>
testCreateOrReplaceDdl(sql, expectedTableSpec, expectedIfNotExists = false)
}
}
test("create/replace table - with bucket") {
val createSql = "CREATE TABLE my_tab(a INT, b STRING) USING parquet " +
"CLUSTERED BY (a) SORTED BY (b) INTO 5 BUCKETS"
val replaceSql = "REPLACE TABLE my_tab(a INT, b STRING) USING parquet " +
"CLUSTERED BY (a) SORTED BY (b) INTO 5 BUCKETS"
val expectedTableSpec = TableSpec(
Seq("my_tab"),
Some(new StructType().add("a", IntegerType).add("b", StringType)),
Seq.empty[Transform],
Some(BucketSpec(5, Seq("a"), Seq("b"))),
Map.empty[String, String],
"parquet",
Map.empty[String, String],
None,
None)
Seq(createSql, replaceSql).foreach { sql =>
testCreateOrReplaceDdl(sql, expectedTableSpec, expectedIfNotExists = false)
}
}
test("create/replace table - with comment") {
val createSql = "CREATE TABLE my_tab(a INT, b STRING) USING parquet COMMENT 'abc'"
val replaceSql = "REPLACE TABLE my_tab(a INT, b STRING) USING parquet COMMENT 'abc'"
val expectedTableSpec = TableSpec(
Seq("my_tab"),
Some(new StructType().add("a", IntegerType).add("b", StringType)),
Seq.empty[Transform],
None,
Map.empty[String, String],
"parquet",
Map.empty[String, String],
None,
Some("abc"))
    Seq(createSql, replaceSql).foreach { sql =>
testCreateOrReplaceDdl(sql, expectedTableSpec, expectedIfNotExists = false)
}
}
test("create/replace table - with table properties") {
val createSql = "CREATE TABLE my_tab(a INT, b STRING) USING parquet" +
" TBLPROPERTIES('test' = 'test')"
val replaceSql = "REPLACE TABLE my_tab(a INT, b STRING) USING parquet" +
" TBLPROPERTIES('test' = 'test')"
val expectedTableSpec = TableSpec(
Seq("my_tab"),
Some(new StructType().add("a", IntegerType).add("b", StringType)),
Seq.empty[Transform],
None,
Map("test" -> "test"),
"parquet",
Map.empty[String, String],
None,
None)
Seq(createSql, replaceSql).foreach { sql =>
testCreateOrReplaceDdl(sql, expectedTableSpec, expectedIfNotExists = false)
}
}
test("create/replace table - with location") {
val createSql = "CREATE TABLE my_tab(a INT, b STRING) USING parquet LOCATION '/tmp/file'"
val replaceSql = "REPLACE TABLE my_tab(a INT, b STRING) USING parquet LOCATION '/tmp/file'"
val expectedTableSpec = TableSpec(
Seq("my_tab"),
Some(new StructType().add("a", IntegerType).add("b", StringType)),
Seq.empty[Transform],
None,
Map.empty[String, String],
"parquet",
Map.empty[String, String],
Some("/tmp/file"),
None)
Seq(createSql, replaceSql).foreach { sql =>
testCreateOrReplaceDdl(sql, expectedTableSpec, expectedIfNotExists = false)
}
}
test("create/replace table - byte length literal table name") {
val createSql = "CREATE TABLE 1m.2g(a INT) USING parquet"
val replaceSql = "REPLACE TABLE 1m.2g(a INT) USING parquet"
val expectedTableSpec = TableSpec(
Seq("1m", "2g"),
Some(new StructType().add("a", IntegerType)),
Seq.empty[Transform],
None,
Map.empty[String, String],
"parquet",
Map.empty[String, String],
None,
None)
Seq(createSql, replaceSql).foreach { sql =>
testCreateOrReplaceDdl(sql, expectedTableSpec, expectedIfNotExists = false)
}
}
test("Duplicate clauses - create/replace table") {
def createTableHeader(duplicateClause: String): String = {
s"CREATE TABLE my_tab(a INT, b STRING) USING parquet $duplicateClause $duplicateClause"
}
    def replaceTableHeader(duplicateClause: String): String = {
      s"REPLACE TABLE my_tab(a INT, b STRING) USING parquet $duplicateClause $duplicateClause"
    }
intercept(createTableHeader("TBLPROPERTIES('test' = 'test2')"),
"Found duplicate clauses: TBLPROPERTIES")
intercept(createTableHeader("LOCATION '/tmp/file'"),
"Found duplicate clauses: LOCATION")
intercept(createTableHeader("COMMENT 'a table'"),
"Found duplicate clauses: COMMENT")
intercept(createTableHeader("CLUSTERED BY(b) INTO 256 BUCKETS"),
"Found duplicate clauses: CLUSTERED BY")
intercept(createTableHeader("PARTITIONED BY (b)"),
"Found duplicate clauses: PARTITIONED BY")
intercept(replaceTableHeader("TBLPROPERTIES('test' = 'test2')"),
"Found duplicate clauses: TBLPROPERTIES")
intercept(replaceTableHeader("LOCATION '/tmp/file'"),
"Found duplicate clauses: LOCATION")
intercept(replaceTableHeader("COMMENT 'a table'"),
"Found duplicate clauses: COMMENT")
intercept(replaceTableHeader("CLUSTERED BY(b) INTO 256 BUCKETS"),
"Found duplicate clauses: CLUSTERED BY")
intercept(replaceTableHeader("PARTITIONED BY (b)"),
"Found duplicate clauses: PARTITIONED BY")
}
test("support for other types in OPTIONS") {
val createSql =
"""
|CREATE TABLE table_name USING json
|OPTIONS (a 1, b 0.1, c TRUE)
""".stripMargin
val replaceSql =
"""
|REPLACE TABLE table_name USING json
|OPTIONS (a 1, b 0.1, c TRUE)
""".stripMargin
Seq(createSql, replaceSql).foreach { sql =>
testCreateOrReplaceDdl(
sql,
TableSpec(
Seq("table_name"),
Some(new StructType),
Seq.empty[Transform],
Option.empty[BucketSpec],
Map.empty[String, String],
"json",
Map("a" -> "1", "b" -> "0.1", "c" -> "true"),
None,
None),
expectedIfNotExists = false)
}
}
test("Test CTAS against native tables") {
val s1 =
"""
|CREATE TABLE IF NOT EXISTS mydb.page_view
|USING parquet
|COMMENT 'This is the staging page view table'
|LOCATION '/user/external/page_view'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
|AS SELECT * FROM src
""".stripMargin
val s2 =
"""
|CREATE TABLE IF NOT EXISTS mydb.page_view
|USING parquet
|LOCATION '/user/external/page_view'
|COMMENT 'This is the staging page view table'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
|AS SELECT * FROM src
""".stripMargin
val s3 =
"""
|CREATE TABLE IF NOT EXISTS mydb.page_view
|USING parquet
|COMMENT 'This is the staging page view table'
|LOCATION '/user/external/page_view'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
|AS SELECT * FROM src
""".stripMargin
val s4 =
"""
|REPLACE TABLE mydb.page_view
|USING parquet
|COMMENT 'This is the staging page view table'
|LOCATION '/user/external/page_view'
|TBLPROPERTIES ('p1'='v1', 'p2'='v2')
|AS SELECT * FROM src
""".stripMargin
val expectedTableSpec = TableSpec(
Seq("mydb", "page_view"),
None,
Seq.empty[Transform],
None,
Map("p1" -> "v1", "p2" -> "v2"),
"parquet",
Map.empty[String, String],
Some("/user/external/page_view"),
Some("This is the staging page view table"))
Seq(s1, s2, s3, s4).foreach { sql =>
testCreateOrReplaceDdl(sql, expectedTableSpec, expectedIfNotExists = true)
}
}
test("drop table") {
parseCompare("DROP TABLE testcat.ns1.ns2.tbl",
DropTableStatement(Seq("testcat", "ns1", "ns2", "tbl"), ifExists = false, purge = false))
parseCompare(s"DROP TABLE db.tab",
DropTableStatement(Seq("db", "tab"), ifExists = false, purge = false))
parseCompare(s"DROP TABLE IF EXISTS db.tab",
DropTableStatement(Seq("db", "tab"), ifExists = true, purge = false))
parseCompare(s"DROP TABLE tab",
DropTableStatement(Seq("tab"), ifExists = false, purge = false))
parseCompare(s"DROP TABLE IF EXISTS tab",
DropTableStatement(Seq("tab"), ifExists = true, purge = false))
parseCompare(s"DROP TABLE tab PURGE",
DropTableStatement(Seq("tab"), ifExists = false, purge = true))
parseCompare(s"DROP TABLE IF EXISTS tab PURGE",
DropTableStatement(Seq("tab"), ifExists = true, purge = true))
}
test("drop view") {
parseCompare(s"DROP VIEW testcat.db.view",
DropViewStatement(Seq("testcat", "db", "view"), ifExists = false))
parseCompare(s"DROP VIEW db.view", DropViewStatement(Seq("db", "view"), ifExists = false))
parseCompare(s"DROP VIEW IF EXISTS db.view",
DropViewStatement(Seq("db", "view"), ifExists = true))
parseCompare(s"DROP VIEW view", DropViewStatement(Seq("view"), ifExists = false))
parseCompare(s"DROP VIEW IF EXISTS view", DropViewStatement(Seq("view"), ifExists = true))
}
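  // Shared check for CREATE/REPLACE TABLE tests: parses the statement, verifies that the
  // plan type matches the leading keyword (and the IF NOT EXISTS flag for CREATE), then
  // compares the extracted TableSpec with the expected one.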
private def testCreateOrReplaceDdl(
sqlStatement: String,
tableSpec: TableSpec,
expectedIfNotExists: Boolean): Unit = {
val parsedPlan = parsePlan(sqlStatement)
val newTableToken = sqlStatement.split(" ")(0).trim.toUpperCase(Locale.ROOT)
parsedPlan match {
case create: CreateTableStatement if newTableToken == "CREATE" =>
assert(create.ifNotExists == expectedIfNotExists)
case ctas: CreateTableAsSelectStatement if newTableToken == "CREATE" =>
assert(ctas.ifNotExists == expectedIfNotExists)
      case _: ReplaceTableStatement if newTableToken == "REPLACE" =>
      case _: ReplaceTableAsSelectStatement if newTableToken == "REPLACE" =>
case other =>
fail("First token in statement does not match the expected parsed plan; CREATE TABLE" +
" should create a CreateTableStatement, and REPLACE TABLE should create a" +
s" ReplaceTableStatement. Statement: $sqlStatement, plan type:" +
s" ${parsedPlan.getClass.getName}.")
}
assert(TableSpec(parsedPlan) === tableSpec)
}
// ALTER VIEW view_name SET TBLPROPERTIES ('comment' = new_comment);
// ALTER VIEW view_name UNSET TBLPROPERTIES [IF EXISTS] ('comment', 'key');
test("alter view: alter view properties") {
val sql1_view = "ALTER VIEW table_name SET TBLPROPERTIES ('test' = 'test', " +
"'comment' = 'new_comment')"
val sql2_view = "ALTER VIEW table_name UNSET TBLPROPERTIES ('comment', 'test')"
val sql3_view = "ALTER VIEW table_name UNSET TBLPROPERTIES IF EXISTS ('comment', 'test')"
comparePlans(parsePlan(sql1_view),
AlterViewSetPropertiesStatement(
Seq("table_name"), Map("test" -> "test", "comment" -> "new_comment")))
comparePlans(parsePlan(sql2_view),
AlterViewUnsetPropertiesStatement(
Seq("table_name"), Seq("comment", "test"), ifExists = false))
comparePlans(parsePlan(sql3_view),
AlterViewUnsetPropertiesStatement(
Seq("table_name"), Seq("comment", "test"), ifExists = true))
}
// ALTER TABLE table_name SET TBLPROPERTIES ('comment' = new_comment);
// ALTER TABLE table_name UNSET TBLPROPERTIES [IF EXISTS] ('comment', 'key');
test("alter table: alter table properties") {
val sql1_table = "ALTER TABLE table_name SET TBLPROPERTIES ('test' = 'test', " +
"'comment' = 'new_comment')"
val sql2_table = "ALTER TABLE table_name UNSET TBLPROPERTIES ('comment', 'test')"
val sql3_table = "ALTER TABLE table_name UNSET TBLPROPERTIES IF EXISTS ('comment', 'test')"
comparePlans(
parsePlan(sql1_table),
AlterTableSetPropertiesStatement(
Seq("table_name"), Map("test" -> "test", "comment" -> "new_comment")))
comparePlans(
parsePlan(sql2_table),
AlterTableUnsetPropertiesStatement(
Seq("table_name"), Seq("comment", "test"), ifExists = false))
comparePlans(
parsePlan(sql3_table),
AlterTableUnsetPropertiesStatement(
Seq("table_name"), Seq("comment", "test"), ifExists = true))
}
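  // The QualifiedColType expectations below are positional: column name parts, data type,
  // nullable, optional comment, optional position.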
test("alter table: add column") {
comparePlans(
parsePlan("ALTER TABLE table_name ADD COLUMN x int"),
AlterTableAddColumnsStatement(Seq("table_name"), Seq(
QualifiedColType(Seq("x"), IntegerType, true, None, None)
)))
}
test("alter table: add multiple columns") {
comparePlans(
parsePlan("ALTER TABLE table_name ADD COLUMNS x int, y string"),
AlterTableAddColumnsStatement(Seq("table_name"), Seq(
QualifiedColType(Seq("x"), IntegerType, true, None, None),
QualifiedColType(Seq("y"), StringType, true, None, None)
)))
}
test("alter table: add column with COLUMNS") {
comparePlans(
parsePlan("ALTER TABLE table_name ADD COLUMNS x int"),
AlterTableAddColumnsStatement(Seq("table_name"), Seq(
QualifiedColType(Seq("x"), IntegerType, true, None, None)
)))
}
test("alter table: add column with COLUMNS (...)") {
comparePlans(
parsePlan("ALTER TABLE table_name ADD COLUMNS (x int)"),
AlterTableAddColumnsStatement(Seq("table_name"), Seq(
QualifiedColType(Seq("x"), IntegerType, true, None, None)
)))
}
test("alter table: add column with COLUMNS (...) and COMMENT") {
comparePlans(
parsePlan("ALTER TABLE table_name ADD COLUMNS (x int COMMENT 'doc')"),
AlterTableAddColumnsStatement(Seq("table_name"), Seq(
QualifiedColType(Seq("x"), IntegerType, true, Some("doc"), None)
)))
}
test("alter table: add non-nullable column") {
comparePlans(
parsePlan("ALTER TABLE table_name ADD COLUMN x int NOT NULL"),
AlterTableAddColumnsStatement(Seq("table_name"), Seq(
QualifiedColType(Seq("x"), IntegerType, false, None, None)
)))
}
test("alter table: add column with COMMENT") {
comparePlans(
parsePlan("ALTER TABLE table_name ADD COLUMN x int COMMENT 'doc'"),
AlterTableAddColumnsStatement(Seq("table_name"), Seq(
QualifiedColType(Seq("x"), IntegerType, true, Some("doc"), None)
)))
}
test("alter table: add column with position") {
comparePlans(
parsePlan("ALTER TABLE table_name ADD COLUMN x int FIRST"),
AlterTableAddColumnsStatement(Seq("table_name"), Seq(
QualifiedColType(Seq("x"), IntegerType, true, None, Some(first()))
)))
comparePlans(
parsePlan("ALTER TABLE table_name ADD COLUMN x int AFTER y"),
AlterTableAddColumnsStatement(Seq("table_name"), Seq(
QualifiedColType(Seq("x"), IntegerType, true, None, Some(after("y")))
)))
}
test("alter table: add column with nested column name") {
comparePlans(
parsePlan("ALTER TABLE table_name ADD COLUMN x.y.z int COMMENT 'doc'"),
AlterTableAddColumnsStatement(Seq("table_name"), Seq(
QualifiedColType(Seq("x", "y", "z"), IntegerType, true, Some("doc"), None)
)))
}
test("alter table: add multiple columns with nested column name") {
comparePlans(
parsePlan("ALTER TABLE table_name ADD COLUMN x.y.z int COMMENT 'doc', a.b string FIRST"),
AlterTableAddColumnsStatement(Seq("table_name"), Seq(
QualifiedColType(Seq("x", "y", "z"), IntegerType, true, Some("doc"), None),
QualifiedColType(Seq("a", "b"), StringType, true, None, Some(first()))
)))
}
test("alter table: set location") {
comparePlans(
parsePlan("ALTER TABLE a.b.c SET LOCATION 'new location'"),
AlterTableSetLocationStatement(Seq("a", "b", "c"), None, "new location"))
comparePlans(
parsePlan("ALTER TABLE a.b.c PARTITION(ds='2017-06-10') SET LOCATION 'new location'"),
AlterTableSetLocationStatement(
Seq("a", "b", "c"),
Some(Map("ds" -> "2017-06-10")),
"new location"))
}
test("alter table: rename column") {
comparePlans(
parsePlan("ALTER TABLE table_name RENAME COLUMN a.b.c TO d"),
AlterTableRenameColumnStatement(
Seq("table_name"),
Seq("a", "b", "c"),
"d"))
}
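  // The AlterTableAlterColumnStatement expectations below are positional: table name parts,
  // column name parts, then the optional new data type, nullability, comment, and position.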
test("alter table: update column type using ALTER") {
comparePlans(
parsePlan("ALTER TABLE table_name ALTER COLUMN a.b.c TYPE bigint"),
AlterTableAlterColumnStatement(
Seq("table_name"),
Seq("a", "b", "c"),
Some(LongType),
None,
None,
None))
}
test("alter table: update column type") {
comparePlans(
parsePlan("ALTER TABLE table_name CHANGE COLUMN a.b.c TYPE bigint"),
AlterTableAlterColumnStatement(
Seq("table_name"),
Seq("a", "b", "c"),
Some(LongType),
None,
None,
None))
}
test("alter table: update column comment") {
comparePlans(
parsePlan("ALTER TABLE table_name CHANGE COLUMN a.b.c COMMENT 'new comment'"),
AlterTableAlterColumnStatement(
Seq("table_name"),
Seq("a", "b", "c"),
None,
None,
Some("new comment"),
None))
}
test("alter table: update column position") {
comparePlans(
parsePlan("ALTER TABLE table_name CHANGE COLUMN a.b.c FIRST"),
AlterTableAlterColumnStatement(
Seq("table_name"),
Seq("a", "b", "c"),
None,
None,
None,
Some(first())))
}
test("alter table: mutiple property changes are not allowed") {
intercept[ParseException] {
parsePlan("ALTER TABLE table_name ALTER COLUMN a.b.c " +
"TYPE bigint COMMENT 'new comment'")}
intercept[ParseException] {
parsePlan("ALTER TABLE table_name ALTER COLUMN a.b.c " +
"TYPE bigint COMMENT AFTER d")}
intercept[ParseException] {
parsePlan("ALTER TABLE table_name ALTER COLUMN a.b.c " +
"TYPE bigint COMMENT 'new comment' AFTER d")}
}
test("alter table: SET/DROP NOT NULL") {
comparePlans(
parsePlan("ALTER TABLE table_name ALTER COLUMN a.b.c SET NOT NULL"),
AlterTableAlterColumnStatement(
Seq("table_name"),
Seq("a", "b", "c"),
None,
Some(false),
None,
None))
comparePlans(
parsePlan("ALTER TABLE table_name ALTER COLUMN a.b.c DROP NOT NULL"),
AlterTableAlterColumnStatement(
Seq("table_name"),
Seq("a", "b", "c"),
None,
Some(true),
None,
None))
}
test("alter table: drop column") {
comparePlans(
parsePlan("ALTER TABLE table_name DROP COLUMN a.b.c"),
AlterTableDropColumnsStatement(Seq("table_name"), Seq(Seq("a", "b", "c"))))
}
test("alter table: drop multiple columns") {
val sql = "ALTER TABLE table_name DROP COLUMN x, y, a.b.c"
Seq(sql, sql.replace("COLUMN", "COLUMNS")).foreach { drop =>
comparePlans(
parsePlan(drop),
AlterTableDropColumnsStatement(
Seq("table_name"),
Seq(Seq("x"), Seq("y"), Seq("a", "b", "c"))))
}
}
test("alter table: hive style change column") {
val sql1 = "ALTER TABLE table_name CHANGE COLUMN a.b.c c INT"
val sql2 = "ALTER TABLE table_name CHANGE COLUMN a.b.c c INT COMMENT 'new_comment'"
val sql3 = "ALTER TABLE table_name CHANGE COLUMN a.b.c c INT AFTER other_col"
comparePlans(
parsePlan(sql1),
AlterTableAlterColumnStatement(
Seq("table_name"),
Seq("a", "b", "c"),
Some(IntegerType),
None,
None,
None))
comparePlans(
parsePlan(sql2),
AlterTableAlterColumnStatement(
Seq("table_name"),
Seq("a", "b", "c"),
Some(IntegerType),
None,
Some("new_comment"),
None))
comparePlans(
parsePlan(sql3),
AlterTableAlterColumnStatement(
Seq("table_name"),
Seq("a", "b", "c"),
Some(IntegerType),
None,
None,
Some(after("other_col"))))
    // Renaming a column is not supported in Hive-style ALTER COLUMN.
intercept("ALTER TABLE table_name CHANGE COLUMN a.b.c new_name INT",
"please run RENAME COLUMN instead")
// ALTER COLUMN for a partition is not supported.
intercept("ALTER TABLE table_name PARTITION (a='1') CHANGE COLUMN a.b.c c INT")
}
test("alter table: hive style replace columns") {
val sql1 = "ALTER TABLE table_name REPLACE COLUMNS (x string)"
val sql2 = "ALTER TABLE table_name REPLACE COLUMNS (x string COMMENT 'x1')"
val sql3 = "ALTER TABLE table_name REPLACE COLUMNS (x string COMMENT 'x1', y int)"
val sql4 = "ALTER TABLE table_name REPLACE COLUMNS (x string COMMENT 'x1', y int COMMENT 'y1')"
comparePlans(
parsePlan(sql1),
AlterTableReplaceColumnsStatement(
Seq("table_name"),
Seq(QualifiedColType(Seq("x"), StringType, true, None, None))))
comparePlans(
parsePlan(sql2),
AlterTableReplaceColumnsStatement(
Seq("table_name"),
Seq(QualifiedColType(Seq("x"), StringType, true, Some("x1"), None))))
comparePlans(
parsePlan(sql3),
AlterTableReplaceColumnsStatement(
Seq("table_name"),
Seq(
QualifiedColType(Seq("x"), StringType, true, Some("x1"), None),
QualifiedColType(Seq("y"), IntegerType, true, None, None)
)))
comparePlans(
parsePlan(sql4),
AlterTableReplaceColumnsStatement(
Seq("table_name"),
Seq(
QualifiedColType(Seq("x"), StringType, true, Some("x1"), None),
QualifiedColType(Seq("y"), IntegerType, true, Some("y1"), None)
)))
intercept("ALTER TABLE table_name PARTITION (a='1') REPLACE COLUMNS (x string)",
"Operation not allowed: ALTER TABLE table PARTITION partition_spec REPLACE COLUMNS")
intercept("ALTER TABLE table_name REPLACE COLUMNS (x string NOT NULL)",
"NOT NULL is not supported in Hive-style REPLACE COLUMNS")
intercept("ALTER TABLE table_name REPLACE COLUMNS (x string FIRST)",
"Column position is not supported in Hive-style REPLACE COLUMNS")
}
test("alter table/view: rename table/view") {
comparePlans(
parsePlan("ALTER TABLE a.b.c RENAME TO x.y.z"),
RenameTableStatement(Seq("a", "b", "c"), Seq("x", "y", "z"), isView = false))
comparePlans(
parsePlan("ALTER VIEW a.b.c RENAME TO x.y.z"),
RenameTableStatement(Seq("a", "b", "c"), Seq("x", "y", "z"), isView = true))
}
test("describe table column") {
comparePlans(parsePlan("DESCRIBE t col"),
DescribeColumnStatement(
Seq("t"), Seq("col"), isExtended = false))
comparePlans(parsePlan("DESCRIBE t `abc.xyz`"),
DescribeColumnStatement(
Seq("t"), Seq("abc.xyz"), isExtended = false))
comparePlans(parsePlan("DESCRIBE t abc.xyz"),
DescribeColumnStatement(
Seq("t"), Seq("abc", "xyz"), isExtended = false))
comparePlans(parsePlan("DESCRIBE t `a.b`.`x.y`"),
DescribeColumnStatement(
Seq("t"), Seq("a.b", "x.y"), isExtended = false))
comparePlans(parsePlan("DESCRIBE TABLE t col"),
DescribeColumnStatement(
Seq("t"), Seq("col"), isExtended = false))
comparePlans(parsePlan("DESCRIBE TABLE EXTENDED t col"),
DescribeColumnStatement(
Seq("t"), Seq("col"), isExtended = true))
comparePlans(parsePlan("DESCRIBE TABLE FORMATTED t col"),
DescribeColumnStatement(
Seq("t"), Seq("col"), isExtended = true))
val caught = intercept[AnalysisException](
parsePlan("DESCRIBE TABLE t PARTITION (ds='1970-01-01') col"))
assert(caught.getMessage.contains(
"DESC TABLE COLUMN for a specific partition is not supported"))
}
test("describe database") {
val sql1 = "DESCRIBE DATABASE EXTENDED a.b"
val sql2 = "DESCRIBE DATABASE a.b"
comparePlans(parsePlan(sql1),
DescribeNamespace(UnresolvedNamespace(Seq("a", "b")), extended = true))
comparePlans(parsePlan(sql2),
DescribeNamespace(UnresolvedNamespace(Seq("a", "b")), extended = false))
}
test("SPARK-17328 Fix NPE with EXPLAIN DESCRIBE TABLE") {
comparePlans(parsePlan("describe t"),
DescribeRelation(UnresolvedTableOrView(Seq("t")), Map.empty, isExtended = false))
comparePlans(parsePlan("describe table t"),
DescribeRelation(UnresolvedTableOrView(Seq("t")), Map.empty, isExtended = false))
comparePlans(parsePlan("describe table extended t"),
DescribeRelation(UnresolvedTableOrView(Seq("t")), Map.empty, isExtended = true))
comparePlans(parsePlan("describe table formatted t"),
DescribeRelation(UnresolvedTableOrView(Seq("t")), Map.empty, isExtended = true))
}
test("insert table: basic append") {
Seq(
"INSERT INTO TABLE testcat.ns1.ns2.tbl SELECT * FROM source",
"INSERT INTO testcat.ns1.ns2.tbl SELECT * FROM source"
).foreach { sql =>
parseCompare(sql,
InsertIntoStatement(
UnresolvedRelation(Seq("testcat", "ns1", "ns2", "tbl")),
Map.empty,
Project(Seq(UnresolvedStar(None)), UnresolvedRelation(Seq("source"))),
overwrite = false, ifPartitionNotExists = false))
}
}
test("insert table: append from another catalog") {
parseCompare("INSERT INTO TABLE testcat.ns1.ns2.tbl SELECT * FROM testcat2.db.tbl",
InsertIntoStatement(
UnresolvedRelation(Seq("testcat", "ns1", "ns2", "tbl")),
Map.empty,
Project(Seq(UnresolvedStar(None)), UnresolvedRelation(Seq("testcat2", "db", "tbl"))),
overwrite = false, ifPartitionNotExists = false))
}
test("insert table: append with partition") {
parseCompare(
"""
|INSERT INTO testcat.ns1.ns2.tbl
|PARTITION (p1 = 3, p2)
|SELECT * FROM source
""".stripMargin,
InsertIntoStatement(
UnresolvedRelation(Seq("testcat", "ns1", "ns2", "tbl")),
Map("p1" -> Some("3"), "p2" -> None),
Project(Seq(UnresolvedStar(None)), UnresolvedRelation(Seq("source"))),
overwrite = false, ifPartitionNotExists = false))
}
test("insert table: overwrite") {
Seq(
"INSERT OVERWRITE TABLE testcat.ns1.ns2.tbl SELECT * FROM source",
"INSERT OVERWRITE testcat.ns1.ns2.tbl SELECT * FROM source"
).foreach { sql =>
parseCompare(sql,
InsertIntoStatement(
UnresolvedRelation(Seq("testcat", "ns1", "ns2", "tbl")),
Map.empty,
Project(Seq(UnresolvedStar(None)), UnresolvedRelation(Seq("source"))),
overwrite = true, ifPartitionNotExists = false))
}
}
test("insert table: overwrite with partition") {
parseCompare(
"""
|INSERT OVERWRITE TABLE testcat.ns1.ns2.tbl
|PARTITION (p1 = 3, p2)
|SELECT * FROM source
""".stripMargin,
InsertIntoStatement(
UnresolvedRelation(Seq("testcat", "ns1", "ns2", "tbl")),
Map("p1" -> Some("3"), "p2" -> None),
Project(Seq(UnresolvedStar(None)), UnresolvedRelation(Seq("source"))),
overwrite = true, ifPartitionNotExists = false))
}
test("insert table: overwrite with partition if not exists") {
parseCompare(
"""
|INSERT OVERWRITE TABLE testcat.ns1.ns2.tbl
|PARTITION (p1 = 3) IF NOT EXISTS
|SELECT * FROM source
""".stripMargin,
InsertIntoStatement(
UnresolvedRelation(Seq("testcat", "ns1", "ns2", "tbl")),
Map("p1" -> Some("3")),
Project(Seq(UnresolvedStar(None)), UnresolvedRelation(Seq("source"))),
overwrite = true, ifPartitionNotExists = true))
}
test("insert table: if not exists with dynamic partition fails") {
val exc = intercept[AnalysisException] {
parsePlan(
"""
|INSERT OVERWRITE TABLE testcat.ns1.ns2.tbl
|PARTITION (p1 = 3, p2) IF NOT EXISTS
|SELECT * FROM source
""".stripMargin)
}
assert(exc.getMessage.contains("IF NOT EXISTS with dynamic partitions"))
assert(exc.getMessage.contains("p2"))
}
test("insert table: if not exists without overwrite fails") {
val exc = intercept[AnalysisException] {
parsePlan(
"""
|INSERT INTO TABLE testcat.ns1.ns2.tbl
|PARTITION (p1 = 3) IF NOT EXISTS
|SELECT * FROM source
""".stripMargin)
}
assert(exc.getMessage.contains("INSERT INTO ... IF NOT EXISTS"))
}
test("delete from table: delete all") {
parseCompare("DELETE FROM testcat.ns1.ns2.tbl",
DeleteFromTable(
UnresolvedRelation(Seq("testcat", "ns1", "ns2", "tbl")),
None))
}
test("delete from table: with alias and where clause") {
parseCompare("DELETE FROM testcat.ns1.ns2.tbl AS t WHERE t.a = 2",
DeleteFromTable(
SubqueryAlias("t", UnresolvedRelation(Seq("testcat", "ns1", "ns2", "tbl"))),
Some(EqualTo(UnresolvedAttribute("t.a"), Literal(2)))))
}
test("delete from table: columns aliases is not allowed") {
val exc = intercept[ParseException] {
parsePlan("DELETE FROM testcat.ns1.ns2.tbl AS t(a,b,c,d) WHERE d = 2")
}
assert(exc.getMessage.contains("Columns aliases are not allowed in DELETE."))
}
test("update table: basic") {
parseCompare(
"""
|UPDATE testcat.ns1.ns2.tbl
|SET a='Robert', b=32
""".stripMargin,
UpdateTable(
UnresolvedRelation(Seq("testcat", "ns1", "ns2", "tbl")),
Seq(Assignment(UnresolvedAttribute("a"), Literal("Robert")),
Assignment(UnresolvedAttribute("b"), Literal(32))),
None))
}
test("update table: with alias and where clause") {
parseCompare(
"""
|UPDATE testcat.ns1.ns2.tbl AS t
|SET t.a='Robert', t.b=32
|WHERE t.c=2
""".stripMargin,
UpdateTable(
SubqueryAlias("t", UnresolvedRelation(Seq("testcat", "ns1", "ns2", "tbl"))),
Seq(Assignment(UnresolvedAttribute("t.a"), Literal("Robert")),
Assignment(UnresolvedAttribute("t.b"), Literal(32))),
Some(EqualTo(UnresolvedAttribute("t.c"), Literal(2)))))
}
test("update table: columns aliases is not allowed") {
val exc = intercept[ParseException] {
parsePlan(
"""
|UPDATE testcat.ns1.ns2.tbl AS t(a,b,c,d)
|SET b='Robert', c=32
|WHERE d=2
""".stripMargin)
}
assert(exc.getMessage.contains("Columns aliases are not allowed in UPDATE."))
}
test("merge into table: basic") {
parseCompare(
"""
|MERGE INTO testcat1.ns1.ns2.tbl AS target
|USING testcat2.ns1.ns2.tbl AS source
|ON target.col1 = source.col1
|WHEN MATCHED AND (target.col2='delete') THEN DELETE
|WHEN MATCHED AND (target.col2='update') THEN UPDATE SET target.col2 = source.col2
|WHEN NOT MATCHED AND (target.col2='insert')
|THEN INSERT (target.col1, target.col2) values (source.col1, source.col2)
""".stripMargin,
MergeIntoTable(
SubqueryAlias("target", UnresolvedRelation(Seq("testcat1", "ns1", "ns2", "tbl"))),
SubqueryAlias("source", UnresolvedRelation(Seq("testcat2", "ns1", "ns2", "tbl"))),
EqualTo(UnresolvedAttribute("target.col1"), UnresolvedAttribute("source.col1")),
Seq(DeleteAction(Some(EqualTo(UnresolvedAttribute("target.col2"), Literal("delete")))),
UpdateAction(Some(EqualTo(UnresolvedAttribute("target.col2"), Literal("update"))),
Seq(Assignment(UnresolvedAttribute("target.col2"),
UnresolvedAttribute("source.col2"))))),
Seq(InsertAction(Some(EqualTo(UnresolvedAttribute("target.col2"), Literal("insert"))),
Seq(Assignment(UnresolvedAttribute("target.col1"), UnresolvedAttribute("source.col1")),
Assignment(UnresolvedAttribute("target.col2"), UnresolvedAttribute("source.col2")))))))
}
test("merge into table: using subquery") {
parseCompare(
"""
|MERGE INTO testcat1.ns1.ns2.tbl AS target
|USING (SELECT * FROM testcat2.ns1.ns2.tbl) AS source
|ON target.col1 = source.col1
|WHEN MATCHED AND (target.col2='delete') THEN DELETE
|WHEN MATCHED AND (target.col2='update') THEN UPDATE SET target.col2 = source.col2
|WHEN NOT MATCHED AND (target.col2='insert')
|THEN INSERT (target.col1, target.col2) values (source.col1, source.col2)
""".stripMargin,
MergeIntoTable(
SubqueryAlias("target", UnresolvedRelation(Seq("testcat1", "ns1", "ns2", "tbl"))),
SubqueryAlias("source", Project(Seq(UnresolvedStar(None)),
UnresolvedRelation(Seq("testcat2", "ns1", "ns2", "tbl")))),
EqualTo(UnresolvedAttribute("target.col1"), UnresolvedAttribute("source.col1")),
Seq(DeleteAction(Some(EqualTo(UnresolvedAttribute("target.col2"), Literal("delete")))),
UpdateAction(Some(EqualTo(UnresolvedAttribute("target.col2"), Literal("update"))),
Seq(Assignment(UnresolvedAttribute("target.col2"),
UnresolvedAttribute("source.col2"))))),
Seq(InsertAction(Some(EqualTo(UnresolvedAttribute("target.col2"), Literal("insert"))),
Seq(Assignment(UnresolvedAttribute("target.col1"), UnresolvedAttribute("source.col1")),
Assignment(UnresolvedAttribute("target.col2"), UnresolvedAttribute("source.col2")))))))
}
test("merge into table: cte") {
parseCompare(
"""
|MERGE INTO testcat1.ns1.ns2.tbl AS target
|USING (WITH s as (SELECT * FROM testcat2.ns1.ns2.tbl) SELECT * FROM s) AS source
|ON target.col1 = source.col1
|WHEN MATCHED AND (target.col2='delete') THEN DELETE
|WHEN MATCHED AND (target.col2='update') THEN UPDATE SET target.col2 = source.col2
|WHEN NOT MATCHED AND (target.col2='insert')
|THEN INSERT (target.col1, target.col2) values (source.col1, source.col2)
""".stripMargin,
MergeIntoTable(
SubqueryAlias("target", UnresolvedRelation(Seq("testcat1", "ns1", "ns2", "tbl"))),
SubqueryAlias("source", With(Project(Seq(UnresolvedStar(None)),
UnresolvedRelation(Seq("s"))),
Seq("s" -> SubqueryAlias("s", Project(Seq(UnresolvedStar(None)),
UnresolvedRelation(Seq("testcat2", "ns1", "ns2", "tbl"))))))),
EqualTo(UnresolvedAttribute("target.col1"), UnresolvedAttribute("source.col1")),
Seq(DeleteAction(Some(EqualTo(UnresolvedAttribute("target.col2"), Literal("delete")))),
UpdateAction(Some(EqualTo(UnresolvedAttribute("target.col2"), Literal("update"))),
Seq(Assignment(UnresolvedAttribute("target.col2"),
UnresolvedAttribute("source.col2"))))),
Seq(InsertAction(Some(EqualTo(UnresolvedAttribute("target.col2"), Literal("insert"))),
Seq(Assignment(UnresolvedAttribute("target.col1"), UnresolvedAttribute("source.col1")),
Assignment(UnresolvedAttribute("target.col2"), UnresolvedAttribute("source.col2")))))))
}
test("merge into table: no additional condition") {
parseCompare(
"""
|MERGE INTO testcat1.ns1.ns2.tbl AS target
|USING testcat2.ns1.ns2.tbl AS source
|ON target.col1 = source.col1
|WHEN MATCHED THEN UPDATE SET target.col2 = source.col2
|WHEN NOT MATCHED
|THEN INSERT (target.col1, target.col2) values (source.col1, source.col2)
""".stripMargin,
MergeIntoTable(
SubqueryAlias("target", UnresolvedRelation(Seq("testcat1", "ns1", "ns2", "tbl"))),
SubqueryAlias("source", UnresolvedRelation(Seq("testcat2", "ns1", "ns2", "tbl"))),
EqualTo(UnresolvedAttribute("target.col1"), UnresolvedAttribute("source.col1")),
Seq(UpdateAction(None,
Seq(Assignment(UnresolvedAttribute("target.col2"), UnresolvedAttribute("source.col2"))))),
Seq(InsertAction(None,
Seq(Assignment(UnresolvedAttribute("target.col1"), UnresolvedAttribute("source.col1")),
Assignment(UnresolvedAttribute("target.col2"), UnresolvedAttribute("source.col2")))))))
}
test("merge into table: star") {
parseCompare(
"""
|MERGE INTO testcat1.ns1.ns2.tbl AS target
|USING testcat2.ns1.ns2.tbl AS source
|ON target.col1 = source.col1
|WHEN MATCHED AND (target.col2='delete') THEN DELETE
|WHEN MATCHED AND (target.col2='update') THEN UPDATE SET *
|WHEN NOT MATCHED AND (target.col2='insert')
|THEN INSERT *
""".stripMargin,
MergeIntoTable(
SubqueryAlias("target", UnresolvedRelation(Seq("testcat1", "ns1", "ns2", "tbl"))),
SubqueryAlias("source", UnresolvedRelation(Seq("testcat2", "ns1", "ns2", "tbl"))),
EqualTo(UnresolvedAttribute("target.col1"), UnresolvedAttribute("source.col1")),
Seq(DeleteAction(Some(EqualTo(UnresolvedAttribute("target.col2"), Literal("delete")))),
UpdateAction(Some(EqualTo(UnresolvedAttribute("target.col2"), Literal("update"))), Seq())),
Seq(InsertAction(Some(EqualTo(UnresolvedAttribute("target.col2"), Literal("insert"))),
Seq()))))
}
test("merge into table: columns aliases are not allowed") {
Seq("target(c1, c2)" -> "source", "target" -> "source(c1, c2)").foreach {
case (targetAlias, sourceAlias) =>
val exc = intercept[ParseException] {
parsePlan(
s"""
|MERGE INTO testcat1.ns1.ns2.tbl AS $targetAlias
|USING testcat2.ns1.ns2.tbl AS $sourceAlias
|ON target.col1 = source.col1
|WHEN MATCHED AND (target.col2='delete') THEN DELETE
|WHEN MATCHED AND (target.col2='update') THEN UPDATE SET target.col2 = source.col2
|WHEN NOT MATCHED AND (target.col2='insert')
|THEN INSERT (target.col1, target.col2) values (source.col1, source.col2)
""".stripMargin)
}
assert(exc.getMessage.contains("Columns aliases are not allowed in MERGE."))
}
}
test("merge into table: at most two matched clauses") {
val exc = intercept[ParseException] {
parsePlan(
"""
|MERGE INTO testcat1.ns1.ns2.tbl AS target
|USING testcat2.ns1.ns2.tbl AS source
|ON target.col1 = source.col1
|WHEN MATCHED AND (target.col2='delete') THEN DELETE
|WHEN MATCHED AND (target.col2='update1') THEN UPDATE SET target.col2 = source.col2
|WHEN MATCHED AND (target.col2='update2') THEN UPDATE SET target.col2 = source.col2
|WHEN NOT MATCHED AND (target.col2='insert')
|THEN INSERT (target.col1, target.col2) values (source.col1, source.col2)
""".stripMargin)
}
assert(exc.getMessage.contains("There should be at most 2 'WHEN MATCHED' clauses."))
}
test("merge into table: at most one not matched clause") {
val exc = intercept[ParseException] {
parsePlan(
"""
|MERGE INTO testcat1.ns1.ns2.tbl AS target
|USING testcat2.ns1.ns2.tbl AS source
|ON target.col1 = source.col1
|WHEN MATCHED AND (target.col2='delete') THEN DELETE
|WHEN MATCHED AND (target.col2='update1') THEN UPDATE SET target.col2 = source.col2
|WHEN NOT MATCHED AND (target.col2='insert1')
|THEN INSERT (target.col1, target.col2) values (source.col1, source.col2)
|WHEN NOT MATCHED AND (target.col2='insert2')
|THEN INSERT (target.col1, target.col2) values (source.col1, source.col2)
""".stripMargin)
}
assert(exc.getMessage.contains("There should be at most 1 'WHEN NOT MATCHED' clause."))
}
test("show tables") {
comparePlans(
parsePlan("SHOW TABLES"),
ShowTables(UnresolvedNamespace(Seq.empty[String]), None))
comparePlans(
parsePlan("SHOW TABLES FROM testcat.ns1.ns2.tbl"),
ShowTables(UnresolvedNamespace(Seq("testcat", "ns1", "ns2", "tbl")), None))
comparePlans(
parsePlan("SHOW TABLES IN testcat.ns1.ns2.tbl"),
ShowTables(UnresolvedNamespace(Seq("testcat", "ns1", "ns2", "tbl")), None))
comparePlans(
parsePlan("SHOW TABLES IN tbl LIKE '*dog*'"),
ShowTables(UnresolvedNamespace(Seq("tbl")), Some("*dog*")))
}
test("show table extended") {
comparePlans(
parsePlan("SHOW TABLE EXTENDED LIKE '*test*'"),
ShowTableStatement(None, "*test*", None))
comparePlans(
parsePlan("SHOW TABLE EXTENDED FROM testcat.ns1.ns2 LIKE '*test*'"),
ShowTableStatement(Some(Seq("testcat", "ns1", "ns2")), "*test*", None))
comparePlans(
parsePlan("SHOW TABLE EXTENDED IN testcat.ns1.ns2 LIKE '*test*'"),
ShowTableStatement(Some(Seq("testcat", "ns1", "ns2")), "*test*", None))
comparePlans(
parsePlan("SHOW TABLE EXTENDED LIKE '*test*' PARTITION(ds='2008-04-09', hr=11)"),
ShowTableStatement(None, "*test*", Some(Map("ds" -> "2008-04-09", "hr" -> "11"))))
comparePlans(
parsePlan("SHOW TABLE EXTENDED FROM testcat.ns1.ns2 LIKE '*test*' " +
"PARTITION(ds='2008-04-09')"),
ShowTableStatement(Some(Seq("testcat", "ns1", "ns2")), "*test*",
Some(Map("ds" -> "2008-04-09"))))
comparePlans(
parsePlan("SHOW TABLE EXTENDED IN testcat.ns1.ns2 LIKE '*test*' " +
"PARTITION(ds='2008-04-09')"),
ShowTableStatement(Some(Seq("testcat", "ns1", "ns2")), "*test*",
Some(Map("ds" -> "2008-04-09"))))
}
test("create namespace -- backward compatibility with DATABASE/DBPROPERTIES") {
val expected = CreateNamespaceStatement(
Seq("a", "b", "c"),
ifNotExists = true,
Map(
"a" -> "a",
"b" -> "b",
"c" -> "c",
"comment" -> "namespace_comment",
"location" -> "/home/user/db"))
comparePlans(
parsePlan(
"""
|CREATE NAMESPACE IF NOT EXISTS a.b.c
|WITH PROPERTIES ('a'='a', 'b'='b', 'c'='c')
|COMMENT 'namespace_comment' LOCATION '/home/user/db'
""".stripMargin),
expected)
comparePlans(
parsePlan(
"""
|CREATE DATABASE IF NOT EXISTS a.b.c
|WITH DBPROPERTIES ('a'='a', 'b'='b', 'c'='c')
|COMMENT 'namespace_comment' LOCATION '/home/user/db'
""".stripMargin),
expected)
}
test("create namespace -- check duplicates") {
def createDatabase(duplicateClause: String): String = {
s"""
|CREATE NAMESPACE IF NOT EXISTS a.b.c
|$duplicateClause
|$duplicateClause
""".stripMargin
}
val sql1 = createDatabase("COMMENT 'namespace_comment'")
val sql2 = createDatabase("LOCATION '/home/user/db'")
val sql3 = createDatabase("WITH PROPERTIES ('a'='a', 'b'='b', 'c'='c')")
val sql4 = createDatabase("WITH DBPROPERTIES ('a'='a', 'b'='b', 'c'='c')")
intercept(sql1, "Found duplicate clauses: COMMENT")
intercept(sql2, "Found duplicate clauses: LOCATION")
intercept(sql3, "Found duplicate clauses: WITH PROPERTIES")
intercept(sql4, "Found duplicate clauses: WITH DBPROPERTIES")
}
test("create namespace - property values must be set") {
assertUnsupported(
sql = "CREATE NAMESPACE a.b.c WITH PROPERTIES('key_without_value', 'key_with_value'='x')",
containsThesePhrases = Seq("key_without_value"))
}
test("create namespace -- either PROPERTIES or DBPROPERTIES is allowed") {
val sql =
s"""
|CREATE NAMESPACE IF NOT EXISTS a.b.c
|WITH PROPERTIES ('a'='a', 'b'='b', 'c'='c')
|WITH DBPROPERTIES ('a'='a', 'b'='b', 'c'='c')
""".stripMargin
intercept(sql, "Either PROPERTIES or DBPROPERTIES is allowed")
}
test("create namespace - support for other types in PROPERTIES") {
val sql =
"""
|CREATE NAMESPACE a.b.c
|LOCATION '/home/user/db'
|WITH PROPERTIES ('a'=1, 'b'=0.1, 'c'=TRUE)
""".stripMargin
comparePlans(
parsePlan(sql),
CreateNamespaceStatement(
Seq("a", "b", "c"),
ifNotExists = false,
Map(
"a" -> "1",
"b" -> "0.1",
"c" -> "true",
"location" -> "/home/user/db")))
}
test("drop namespace") {
comparePlans(
parsePlan("DROP NAMESPACE a.b.c"),
DropNamespace(
UnresolvedNamespace(Seq("a", "b", "c")), ifExists = false, cascade = false))
comparePlans(
parsePlan("DROP NAMESPACE IF EXISTS a.b.c"),
DropNamespace(
UnresolvedNamespace(Seq("a", "b", "c")), ifExists = true, cascade = false))
comparePlans(
parsePlan("DROP NAMESPACE IF EXISTS a.b.c RESTRICT"),
DropNamespace(
UnresolvedNamespace(Seq("a", "b", "c")), ifExists = true, cascade = false))
comparePlans(
parsePlan("DROP NAMESPACE IF EXISTS a.b.c CASCADE"),
DropNamespace(
UnresolvedNamespace(Seq("a", "b", "c")), ifExists = true, cascade = true))
comparePlans(
parsePlan("DROP NAMESPACE a.b.c CASCADE"),
DropNamespace(
UnresolvedNamespace(Seq("a", "b", "c")), ifExists = false, cascade = true))
}
test("set namespace properties") {
comparePlans(
parsePlan("ALTER DATABASE a.b.c SET PROPERTIES ('a'='a', 'b'='b', 'c'='c')"),
AlterNamespaceSetProperties(
UnresolvedNamespace(Seq("a", "b", "c")), Map("a" -> "a", "b" -> "b", "c" -> "c")))
comparePlans(
parsePlan("ALTER SCHEMA a.b.c SET PROPERTIES ('a'='a')"),
AlterNamespaceSetProperties(
UnresolvedNamespace(Seq("a", "b", "c")), Map("a" -> "a")))
comparePlans(
parsePlan("ALTER NAMESPACE a.b.c SET PROPERTIES ('b'='b')"),
AlterNamespaceSetProperties(
UnresolvedNamespace(Seq("a", "b", "c")), Map("b" -> "b")))
comparePlans(
parsePlan("ALTER DATABASE a.b.c SET DBPROPERTIES ('a'='a', 'b'='b', 'c'='c')"),
AlterNamespaceSetProperties(
UnresolvedNamespace(Seq("a", "b", "c")), Map("a" -> "a", "b" -> "b", "c" -> "c")))
comparePlans(
parsePlan("ALTER SCHEMA a.b.c SET DBPROPERTIES ('a'='a')"),
AlterNamespaceSetProperties(
UnresolvedNamespace(Seq("a", "b", "c")), Map("a" -> "a")))
comparePlans(
parsePlan("ALTER NAMESPACE a.b.c SET DBPROPERTIES ('b'='b')"),
AlterNamespaceSetProperties(
UnresolvedNamespace(Seq("a", "b", "c")), Map("b" -> "b")))
}
test("set namespace location") {
comparePlans(
parsePlan("ALTER DATABASE a.b.c SET LOCATION '/home/user/db'"),
AlterNamespaceSetLocation(
UnresolvedNamespace(Seq("a", "b", "c")), "/home/user/db"))
comparePlans(
parsePlan("ALTER SCHEMA a.b.c SET LOCATION '/home/user/db'"),
AlterNamespaceSetLocation(
UnresolvedNamespace(Seq("a", "b", "c")), "/home/user/db"))
comparePlans(
parsePlan("ALTER NAMESPACE a.b.c SET LOCATION '/home/user/db'"),
AlterNamespaceSetLocation(
UnresolvedNamespace(Seq("a", "b", "c")), "/home/user/db"))
}
test("show databases: basic") {
comparePlans(
parsePlan("SHOW DATABASES"),
ShowNamespaces(UnresolvedNamespace(Seq.empty[String]), None))
comparePlans(
parsePlan("SHOW DATABASES LIKE 'defau*'"),
ShowNamespaces(UnresolvedNamespace(Seq.empty[String]), Some("defau*")))
}
test("show databases: FROM/IN operator is not allowed") {
def verify(sql: String): Unit = {
val exc = intercept[ParseException] { parsePlan(sql) }
assert(exc.getMessage.contains("FROM/IN operator is not allowed in SHOW DATABASES"))
}
verify("SHOW DATABASES FROM testcat.ns1.ns2")
verify("SHOW DATABASES IN testcat.ns1.ns2")
}
test("show namespaces") {
comparePlans(
parsePlan("SHOW NAMESPACES"),
ShowNamespaces(UnresolvedNamespace(Seq.empty[String]), None))
comparePlans(
parsePlan("SHOW NAMESPACES FROM testcat.ns1.ns2"),
ShowNamespaces(UnresolvedNamespace(Seq("testcat", "ns1", "ns2")), None))
comparePlans(
parsePlan("SHOW NAMESPACES IN testcat.ns1.ns2"),
ShowNamespaces(UnresolvedNamespace(Seq("testcat", "ns1", "ns2")), None))
comparePlans(
parsePlan("SHOW NAMESPACES IN testcat.ns1 LIKE '*pattern*'"),
ShowNamespaces(UnresolvedNamespace(Seq("testcat", "ns1")), Some("*pattern*")))
}
test("analyze table statistics") {
comparePlans(parsePlan("analyze table a.b.c compute statistics"),
AnalyzeTableStatement(Seq("a", "b", "c"), Map.empty, noScan = false))
comparePlans(parsePlan("analyze table a.b.c compute statistics noscan"),
AnalyzeTableStatement(Seq("a", "b", "c"), Map.empty, noScan = true))
comparePlans(parsePlan("analyze table a.b.c partition (a) compute statistics nOscAn"),
AnalyzeTableStatement(Seq("a", "b", "c"), Map("a" -> None), noScan = true))
// Partitions specified
comparePlans(
parsePlan("ANALYZE TABLE a.b.c PARTITION(ds='2008-04-09', hr=11) COMPUTE STATISTICS"),
AnalyzeTableStatement(
Seq("a", "b", "c"), Map("ds" -> Some("2008-04-09"), "hr" -> Some("11")), noScan = false))
comparePlans(
parsePlan("ANALYZE TABLE a.b.c PARTITION(ds='2008-04-09', hr=11) COMPUTE STATISTICS noscan"),
AnalyzeTableStatement(
Seq("a", "b", "c"), Map("ds" -> Some("2008-04-09"), "hr" -> Some("11")), noScan = true))
comparePlans(
parsePlan("ANALYZE TABLE a.b.c PARTITION(ds='2008-04-09') COMPUTE STATISTICS noscan"),
AnalyzeTableStatement(Seq("a", "b", "c"), Map("ds" -> Some("2008-04-09")), noScan = true))
comparePlans(
parsePlan("ANALYZE TABLE a.b.c PARTITION(ds='2008-04-09', hr) COMPUTE STATISTICS"),
AnalyzeTableStatement(
Seq("a", "b", "c"), Map("ds" -> Some("2008-04-09"), "hr" -> None), noScan = false))
comparePlans(
parsePlan("ANALYZE TABLE a.b.c PARTITION(ds='2008-04-09', hr) COMPUTE STATISTICS noscan"),
AnalyzeTableStatement(
Seq("a", "b", "c"), Map("ds" -> Some("2008-04-09"), "hr" -> None), noScan = true))
comparePlans(
parsePlan("ANALYZE TABLE a.b.c PARTITION(ds, hr=11) COMPUTE STATISTICS noscan"),
AnalyzeTableStatement(
Seq("a", "b", "c"), Map("ds" -> None, "hr" -> Some("11")), noScan = true))
comparePlans(
parsePlan("ANALYZE TABLE a.b.c PARTITION(ds, hr) COMPUTE STATISTICS"),
AnalyzeTableStatement(Seq("a", "b", "c"), Map("ds" -> None, "hr" -> None), noScan = false))
comparePlans(
parsePlan("ANALYZE TABLE a.b.c PARTITION(ds, hr) COMPUTE STATISTICS noscan"),
AnalyzeTableStatement(Seq("a", "b", "c"), Map("ds" -> None, "hr" -> None), noScan = true))
intercept("analyze table a.b.c compute statistics xxxx",
"Expected `NOSCAN` instead of `xxxx`")
intercept("analyze table a.b.c partition (a) compute statistics xxxx",
"Expected `NOSCAN` instead of `xxxx`")
}
test("analyze table column statistics") {
intercept("ANALYZE TABLE a.b.c COMPUTE STATISTICS FOR COLUMNS", "")
comparePlans(
parsePlan("ANALYZE TABLE a.b.c COMPUTE STATISTICS FOR COLUMNS key, value"),
AnalyzeColumnStatement(Seq("a", "b", "c"), Option(Seq("key", "value")), allColumns = false))
// Partition specified - should be ignored
comparePlans(
parsePlan(
s"""
|ANALYZE TABLE a.b.c PARTITION(ds='2017-06-10')
|COMPUTE STATISTICS FOR COLUMNS key, value
""".stripMargin),
AnalyzeColumnStatement(Seq("a", "b", "c"), Option(Seq("key", "value")), allColumns = false))
// Partition specified should be ignored in case of COMPUTE STATISTICS FOR ALL COLUMNS
comparePlans(
parsePlan(
s"""
|ANALYZE TABLE a.b.c PARTITION(ds='2017-06-10')
|COMPUTE STATISTICS FOR ALL COLUMNS
""".stripMargin),
AnalyzeColumnStatement(Seq("a", "b", "c"), None, allColumns = true))
intercept("ANALYZE TABLE a.b.c COMPUTE STATISTICS FOR ALL COLUMNS key, value",
"mismatched input 'key' expecting <EOF>")
intercept("ANALYZE TABLE a.b.c COMPUTE STATISTICS FOR ALL",
"missing 'COLUMNS' at '<EOF>'")
}
test("MSCK REPAIR TABLE") {
comparePlans(
parsePlan("MSCK REPAIR TABLE a.b.c"),
RepairTableStatement(Seq("a", "b", "c")))
}
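  // LoadDataStatement arguments are positional: table name parts, input path, isLocal,
  // isOverwrite, optional partition spec.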
test("LOAD DATA INTO table") {
comparePlans(
parsePlan("LOAD DATA INPATH 'filepath' INTO TABLE a.b.c"),
LoadDataStatement(Seq("a", "b", "c"), "filepath", false, false, None))
comparePlans(
parsePlan("LOAD DATA LOCAL INPATH 'filepath' INTO TABLE a.b.c"),
LoadDataStatement(Seq("a", "b", "c"), "filepath", true, false, None))
comparePlans(
parsePlan("LOAD DATA LOCAL INPATH 'filepath' OVERWRITE INTO TABLE a.b.c"),
LoadDataStatement(Seq("a", "b", "c"), "filepath", true, true, None))
comparePlans(
parsePlan(
s"""
|LOAD DATA LOCAL INPATH 'filepath' OVERWRITE INTO TABLE a.b.c
|PARTITION(ds='2017-06-10')
""".stripMargin),
LoadDataStatement(
Seq("a", "b", "c"),
"filepath",
true,
true,
Some(Map("ds" -> "2017-06-10"))))
}
test("SHOW CREATE table") {
comparePlans(
parsePlan("SHOW CREATE TABLE a.b.c"),
ShowCreateTableStatement(Seq("a", "b", "c")))
}
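  // CacheTableStatement arguments are positional: table name parts, optional AS SELECT
  // plan, isLazy, and options.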
test("CACHE TABLE") {
comparePlans(
parsePlan("CACHE TABLE a.b.c"),
CacheTableStatement(Seq("a", "b", "c"), None, false, Map.empty))
comparePlans(
parsePlan("CACHE LAZY TABLE a.b.c"),
CacheTableStatement(Seq("a", "b", "c"), None, true, Map.empty))
comparePlans(
parsePlan("CACHE LAZY TABLE a.b.c OPTIONS('storageLevel' 'DISK_ONLY')"),
CacheTableStatement(Seq("a", "b", "c"), None, true, Map("storageLevel" -> "DISK_ONLY")))
intercept("CACHE TABLE a.b.c AS SELECT * FROM testData",
"It is not allowed to add catalog/namespace prefix a.b")
}
test("UNCACHE TABLE") {
comparePlans(
parsePlan("UNCACHE TABLE a.b.c"),
UncacheTableStatement(Seq("a", "b", "c"), ifExists = false))
comparePlans(
parsePlan("UNCACHE TABLE IF EXISTS a.b.c"),
UncacheTableStatement(Seq("a", "b", "c"), ifExists = true))
}
test("TRUNCATE table") {
comparePlans(
parsePlan("TRUNCATE TABLE a.b.c"),
TruncateTableStatement(Seq("a", "b", "c"), None))
comparePlans(
parsePlan("TRUNCATE TABLE a.b.c PARTITION(ds='2017-06-10')"),
TruncateTableStatement(Seq("a", "b", "c"), Some(Map("ds" -> "2017-06-10"))))
}
test("SHOW PARTITIONS") {
val sql1 = "SHOW PARTITIONS t1"
val sql2 = "SHOW PARTITIONS db1.t1"
val sql3 = "SHOW PARTITIONS t1 PARTITION(partcol1='partvalue', partcol2='partvalue')"
val sql4 = "SHOW PARTITIONS a.b.c"
val sql5 = "SHOW PARTITIONS a.b.c PARTITION(ds='2017-06-10')"
val parsed1 = parsePlan(sql1)
val expected1 = ShowPartitionsStatement(Seq("t1"), None)
val parsed2 = parsePlan(sql2)
val expected2 = ShowPartitionsStatement(Seq("db1", "t1"), None)
val parsed3 = parsePlan(sql3)
val expected3 = ShowPartitionsStatement(Seq("t1"),
Some(Map("partcol1" -> "partvalue", "partcol2" -> "partvalue")))
val parsed4 = parsePlan(sql4)
val expected4 = ShowPartitionsStatement(Seq("a", "b", "c"), None)
val parsed5 = parsePlan(sql5)
val expected5 = ShowPartitionsStatement(Seq("a", "b", "c"), Some(Map("ds" -> "2017-06-10")))
comparePlans(parsed1, expected1)
comparePlans(parsed2, expected2)
comparePlans(parsed3, expected3)
comparePlans(parsed4, expected4)
comparePlans(parsed5, expected5)
}
test("REFRESH TABLE") {
comparePlans(
parsePlan("REFRESH TABLE a.b.c"),
RefreshTableStatement(Seq("a", "b", "c")))
}
test("show columns") {
val sql1 = "SHOW COLUMNS FROM t1"
val sql2 = "SHOW COLUMNS IN db1.t1"
val sql3 = "SHOW COLUMNS FROM t1 IN db1"
val sql4 = "SHOW COLUMNS FROM db1.t1 IN db1"
val parsed1 = parsePlan(sql1)
val expected1 = ShowColumnsStatement(Seq("t1"), None)
val parsed2 = parsePlan(sql2)
val expected2 = ShowColumnsStatement(Seq("db1", "t1"), None)
val parsed3 = parsePlan(sql3)
val expected3 = ShowColumnsStatement(Seq("t1"), Some(Seq("db1")))
val parsed4 = parsePlan(sql4)
val expected4 = ShowColumnsStatement(Seq("db1", "t1"), Some(Seq("db1")))
comparePlans(parsed1, expected1)
comparePlans(parsed2, expected2)
comparePlans(parsed3, expected3)
comparePlans(parsed4, expected4)
}
test("alter table: recover partitions") {
comparePlans(
parsePlan("ALTER TABLE a.b.c RECOVER PARTITIONS"),
AlterTableRecoverPartitionsStatement(Seq("a", "b", "c")))
}
test("alter table: add partition") {
val sql1 =
"""
|ALTER TABLE a.b.c ADD IF NOT EXISTS PARTITION
|(dt='2008-08-08', country='us') LOCATION 'location1' PARTITION
|(dt='2009-09-09', country='uk')
""".stripMargin
val sql2 = "ALTER TABLE a.b.c ADD PARTITION (dt='2008-08-08') LOCATION 'loc'"
val parsed1 = parsePlan(sql1)
val parsed2 = parsePlan(sql2)
val expected1 = AlterTableAddPartitionStatement(
Seq("a", "b", "c"),
Seq(
(Map("dt" -> "2008-08-08", "country" -> "us"), Some("location1")),
(Map("dt" -> "2009-09-09", "country" -> "uk"), None)),
ifNotExists = true)
val expected2 = AlterTableAddPartitionStatement(
Seq("a", "b", "c"),
Seq((Map("dt" -> "2008-08-08"), Some("loc"))),
ifNotExists = false)
comparePlans(parsed1, expected1)
comparePlans(parsed2, expected2)
}
test("alter view: add partition (not supported)") {
assertUnsupported(
"""
|ALTER VIEW a.b.c ADD IF NOT EXISTS PARTITION
|(dt='2008-08-08', country='us') PARTITION
|(dt='2009-09-09', country='uk')
""".stripMargin)
}
test("alter table: rename partition") {
val sql1 =
"""
|ALTER TABLE table_name PARTITION (dt='2008-08-08', country='us')
|RENAME TO PARTITION (dt='2008-09-09', country='uk')
""".stripMargin
val parsed1 = parsePlan(sql1)
val expected1 = AlterTableRenamePartitionStatement(
Seq("table_name"),
Map("dt" -> "2008-08-08", "country" -> "us"),
Map("dt" -> "2008-09-09", "country" -> "uk"))
comparePlans(parsed1, expected1)
val sql2 =
"""
|ALTER TABLE a.b.c PARTITION (ds='2017-06-10')
|RENAME TO PARTITION (ds='2018-06-10')
""".stripMargin
val parsed2 = parsePlan(sql2)
val expected2 = AlterTableRenamePartitionStatement(
Seq("a", "b", "c"),
Map("ds" -> "2017-06-10"),
Map("ds" -> "2018-06-10"))
comparePlans(parsed2, expected2)
}
// ALTER TABLE table_name DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...]
// ALTER VIEW table_name DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...]
test("alter table: drop partition") {
val sql1_table =
"""
|ALTER TABLE table_name DROP IF EXISTS PARTITION
|(dt='2008-08-08', country='us'), PARTITION (dt='2009-09-09', country='uk')
""".stripMargin
val sql2_table =
"""
|ALTER TABLE table_name DROP PARTITION
|(dt='2008-08-08', country='us'), PARTITION (dt='2009-09-09', country='uk')
""".stripMargin
val sql1_view = sql1_table.replace("TABLE", "VIEW")
val sql2_view = sql2_table.replace("TABLE", "VIEW")
val parsed1_table = parsePlan(sql1_table)
val parsed2_table = parsePlan(sql2_table)
val parsed1_purge = parsePlan(sql1_table + " PURGE")
assertUnsupported(sql1_view)
assertUnsupported(sql2_view)
val expected1_table = AlterTableDropPartitionStatement(
Seq("table_name"),
Seq(
Map("dt" -> "2008-08-08", "country" -> "us"),
Map("dt" -> "2009-09-09", "country" -> "uk")),
ifExists = true,
purge = false,
retainData = false)
val expected2_table = expected1_table.copy(ifExists = false)
val expected1_purge = expected1_table.copy(purge = true)
comparePlans(parsed1_table, expected1_table)
comparePlans(parsed2_table, expected2_table)
comparePlans(parsed1_purge, expected1_purge)
val sql3_table = "ALTER TABLE a.b.c DROP IF EXISTS PARTITION (ds='2017-06-10')"
val expected3_table = AlterTableDropPartitionStatement(
Seq("a", "b", "c"),
Seq(Map("ds" -> "2017-06-10")),
ifExists = true,
purge = false,
retainData = false)
val parsed3_table = parsePlan(sql3_table)
comparePlans(parsed3_table, expected3_table)
}
test("show current namespace") {
comparePlans(
parsePlan("SHOW CURRENT NAMESPACE"),
ShowCurrentNamespaceStatement())
}
test("alter table: SerDe properties") {
val sql1 = "ALTER TABLE table_name SET SERDE 'org.apache.class'"
val parsed1 = parsePlan(sql1)
val expected1 = AlterTableSerDePropertiesStatement(
Seq("table_name"), Some("org.apache.class"), None, None)
comparePlans(parsed1, expected1)
val sql2 =
"""
|ALTER TABLE table_name SET SERDE 'org.apache.class'
|WITH SERDEPROPERTIES ('columns'='foo,bar', 'field.delim' = ',')
""".stripMargin
val parsed2 = parsePlan(sql2)
val expected2 = AlterTableSerDePropertiesStatement(
Seq("table_name"),
Some("org.apache.class"),
Some(Map("columns" -> "foo,bar", "field.delim" -> ",")),
None)
comparePlans(parsed2, expected2)
val sql3 =
"""
|ALTER TABLE table_name
|SET SERDEPROPERTIES ('columns'='foo,bar', 'field.delim' = ',')
""".stripMargin
val parsed3 = parsePlan(sql3)
val expected3 = AlterTableSerDePropertiesStatement(
Seq("table_name"), None, Some(Map("columns" -> "foo,bar", "field.delim" -> ",")), None)
comparePlans(parsed3, expected3)
val sql4 =
"""
|ALTER TABLE table_name PARTITION (test=1, dt='2008-08-08', country='us')
|SET SERDE 'org.apache.class'
|WITH SERDEPROPERTIES ('columns'='foo,bar', 'field.delim' = ',')
""".stripMargin
val parsed4 = parsePlan(sql4)
val expected4 = AlterTableSerDePropertiesStatement(
Seq("table_name"),
Some("org.apache.class"),
Some(Map("columns" -> "foo,bar", "field.delim" -> ",")),
Some(Map("test" -> "1", "dt" -> "2008-08-08", "country" -> "us")))
comparePlans(parsed4, expected4)
val sql5 =
"""
|ALTER TABLE table_name PARTITION (test=1, dt='2008-08-08', country='us')
|SET SERDEPROPERTIES ('columns'='foo,bar', 'field.delim' = ',')
""".stripMargin
val parsed5 = parsePlan(sql5)
val expected5 = AlterTableSerDePropertiesStatement(
Seq("table_name"),
None,
Some(Map("columns" -> "foo,bar", "field.delim" -> ",")),
Some(Map("test" -> "1", "dt" -> "2008-08-08", "country" -> "us")))
comparePlans(parsed5, expected5)
val sql6 =
"""
|ALTER TABLE a.b.c SET SERDE 'org.apache.class'
|WITH SERDEPROPERTIES ('columns'='foo,bar', 'field.delim' = ',')
""".stripMargin
val parsed6 = parsePlan(sql6)
val expected6 = AlterTableSerDePropertiesStatement(
Seq("a", "b", "c"),
Some("org.apache.class"),
Some(Map("columns" -> "foo,bar", "field.delim" -> ",")),
None)
comparePlans(parsed6, expected6)
val sql7 =
"""
|ALTER TABLE a.b.c PARTITION (test=1, dt='2008-08-08', country='us')
|SET SERDEPROPERTIES ('columns'='foo,bar', 'field.delim' = ',')
""".stripMargin
val parsed7 = parsePlan(sql7)
val expected7 = AlterTableSerDePropertiesStatement(
Seq("a", "b", "c"),
None,
Some(Map("columns" -> "foo,bar", "field.delim" -> ",")),
Some(Map("test" -> "1", "dt" -> "2008-08-08", "country" -> "us")))
comparePlans(parsed7, expected7)
}
test("alter view: AS Query") {
val parsed = parsePlan("ALTER VIEW a.b.c AS SELECT 1")
val expected = AlterViewAsStatement(
Seq("a", "b", "c"), "SELECT 1", parsePlan("SELECT 1"))
comparePlans(parsed, expected)
}
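  // CreateViewStatement expectations are positional: view name parts, column name/comment
  // pairs, view comment, properties, original SQL text, parsed query, two boolean flags
  // (the second one is set by OR REPLACE), and the view type.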
test("create view -- basic") {
val v1 = "CREATE VIEW view1 AS SELECT * FROM tab1"
val parsed1 = parsePlan(v1)
val expected1 = CreateViewStatement(
Seq("view1"),
Seq.empty[(String, Option[String])],
None,
Map.empty[String, String],
Some("SELECT * FROM tab1"),
parsePlan("SELECT * FROM tab1"),
false,
false,
PersistedView)
comparePlans(parsed1, expected1)
val v2 = "CREATE TEMPORARY VIEW a.b.c AS SELECT * FROM tab1"
val parsed2 = parsePlan(v2)
val expected2 = CreateViewStatement(
Seq("a", "b", "c"),
Seq.empty[(String, Option[String])],
None,
Map.empty[String, String],
Some("SELECT * FROM tab1"),
parsePlan("SELECT * FROM tab1"),
false,
false,
LocalTempView)
comparePlans(parsed2, expected2)
}
test("create view - full") {
val v1 =
"""
|CREATE OR REPLACE VIEW view1
|(col1, col3 COMMENT 'hello')
|TBLPROPERTIES('prop1Key'="prop1Val")
|COMMENT 'BLABLA'
|AS SELECT * FROM tab1
""".stripMargin
val parsed1 = parsePlan(v1)
val expected1 = CreateViewStatement(
Seq("view1"),
Seq("col1" -> None, "col3" -> Some("hello")),
Some("BLABLA"),
Map("prop1Key" -> "prop1Val"),
Some("SELECT * FROM tab1"),
parsePlan("SELECT * FROM tab1"),
false,
true,
PersistedView)
comparePlans(parsed1, expected1)
val v2 =
"""
|CREATE OR REPLACE GLOBAL TEMPORARY VIEW a.b.c
|(col1, col3 COMMENT 'hello')
|TBLPROPERTIES('prop1Key'="prop1Val")
|COMMENT 'BLABLA'
|AS SELECT * FROM tab1
""".stripMargin
val parsed2 = parsePlan(v2)
val expected2 = CreateViewStatement(
Seq("a", "b", "c"),
Seq("col1" -> None, "col3" -> Some("hello")),
Some("BLABLA"),
Map("prop1Key" -> "prop1Val"),
Some("SELECT * FROM tab1"),
parsePlan("SELECT * FROM tab1"),
false,
true,
GlobalTempView)
comparePlans(parsed2, expected2)
}
test("create view -- partitioned view") {
val v1 = "CREATE VIEW view1 partitioned on (ds, hr) as select * from srcpart"
intercept[ParseException] {
parsePlan(v1)
}
}
test("create view - duplicate clauses") {
def createViewStatement(duplicateClause: String): String = {
s"""
|CREATE OR REPLACE VIEW view1
|(col1, col3 COMMENT 'hello')
|$duplicateClause
|$duplicateClause
|AS SELECT * FROM tab1
""".stripMargin
}
val sql1 = createViewStatement("COMMENT 'BLABLA'")
    val sql2 = createViewStatement("TBLPROPERTIES('prop1Key'=\"prop1Val\")")
intercept(sql1, "Found duplicate clauses: COMMENT")
intercept(sql2, "Found duplicate clauses: TBLPROPERTIES")
}
test("SHOW TBLPROPERTIES table") {
comparePlans(
parsePlan("SHOW TBLPROPERTIES a.b.c"),
ShowTableProperties(UnresolvedTable(Seq("a", "b", "c")), None))
comparePlans(
parsePlan("SHOW TBLPROPERTIES a.b.c('propKey1')"),
ShowTableProperties(UnresolvedTable(Seq("a", "b", "c")), Some("propKey1")))
}
test("DESCRIBE FUNCTION") {
comparePlans(
parsePlan("DESC FUNCTION a"),
DescribeFunctionStatement(Seq("a"), false))
comparePlans(
parsePlan("DESCRIBE FUNCTION a"),
DescribeFunctionStatement(Seq("a"), false))
comparePlans(
parsePlan("DESCRIBE FUNCTION a.b.c"),
DescribeFunctionStatement(Seq("a", "b", "c"), false))
comparePlans(
parsePlan("DESCRIBE FUNCTION EXTENDED a.b.c"),
DescribeFunctionStatement(Seq("a", "b", "c"), true))
}
test("SHOW FUNCTIONS") {
comparePlans(
parsePlan("SHOW FUNCTIONS"),
ShowFunctionsStatement(true, true, None, None))
comparePlans(
parsePlan("SHOW USER FUNCTIONS"),
ShowFunctionsStatement(true, false, None, None))
comparePlans(
parsePlan("SHOW user FUNCTIONS"),
ShowFunctionsStatement(true, false, None, None))
comparePlans(
parsePlan("SHOW SYSTEM FUNCTIONS"),
ShowFunctionsStatement(false, true, None, None))
comparePlans(
parsePlan("SHOW ALL FUNCTIONS"),
ShowFunctionsStatement(true, true, None, None))
comparePlans(
parsePlan("SHOW FUNCTIONS LIKE 'funct*'"),
ShowFunctionsStatement(true, true, Some("funct*"), None))
comparePlans(
parsePlan("SHOW FUNCTIONS LIKE a.b.c"),
ShowFunctionsStatement(true, true, None, Some(Seq("a", "b", "c"))))
val sql = "SHOW other FUNCTIONS"
intercept(sql, s"$sql not supported")
}
test("DROP FUNCTION") {
comparePlans(
parsePlan("DROP FUNCTION a"),
DropFunctionStatement(Seq("a"), false, false))
comparePlans(
parsePlan("DROP FUNCTION a.b.c"),
DropFunctionStatement(Seq("a", "b", "c"), false, false))
comparePlans(
parsePlan("DROP TEMPORARY FUNCTION a.b.c"),
DropFunctionStatement(Seq("a", "b", "c"), false, true))
comparePlans(
parsePlan("DROP FUNCTION IF EXISTS a.b.c"),
DropFunctionStatement(Seq("a", "b", "c"), true, false))
comparePlans(
parsePlan("DROP TEMPORARY FUNCTION IF EXISTS a.b.c"),
DropFunctionStatement(Seq("a", "b", "c"), true, true))
}
test("CREATE FUNCTION") {
parseCompare("CREATE FUNCTION a as 'fun'",
CreateFunctionStatement(Seq("a"), "fun", Seq(), false, false, false))
parseCompare("CREATE FUNCTION a.b.c as 'fun'",
CreateFunctionStatement(Seq("a", "b", "c"), "fun", Seq(), false, false, false))
parseCompare("CREATE OR REPLACE FUNCTION a.b.c as 'fun'",
CreateFunctionStatement(Seq("a", "b", "c"), "fun", Seq(), false, false, true))
parseCompare("CREATE TEMPORARY FUNCTION a.b.c as 'fun'",
CreateFunctionStatement(Seq("a", "b", "c"), "fun", Seq(), true, false, false))
parseCompare("CREATE FUNCTION IF NOT EXISTS a.b.c as 'fun'",
CreateFunctionStatement(Seq("a", "b", "c"), "fun", Seq(), false, true, false))
parseCompare("CREATE FUNCTION a as 'fun' USING JAR 'j'",
CreateFunctionStatement(Seq("a"), "fun", Seq(FunctionResource(JarResource, "j")),
false, false, false))
parseCompare("CREATE FUNCTION a as 'fun' USING ARCHIVE 'a'",
CreateFunctionStatement(Seq("a"), "fun", Seq(FunctionResource(ArchiveResource, "a")),
false, false, false))
parseCompare("CREATE FUNCTION a as 'fun' USING FILE 'f'",
CreateFunctionStatement(Seq("a"), "fun", Seq(FunctionResource(FileResource, "f")),
false, false, false))
parseCompare("CREATE FUNCTION a as 'fun' USING JAR 'j', ARCHIVE 'a', FILE 'f'",
CreateFunctionStatement(Seq("a"), "fun", Seq(FunctionResource(JarResource, "j"),
FunctionResource(ArchiveResource, "a"), FunctionResource(FileResource, "f")),
false, false, false))
intercept("CREATE FUNCTION a as 'fun' USING OTHER 'o'",
"Operation not allowed: CREATE FUNCTION with resource type 'other'")
}
private case class TableSpec(
name: Seq[String],
schema: Option[StructType],
partitioning: Seq[Transform],
bucketSpec: Option[BucketSpec],
properties: Map[String, String],
provider: String,
options: Map[String, String],
location: Option[String],
comment: Option[String])
private object TableSpec {
def apply(plan: LogicalPlan): TableSpec = {
plan match {
case create: CreateTableStatement =>
TableSpec(
create.tableName,
Some(create.tableSchema),
create.partitioning,
create.bucketSpec,
create.properties,
create.provider,
create.options,
create.location,
create.comment)
case replace: ReplaceTableStatement =>
TableSpec(
replace.tableName,
Some(replace.tableSchema),
replace.partitioning,
replace.bucketSpec,
replace.properties,
replace.provider,
replace.options,
replace.location,
replace.comment)
case ctas: CreateTableAsSelectStatement =>
TableSpec(
ctas.tableName,
Some(ctas.asSelect).filter(_.resolved).map(_.schema),
ctas.partitioning,
ctas.bucketSpec,
ctas.properties,
ctas.provider,
ctas.options,
ctas.location,
ctas.comment)
case rtas: ReplaceTableAsSelectStatement =>
TableSpec(
rtas.tableName,
Some(rtas.asSelect).filter(_.resolved).map(_.schema),
rtas.partitioning,
rtas.bucketSpec,
rtas.properties,
rtas.provider,
rtas.options,
rtas.location,
rtas.comment)
case other =>
fail(s"Expected to parse Create, CTAS, Replace, or RTAS plan" +
s" from query, got ${other.getClass.getName}.")
}
}
}
test("comment on") {
comparePlans(
parsePlan("COMMENT ON DATABASE a.b.c IS NULL"),
CommentOnNamespace(UnresolvedNamespace(Seq("a", "b", "c")), ""))
comparePlans(
parsePlan("COMMENT ON DATABASE a.b.c IS 'NULL'"),
CommentOnNamespace(UnresolvedNamespace(Seq("a", "b", "c")), "NULL"))
comparePlans(
parsePlan("COMMENT ON NAMESPACE a.b.c IS ''"),
CommentOnNamespace(UnresolvedNamespace(Seq("a", "b", "c")), ""))
comparePlans(
parsePlan("COMMENT ON TABLE a.b.c IS 'xYz'"),
CommentOnTable(UnresolvedTable(Seq("a", "b", "c")), "xYz"))
}
}
| darionyaphet/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala | Scala | apache-2.0 | 79,755 |
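A minimal sketch (not from the original suite) of how the TableSpec helper defined above is typically put to work: normalize a parsed CREATE TABLE plan and assert on individual fields instead of comparing whole plans. The table name, column list, "parquet" provider and comment are illustrative assumptions, not cases taken from the suite.

test("create table - TableSpec field checks (sketch)") {
  val parsedSpec = TableSpec(parsePlan(
    "CREATE TABLE my_tab (a INT, b STRING) USING parquet COMMENT 'demo'"))
  assert(parsedSpec.name == Seq("my_tab"))    // identifier parts
  assert(parsedSpec.provider == "parquet")    // USING clause
  assert(parsedSpec.comment == Some("demo"))  // COMMENT clause
}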
package com.twitter.finagle.service
import com.twitter.conversions.DurationOps._
import com.twitter.util.Duration
import com.twitter.finagle.benchmark.StdBenchAnnotations
import org.openjdk.jmh.annotations.{Scope, State, Benchmark}
// ./sbt 'project finagle-benchmark' 'jmh:run BackoffBenchmark'
class BackoffBenchmark extends StdBenchAnnotations {
import BackoffBenchmark._
@Benchmark
def fromFunction(state: FromFunction): Duration = state.next()
@Benchmark
def constant(state: Constant): Duration = state.next()
@Benchmark
def equalJittered(state: EqualJittered): Duration = state.next()
@Benchmark
def exponentialJittered(state: ExponentialJittered): Duration = state.next()
@Benchmark
def decorrelatedJittered(state: DecorrelatedJittered): Duration = state.next()
}
object BackoffBenchmark {
abstract class BackoffState(var backoff: Stream[Duration]) {
def next(): Duration = {
val head = backoff.head
backoff = backoff.tail
head
}
}
@State(Scope.Thread)
class FromFunction
extends BackoffState(
Backoff.fromFunction(() => 10.seconds) ++ Backoff.const(300.seconds)
)
@State(Scope.Thread)
class Constant
extends BackoffState(
Backoff.const(10.seconds) ++ Backoff.const(300.seconds)
)
@State(Scope.Thread)
class EqualJittered
extends BackoffState(
Backoff.equalJittered(5.seconds, 300.seconds) ++ Backoff.const(300.seconds)
)
@State(Scope.Thread)
class ExponentialJittered
extends BackoffState(
Backoff.exponentialJittered(5.second, 300.seconds) ++ Backoff.const(300.seconds)
)
@State(Scope.Thread)
class DecorrelatedJittered
extends BackoffState(
Backoff.decorrelatedJittered(5.second, 300.seconds) ++ Backoff.const(300.seconds)
)
}
| luciferous/finagle | finagle-benchmark/src/main/scala/com/twitter/finagle/service/BackoffBenchmark.scala | Scala | apache-2.0 | 1,823 |
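Each JMH state above simply walks a Stream[Duration], returning the head and keeping the tail. A dependency-free sketch of that same pattern (plain Scala, no JMH or Finagle; the names and the doubling backoff are made up):

object BackoffStateSketch {
  // Same idea as BackoffState: a mutable cursor over an immutable stream.
  final class StreamState[A](private var s: Stream[A]) {
    def next(): A = { val head = s.head; s = s.tail; head }
  }
  def main(args: Array[String]): Unit = {
    val backoffMillis = Stream.iterate(1L)(_ * 2) // 1, 2, 4, ... (illustrative delays in ms)
    val state = new StreamState(backoffMillis)
    println((1 to 5).map(_ => state.next()).mkString(", ")) // 1, 2, 4, 8, 16
  }
}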
package doodle
package jvm
import doodle.core._
import doodle.core.transform.Transform
import doodle.backend.{BoundingBox, Canvas}
import java.awt.Graphics2D
final class Java2DCanvas(graphics: Graphics2D, center: Point, screenCenter: Point) extends Canvas {
// Convert from canvas coordinates to screen coordinates.
//
// Shift the center of the bounding box to the origin.
//
// Reflect around the Y axis as the canvas Y coordinate is reversed
// compared to the Java2D Y axis.
//
// Then recenter the canvas to the center of the screen.
graphics.transform(
Java2D.toAffineTransform(
Transform.translate(-center.x, -center.y)
.andThen(Transform.horizontalReflection)
.andThen(Transform.translate(screenCenter.x, screenCenter.y))
)
)
def openPath(context: DrawingContext, elements: List[PathElement]): Unit = {
val path = Java2D.toPath2D(elements)
Java2D.strokeAndFill(graphics, path, context)
}
def closedPath(context: DrawingContext, elements: List[PathElement]): Unit = {
val path = Java2D.toPath2D(elements)
path.closePath()
Java2D.strokeAndFill(graphics, path, context)
}
def text(context: DrawingContext,
tx: Transform,
boundingBox: BoundingBox,
characters: String): Unit = {
// We have to do a few different transformations here:
//
// - The canvas Y coordinate is reversed with respect to the
// screen Y coordinate, so normally we have to reverse
// coordinates. However `drawString` will draw text oriented
    // correctly on the screen, so we need to reverse our reverse.
//
// - `drawString` draws from the bottom left corner of the text
// while the origin of the bounding box is the center of the text.
// Thus we need to translate to the bottom left corner.
val bottomLeft = Transform.translate(-boundingBox.width/2, -boundingBox.height/2)
val fullTx = Transform.horizontalReflection andThen tx andThen bottomLeft
val currentTx = graphics.getTransform()
graphics.transform(Java2D.toAffineTransform(fullTx))
context.stroke.foreach { s =>
Java2D.setStroke(graphics, s)
}
context.fillColor.foreach { f =>
Java2D.setFill(graphics, f)
}
context.font map { f =>
graphics.setFont(FontMetrics.toJFont(f))
graphics.drawString(characters, 0, 0)
}
graphics.setTransform(currentTx)
}
}
| Angeldude/doodle | jvm/src/main/scala/doodle/jvm/Java2DCanvas.scala | Scala | apache-2.0 | 2,413 |
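A plain-arithmetic sketch of the canvas-to-screen mapping described in the comments above, with no doodle or Java2D types involved; the coordinates are illustrative only:

object CanvasToScreenSketch {
  def toScreen(
      x: Double, y: Double,
      centerX: Double, centerY: Double,
      screenCenterX: Double, screenCenterY: Double): (Double, Double) = {
    // 1. shift the bounding-box centre to the origin
    val dx = x - centerX
    val dy = y - centerY
    // 2. flip the Y offset: the canvas Y axis points up while Java2D's points down
    val flippedY = -dy
    // 3. recentre on the middle of the screen
    (dx + screenCenterX, flippedY + screenCenterY)
  }
  def main(args: Array[String]): Unit =
    println(toScreen(10, 10, 0, 0, 200, 150)) // (210.0,140.0)
}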
package com.twitter.finatra.http.internal.routing
import com.twitter.finatra.conversions.pattern._
import java.util.regex.Matcher
import scala.collection.immutable
import scala.util.matching.Regex
object PathPattern {
/*
* The regex matches and captures route param names. Since a ':' character can be used in normal regular expressions
* for non-capturing groups (e.g., (?:...), we use a negative lookbehind to make sure we don't match a '?:'.
*/
  private val CaptureGroupNamesRegex = """(?<!\?):(\w+)""".r
/*
* The regex matches and captures wildcard route params (e.g., :*). Since a ':' character can be used in normal
* regular expressions for non-capturing groups (e.g., (?:...), we use a negative lookbehind to make sure we don't
* match a '?:'.
*/
  private val CaptureGroupAsteriskRegex = """(?<!\?):(\*)$""".r
/*
* The regex for asserting valid uri patterns is constructed to match invalid uri patterns and it's negated in the
* assert. The regex is broken into two clauses joined by an OR '|'.
*
   * Part 1: .*?\((?!\?:).*?
* Matches uris that have a '(' that is not followed by a literal '?:'. The regex uses negative lookahead to
* accomplish this. Since the '(' can occur anywhere or in multiple in the string, we add leading and trailing
* '.*?' for non-greedy matching of other characters.
*
   * Part 2: [\(]+
* Matches uris that have one or more '(' characters.
*
* By ORing both parts, we cover all invalid uris.
*/
  private val ValidPathRegex = """.*?\((?!\?:).*?|[\(]+""".r
private val SomeEmptyMap = Some(Map[String, String]())
def apply(uriPattern: String): PathPattern = {
assert(
!ValidPathRegex.matches(uriPattern),
"Capture groups are not directly supported. Please use :name syntax instead")
val regexStr = uriPattern.
replaceAll( """:\\*$""", """(.*)"""). // The special token :* captures everything after the prefix string
replaceAll( """/:\\w+""", """/([^/]+)""") // Replace "colon word (e.g. :id) with a capture group that stops at the next forward slash
PathPattern(
regex = new Regex(regexStr),
captureNames = captureGroupNames(uriPattern))
}
/* Private */
private def captureGroupNames(uriPattern: String): Seq[String] = {
findNames(uriPattern, CaptureGroupNamesRegex) ++
findNames(uriPattern, CaptureGroupAsteriskRegex)
}
/* We drop(1) to remove the leading ':' */
private def findNames(uriPattern: String, pattern: Regex): Seq[String] = {
pattern.findAllIn(uriPattern).toSeq map {_.drop(1)}
}
}
case class PathPattern(
regex: Regex,
captureNames: Seq[String] = Seq()) {
private val emptyCaptureNames = captureNames.isEmpty
def extract(requestPath: String): Option[Map[String, String]] = {
val matcher = regex.pattern.matcher(requestPath)
if (!matcher.matches)
None
else if (emptyCaptureNames)
PathPattern.SomeEmptyMap
else
Some(extractMatches(matcher))
}
//Optimized
private def extractMatches(matcher: Matcher): Map[String, String] = {
var idx = 0
val builder = immutable.Map.newBuilder[String, String]
for (captureName <- captureNames) {
idx += 1
builder += captureName -> matcher.group(idx)
}
builder.result()
}
}
| tom-chan/finatra | http/src/main/scala/com/twitter/finatra/http/internal/routing/PathPattern.scala | Scala | apache-2.0 | 3,297 |
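A small usage sketch (not part of Finatra) exercising the class above, assuming only the behaviour visible in this file: a ':name' segment stops at the next '/', and a trailing ':*' captures the rest of the path under the key "*":

object PathPatternSketch {
  def main(args: Array[String]): Unit = {
    val pattern = PathPattern("/users/:id/files/:*")
    println(pattern.extract("/users/42/files/a/b.txt"))
    // Expected: Some(Map(id -> 42, * -> a/b.txt))
    println(pattern.extract("/groups/42"))
    // Expected: None, the prefix does not match
  }
}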
package com.eigengo.lift.common
import java.util.UUID
@SerialVersionUID(1015l) case class UserId(id: UUID) extends AnyVal {
override def toString: String = id.toString
}
@SerialVersionUID(1016l) object UserId {
def randomId(): UserId = UserId(UUID.randomUUID())
def apply(s: String): UserId = UserId(UUID.fromString(s))
} | imace/open-muvr | server/spark/src/main/scala/com/eigengo/lift/spark/serialization/UserId.scala | Scala | apache-2.0 | 329 |
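A quick, hypothetical round-trip check for the UUID wrapper above (not part of the project):

object UserIdSketch {
  def main(args: Array[String]): Unit = {
    val id = UserId.randomId()
    assert(UserId(id.toString) == id) // the string form parses back to the same id
    println(id)
  }
}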
/**
* Copyright (C) 2012 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.exception
import org.orbeon.errorified._
import org.orbeon.oxf.common.{OrbeonLocationException, ValidationException}
import org.orbeon.oxf.util.CoreUtils._
import org.orbeon.datatypes.{ExtendedLocationData, LocationData}
import org.orbeon.oxf.util.StringUtils._
import scala.collection.compat._
// Orbeon-specific exception formatter
object OrbeonFormatter extends Formatter {
val Width = 120
val MaxStackLength = 80
override def getThrowableMessage(throwable: Throwable): Option[String] =
throwable match {
case ve: ValidationException => Option(ve.message)
case t => Option(t.getMessage)
}
override def getAllLocationData(t: Throwable): List[SourceLocation] =
OrbeonLocationException.getAllLocationData(t) flatMap sourceLocation
// Create SourceLocation from LocationData
private def sourceLocation(locationData: LocationData): Option[SourceLocation] =
locationData.file.nonAllBlank option {
val (description, params) =
locationData match {
case extended: ExtendedLocationData => (extended.description, extended.params)
case _ => (None, Nil)
}
SourceLocation(
locationData.file,
filterLineCol(locationData.line),
filterLineCol(locationData.col),
description,
params.to(List)
)
}
}
| orbeon/orbeon-forms | common/jvm/src/main/scala/org/orbeon/exception/OrbeonFormatter.scala | Scala | lgpl-2.1 | 2,025 |
package main.scala
trait IEchoClientProxy {
def msg(value: String): String;
} | labs2/FLiMSy | ServerFLiMSy/src/main/scala/IEchoClientProxy.scala | Scala | apache-2.0 | 83 |
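A hypothetical stub implementation of the proxy trait above, included only to show the expected call shape (not from the FLiMSy project):

class EchoClientProxyStub extends IEchoClientProxy {
  def msg(value: String): String = value // echo the payload back unchanged
}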
package org.openmole.plugin.task.gama
import java.io.FileNotFoundException
import monocle.macros._
import org.openmole.core.dsl._
import org.openmole.core.dsl.extension._
import org.openmole.core.exception.{InternalProcessingError, UserBadDataError}
import org.openmole.core.fileservice.FileService
import org.openmole.core.networkservice.NetworkService
import org.openmole.core.preference.Preference
import org.openmole.core.serializer.SerializerService
import org.openmole.core.threadprovider.ThreadProvider
import org.openmole.core.workflow.builder._
import org.openmole.core.workflow.task._
import org.openmole.core.workflow.validation._
import org.openmole.core.workspace.{TmpDirectory, Workspace}
import org.openmole.plugin.task.container
import org.openmole.plugin.task.container._
import org.openmole.plugin.task.external._
import org.openmole.tool.outputredirection.OutputRedirection
import scala.xml.XML
object GAMATask {
implicit def isTask: InputOutputBuilder[GAMATask] = InputOutputBuilder(GAMATask.config)
implicit def isExternal: ExternalBuilder[GAMATask] = ExternalBuilder(GAMATask.external)
implicit def isInfo = InfoBuilder(info)
implicit def isMapped = MappedInputOutputBuilder(GAMATask.mapped)
def inputXML = "/_model_input_.xml"
def workspaceDirectory = "/_workspace_"
def volumes(
workspace: File,
model: String) = (model, Seq(workspace -> workspaceDirectory))
def prepare(
workspace: File,
model: String,
experiment: String,
install: Seq[String],
installContainerSystem: ContainerSystem,
image: ContainerImage,
clearCache: Boolean)(implicit tmpDirectory: TmpDirectory, serializerService: SerializerService, outputRedirection: OutputRedirection, networkService: NetworkService, threadProvider: ThreadProvider, preference: Preference, _workspace: Workspace, fileService: FileService) = {
val (modelName, volumesValue) = volumes(workspace, model)
def installCommands =
install ++ Seq(s"gama-headless -xml '$experiment' '$workspaceDirectory/$modelName' '$inputXML'", s"ls '${inputXML}'")
def error(retCode: Int) =
retCode match {
case 2 => Some(s"""the file $inputXML has not been generated by "gama-headless -xml"""")
case _ => None
}
ContainerTask.prepare(installContainerSystem, image, installCommands, volumesValue.map { case (lv, cv) ⇒ lv.getAbsolutePath -> cv }, error, clearCache = clearCache)
}
def apply(
project: File,
gaml: String,
experiment: String,
finalStep: FromContext[Int],
seed: OptionalArgument[Val[Long]] = None,
frameRate: OptionalArgument[Int] = None,
install: Seq[String] = Seq.empty,
containerImage: ContainerImage = "gamaplatform/gama:1.8.2",
memory: OptionalArgument[Information] = None,
version: OptionalArgument[String] = None,
errorOnReturnValue: Boolean = true,
returnValue: OptionalArgument[Val[Int]] = None,
stdOut: OptionalArgument[Val[String]] = None,
stdErr: OptionalArgument[Val[String]] = None,
environmentVariables: Seq[EnvironmentVariable] = Vector.empty,
hostFiles: Seq[HostFile] = Vector.empty,
// workDirectory: OptionalArgument[String] = None,
clearContainerCache: Boolean = false,
containerSystem: ContainerSystem = ContainerSystem.default,
installContainerSystem: ContainerSystem = ContainerSystem.default)(implicit name: sourcecode.Name, definitionScope: DefinitionScope, newFile: TmpDirectory, _workspace: Workspace, preference: Preference, fileService: FileService, threadProvider: ThreadProvider, outputRedirection: OutputRedirection, networkService: NetworkService, serializerService: SerializerService): GAMATask = {
if (!project.exists()) throw new UserBadDataError(s"The project directory you specify does not exist: ${project}")
if (!(project / gaml).exists()) throw new UserBadDataError(s"The model file you specify does not exist: ${project / gaml}")
val gamaContainerImage: ContainerImage =
(version.option, containerImage) match {
case (None, c) => c
case (Some(v), c: DockerImage) => c.copy(tag = v)
case (Some(_), _: SavedDockerImage) => throw new UserBadDataError(s"Can not set both, a saved docker image, and, set the version of the container.")
}
val preparedImage = prepare(project, gaml, experiment, install, installContainerSystem, gamaContainerImage, clearCache = clearContainerCache)
GAMATask(
project = project,
gaml = gaml,
experiment = experiment,
finalStep = finalStep,
seed = seed,
frameRate = frameRate,
image = preparedImage,
memory = memory,
errorOnReturnValue = errorOnReturnValue,
returnValue = returnValue,
stdOut = stdOut,
stdErr = stdErr,
hostFiles = hostFiles,
environmentVariables = environmentVariables,
containerSystem = containerSystem,
config = InputOutputConfig(),
external = External(),
info = InfoConfig(),
mapped = MappedInputOutputConfig()
) set (
inputs += (seed.option.toSeq: _*),
outputs += (Seq(returnValue.option, stdOut.option, stdErr.option).flatten: _*)
)
}
def modifyInputXML(values: Map[String, String], finalStep: Int, seed: Long, frameRate: Option[Int]) = {
import xml._
import xml.transform._
def value(n: Node): Option[String] =
if (n.label != "Parameter") None
else n.attribute("var").flatMap(_.headOption.map(_.text)).flatMap(values.get) orElse n.attribute("name").flatMap(_.headOption.map(_.text)).flatMap(values.get)
val rewrite =
new RewriteRule {
override def transform(n: Node): Seq[Node] =
(n, value(n), frameRate) match {
case (n: Elem, Some(v), _) ⇒ n.copy(attributes = n.attributes.remove("value").append(Attribute(null, "value", v, Null)))
case (n: Elem, _, _) if n.label == "Simulation" =>
n.copy(attributes =
n.attributes
.remove("finalStep").append(Attribute(null, "finalStep", finalStep.toString, Null))
.remove("seed").append(Attribute(null, "seed", seed.toDouble.toString, Null))
)
case (n: Elem, _, Some(f)) if n.label == "Output" =>
n.copy(attributes = n.attributes.remove("framerate").append(Attribute(null, "framerate", f.toString, Null)))
case _ ⇒ n
}
}
new RuleTransformer(rewrite)
}
def acceptedOutputType(frame: Boolean): Seq[Manifest[_]] = {
def scalar =
Seq(
manifest[Double],
manifest[Int],
manifest[String],
manifest[Boolean]
)
if(!frame) scalar else scalar.map(_.arrayManifest)
}
}
@Lenses case class GAMATask(
project: File,
gaml: String,
experiment: String,
finalStep: FromContext[Int],
seed: OptionalArgument[Val[Long]],
frameRate: OptionalArgument[Int],
image: PreparedImage,
memory: OptionalArgument[Information],
errorOnReturnValue: Boolean,
returnValue: Option[Val[Int]],
stdOut: Option[Val[String]],
stdErr: Option[Val[String]],
hostFiles: Seq[HostFile],
environmentVariables: Seq[EnvironmentVariable],
containerSystem: ContainerSystem,
config: InputOutputConfig,
external: External,
info: InfoConfig,
mapped: MappedInputOutputConfig) extends Task with ValidateTask {
lazy val containerPoolKey = ContainerTask.newCacheKey
override def validate =
container.validateContainer(Vector(), environmentVariables, external) ++ finalStep.validate ++ {
import xml._
val inputXML = XML.loadFile(image.file / _root_.container.FlatImage.rootfsName / GAMATask.inputXML)
val parameters = (inputXML \\ "Simulation" \\ "Parameters" \\ "Parameter").collect { case x: Elem => x }
val outputs = (inputXML \\ "Simulation" \\ "Outputs" \\ "Output").collect { case x: Elem => x }
def gamaParameters = parameters.flatMap(e => e.attribute("var").flatMap(_.headOption).map(_.text)) ++ parameters.flatMap(e => e.attribute("name").flatMap(_.headOption).map(_.text))
def gamaParameterByName(name: String) =
parameters.filter(e => e.attribute("var").flatMap(_.headOption).map(_.text) == Some(name) || e.attribute("name").flatMap(_.headOption).map(_.text) == Some(name)).headOption
def gamaOutputs = outputs.flatMap(e => e.attribute("name").flatMap(_.headOption).map(_.text))
def gamaOutputByName(name: String) =
outputs.filter(e => e.attribute("name").flatMap(_.headOption).map(_.text) == Some(name)).headOption
def validateInputs = {
def typeMatch(v: Val[_], t: String) =
v match {
case Val.caseInt(v) => t == "INT" | t == "FLOAT"
case Val.caseDouble(v) => t == "INT" | t == "FLOAT"
case Val.caseString(v) => t == "STRING"
case Val.caseBoolean(v) => t == "BOOLEAN"
case _ => false
}
mapped.inputs.flatMap { m =>
gamaParameterByName(m.name) match {
case Some(p) =>
val gamaType = p.attribute("type").get.head.text
if(!typeMatch(m.v, gamaType)) Some(new UserBadDataError(s"""Type mismatch between mapped input ${m.v} and input "${m.name}" of type ${gamaType}.""")) else None
case None => Some(new UserBadDataError(s"""Mapped input "${m.name}" has not been found in the simulation among: ${gamaParameters.mkString(", ")}. Make sure it is defined in your gaml file"""))
}
}
}
def validateOutputs = {
val acceptedOutputsTypes = GAMATask.acceptedOutputType(frameRate.option.isDefined)
def accepted(c: Manifest[_]) = acceptedOutputsTypes.exists(t => t == c)
mapped.outputs.flatMap { m =>
gamaOutputByName(m.name) match {
case Some(_) => if(!accepted(m.v.`type`.manifest)) Some(new UserBadDataError(s"""Mapped output ${m} type is not supported (frameRate is ${frameRate.option.isDefined}, it implies that supported types are: ${acceptedOutputsTypes.mkString(", ")})""")) else None
case None => Some(new UserBadDataError(s"""Mapped output "${m.name}" has not been found in the simulation among: ${gamaOutputs.mkString(", ")}. Make sure it is defined in your gaml file."""))
}
}
}
if ((inputXML \\ "Simulation").isEmpty) Seq(new UserBadDataError(s"Experiment ${experiment} has not been found, make sure it is defined in your gaml file"))
else validateInputs ++ validateOutputs
}
override def process(executionContext: TaskExecutionContext) = FromContext { p ⇒
import p._
newFile.withTmpFile("inputs", ".xml") { inputFile ⇒
val seedValue = math.abs(seed.map(_.from(context)).getOrElse(random().nextLong))
def inputMap = mapped.inputs.map { m ⇒ m.name -> context(m.v).toString }.toMap
val inputXML = GAMATask.modifyInputXML(inputMap, finalStep.from(context), seedValue, frameRate.option).transform(XML.loadFile(image.file / _root_.container.FlatImage.rootfsName / GAMATask.inputXML))
def inputFileName = "/_inputs_openmole_.xml"
def outputDirectory = "/_output_"
inputFile.content =
s"""<?xml version="1.0" encoding="UTF-8" standalone="no"?>${inputXML.mkString("")}"""
val (_, volumes) = GAMATask.volumes(project, gaml)
def launchCommand =
memory.option match {
case None => s"gama-headless -hpc 1 $inputFileName $outputDirectory"
case Some(m) => s"gama-headless -m ${m.toMegabytes.toLong}m -hpc 1 $inputFileName $outputDirectory"
}
newFile.withTmpDir { tmpOutputDirectory =>
def containerTask =
ContainerTask(
image = image,
command = launchCommand,
containerSystem = containerSystem,
workDirectory = None,
relativePathRoot = Some(GAMATask.workspaceDirectory),
errorOnReturnValue = errorOnReturnValue,
returnValue = returnValue,
hostFiles = hostFiles,
environmentVariables = environmentVariables,
reuseContainer = true,
stdOut = stdOut,
stdErr = stdErr,
config = config,
external = external,
info = info,
containerPoolKey = containerPoolKey) set(
resources += (inputFile, inputFileName, true),
volumes.map { case (lv, cv) ⇒ resources +=[ContainerTask](lv, cv, true) },
resources += (tmpOutputDirectory, outputDirectory, true)
)
val resultContext = containerTask.process(executionContext).from(context)
def gamaOutputFile =
tmpOutputDirectory.
listFilesSafe.
filter(f => f.isFile && f.getName.startsWith("simulation-outputs") && f.getName.endsWith(".xml")).
sortBy(_.getName).headOption.getOrElse(throw new InternalProcessingError(s"""GAMA result file (simulation-outputsXX.xml) has not been found, the content of the output folder is: [${tmpOutputDirectory.list.mkString(", ")}]"""))
(mapped.outputs.isEmpty, frameRate.option) match {
case (true, _) => resultContext
case (false, None) =>
import xml._
def toVariable(v: Val[_], value: String) =
v match {
case Val.caseInt(v) => Variable(v, value.toInt)
case Val.caseDouble(v) => Variable(v, value.toDouble)
case Val.caseString(v) => Variable(v, value)
case Val.caseBoolean(v) => Variable(v, value.toBoolean)
case _ => throw new UserBadDataError(s"Unsupported type of output variable $v (supported types are Int, Double, String, Boolean)")
}
val outputs = Map[String, Val[_]]() ++ mapped.outputs.map { m => (m.name, m.v) }
def outputValue(e: Elem) =
for {
a <- e.attribute("name").flatMap(_.headOption)
value <- outputs.get(a.text)
} yield toVariable(value, e.child.text)
def extractOutputs(n: Node) =
(n \\ "Variable").flatMap {
case e: Elem => outputValue(e)
case _ => None
}
val simulationOutput = XML.loadFile(gamaOutputFile) \\ "Step"
resultContext ++ extractOutputs(simulationOutput.last)
case (false, Some(f)) =>
import xml._
def toVariable(v: Val[_], value: Array[String]) =
v match {
case Val.caseArrayInt(v) => Variable(v, value.map(_.toInt))
case Val.caseArrayDouble(v) => Variable(v, value.map(_.toDouble))
case Val.caseArrayString(v) => Variable(v, value)
case Val.caseArrayBoolean(v) => Variable(v, value.map(_.toBoolean))
case _ => throw new UserBadDataError(s"Unsupported type of output variable $v (supported types are Array[Int], Array[Double], Array[String], Array[Boolean])")
}
def outputValue(e: Elem, name: String) =
for {
a <- e.attribute("name").flatMap(_.headOption)
if a.text == name
} yield e.child.text
val simulationOutput = XML.loadFile(gamaOutputFile) \\ "Step"
resultContext ++ mapped.outputs.map { m =>
val values =
for {
o <- simulationOutput
v <- o \\ "Variable"
} yield
v match {
case o: Elem => outputValue(o, m.name)
case _ => None
}
toVariable(m.v, values.flatten.toArray)
}
}
}
}
}
}
| openmole/openmole | openmole/plugins/org.openmole.plugin.task.gama/src/main/scala/org/openmole/plugin/task/gama/GAMATask.scala | Scala | agpl-3.0 | 16,475 |
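A self-contained sketch of the XML-rewriting technique modifyInputXML relies on above: a scala-xml RewriteRule swapping the value attribute of matching Parameter elements. The experiment snippet and the parameter name are invented for illustration; the real task additionally rewrites finalStep, seed and framerate.

import scala.xml._
import scala.xml.transform._

object GamaXmlRewriteSketch {
  def main(args: Array[String]): Unit = {
    val experiment =
      <Experiment_plan>
        <Simulation finalStep="10" seed="0.0">
          <Parameters>
            <Parameter name="density" type="FLOAT" value="0.5"/>
          </Parameters>
        </Simulation>
      </Experiment_plan>
    val values = Map("density" -> "0.8")
    val rewrite = new RewriteRule {
      override def transform(n: Node): Seq[Node] = n match {
        case e: Elem if e.label == "Parameter" =>
          // look up the parameter by its "name" attribute and, if known, replace "value"
          e.attribute("name").flatMap(_.headOption).map(_.text).flatMap(values.get) match {
            case Some(v) =>
              e.copy(attributes = e.attributes.remove("value").append(Attribute(null, "value", v, Null)))
            case None => e
          }
        case other => other
      }
    }
    println(new RuleTransformer(rewrite).apply(experiment))
  }
}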
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.mutation
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.types.ArrayType
import org.apache.spark.storage.StorageLevel
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datamap.Segment
import org.apache.carbondata.core.exception.ConcurrentOperationException
import org.apache.carbondata.core.features.TableOperation
import org.apache.carbondata.core.locks.{CarbonLockFactory, CarbonLockUtil, LockUsage}
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.mutate.CarbonUpdateUtil
import org.apache.carbondata.core.statusmanager.SegmentStatusManager
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.events.{OperationContext, OperationListenerBus, UpdateTablePostEvent, UpdateTablePreEvent}
import org.apache.carbondata.processing.loading.FailureCauses
private[sql] case class CarbonProjectForUpdateCommand(
plan: LogicalPlan,
databaseNameOp: Option[String],
tableName: String,
columns: List[String])
extends DataCommand {
override def processData(sparkSession: SparkSession): Seq[Row] = {
val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
IUDCommonUtil.checkIfSegmentListIsSet(sparkSession, plan)
val res = plan find {
case relation: LogicalRelation if relation.relation
.isInstanceOf[CarbonDatasourceHadoopRelation] =>
true
case _ => false
}
if (res.isEmpty) {
return Seq.empty
}
val carbonTable = CarbonEnv.getCarbonTable(databaseNameOp, tableName)(sparkSession)
setAuditTable(carbonTable)
setAuditInfo(Map("plan" -> plan.simpleString))
columns.foreach { col =>
val dataType = carbonTable.getColumnByName(tableName, col).getColumnSchema.getDataType
if (dataType.isComplexType) {
throw new UnsupportedOperationException("Unsupported operation on Complex data type")
}
}
if (!carbonTable.getTableInfo.isTransactionalTable) {
throw new MalformedCarbonCommandException("Unsupported operation on non transactional table")
}
if (SegmentStatusManager.isCompactionInProgress(carbonTable)) {
throw new ConcurrentOperationException(carbonTable, "compaction", "data update")
}
if (SegmentStatusManager.isLoadInProgressInTable(carbonTable)) {
throw new ConcurrentOperationException(carbonTable, "loading", "data update")
}
if (!carbonTable.canAllow(carbonTable, TableOperation.UPDATE)) {
throw new MalformedCarbonCommandException(
"update operation is not supported for index datamap")
}
// trigger event for Update table
val operationContext = new OperationContext
val updateTablePreEvent: UpdateTablePreEvent =
UpdateTablePreEvent(sparkSession, carbonTable)
OperationListenerBus.getInstance.fireEvent(updateTablePreEvent, operationContext)
val metadataLock = CarbonLockFactory
.getCarbonLockObj(carbonTable.getAbsoluteTableIdentifier,
LockUsage.METADATA_LOCK)
var lockStatus = false
// get the current time stamp which should be same for delete and update.
val currentTime = CarbonUpdateUtil.readCurrentTime
// var dataFrame: DataFrame = null
var dataSet: DataFrame = null
val isPersistEnabled = CarbonProperties.getInstance.isPersistUpdateDataset
try {
lockStatus = metadataLock.lockWithRetries()
if (lockStatus) {
logInfo("Successfully able to get the table metadata file lock")
}
else {
throw new Exception("Table is locked for updation. Please try after some time")
}
// Get RDD.
dataSet = if (isPersistEnabled) {
Dataset.ofRows(sparkSession, plan).persist(StorageLevel.fromString(
CarbonProperties.getInstance.getUpdateDatasetStorageLevel()))
}
else {
Dataset.ofRows(sparkSession, plan)
}
val executionErrors = new ExecutionErrors(FailureCauses.NONE, "")
// handle the clean up of IUD.
CarbonUpdateUtil.cleanUpDeltaFiles(carbonTable, false)
// do delete operation.
val segmentsToBeDeleted = DeleteExecution.deleteDeltaExecution(
databaseNameOp,
tableName,
sparkSession,
dataSet.rdd,
currentTime + "",
isUpdateOperation = true,
executionErrors)
if (executionErrors.failureCauses != FailureCauses.NONE) {
throw new Exception(executionErrors.errorMsg)
}
// do update operation.
performUpdate(dataSet,
databaseNameOp,
tableName,
plan,
sparkSession,
currentTime,
executionErrors,
segmentsToBeDeleted)
if (executionErrors.failureCauses != FailureCauses.NONE) {
throw new Exception(executionErrors.errorMsg)
}
// Do IUD Compaction.
HorizontalCompaction.tryHorizontalCompaction(
sparkSession, carbonTable, isUpdateOperation = true)
// trigger event for Update table
val updateTablePostEvent: UpdateTablePostEvent =
UpdateTablePostEvent(sparkSession, carbonTable)
OperationListenerBus.getInstance.fireEvent(updateTablePostEvent, operationContext)
} catch {
case e: HorizontalCompactionException =>
LOGGER.error(
"Update operation passed. Exception in Horizontal Compaction. Please check logs." + e)
// In case of failure , clean all related delta files
CarbonUpdateUtil.cleanStaleDeltaFiles(carbonTable, e.compactionTimeStamp.toString)
case e: Exception =>
LOGGER.error("Exception in update operation", e)
// ****** start clean up.
// In case of failure , clean all related delete delta files
CarbonUpdateUtil.cleanStaleDeltaFiles(carbonTable, currentTime + "")
// *****end clean up.
if (null != e.getMessage) {
sys.error("Update operation failed. " + e.getMessage)
}
if (null != e.getCause && null != e.getCause.getMessage) {
sys.error("Update operation failed. " + e.getCause.getMessage)
}
sys.error("Update operation failed. please check logs.")
}
finally {
if (null != dataSet && isPersistEnabled) {
dataSet.unpersist()
}
if (lockStatus) {
CarbonLockUtil.fileUnlock(metadataLock, LockUsage.METADATA_LOCK)
}
}
Seq.empty
}
private def performUpdate(
dataFrame: Dataset[Row],
databaseNameOp: Option[String],
tableName: String,
plan: LogicalPlan,
sparkSession: SparkSession,
currentTime: Long,
executorErrors: ExecutionErrors,
deletedSegments: Seq[Segment]): Unit = {
def isDestinationRelation(relation: CarbonDatasourceHadoopRelation): Boolean = {
val dbName = CarbonEnv.getDatabaseName(databaseNameOp)(sparkSession)
(databaseNameOp.isDefined &&
databaseNameOp.get == dbName &&
tableName == relation.identifier.getCarbonTableIdentifier.getTableName) ||
(tableName == relation.identifier.getCarbonTableIdentifier.getTableName)
}
// from the dataFrame schema iterate through all the column to be updated and
// check for the data type, if the data type is complex then throw exception
def checkForUnsupportedDataType(dataFrame: DataFrame): Unit = {
dataFrame.schema.foreach(col => {
// the new column to be updated will be appended with "-updatedColumn" suffix
if (col.name.endsWith(CarbonCommonConstants.UPDATED_COL_EXTENSION) &&
col.dataType.isInstanceOf[ArrayType]) {
throw new UnsupportedOperationException("Unsupported data type: Array")
}
})
}
def getHeader(relation: CarbonDatasourceHadoopRelation, plan: LogicalPlan): String = {
var header = ""
var found = false
plan match {
case Project(pList, _) if (!found) =>
found = true
header = pList
.filter(field => !field.name
.equalsIgnoreCase(CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID))
.map(col => if (col.name.endsWith(CarbonCommonConstants.UPDATED_COL_EXTENSION)) {
col.name
.substring(0, col.name.lastIndexOf(CarbonCommonConstants.UPDATED_COL_EXTENSION))
}
else {
col.name
}).mkString(",")
}
header
}
// check for the data type of the new value to be updated
checkForUnsupportedDataType(dataFrame)
val ex = dataFrame.queryExecution.analyzed
val res = ex find {
case relation: LogicalRelation
if relation.relation.isInstanceOf[CarbonDatasourceHadoopRelation] &&
isDestinationRelation(relation.relation.asInstanceOf[CarbonDatasourceHadoopRelation]) =>
true
case _ => false
}
val carbonRelation: CarbonDatasourceHadoopRelation = res match {
case Some(relation: LogicalRelation) =>
relation.relation.asInstanceOf[CarbonDatasourceHadoopRelation]
case _ => sys.error("")
}
val updateTableModel = UpdateTableModel(true, currentTime, executorErrors, deletedSegments)
val header = getHeader(carbonRelation, plan)
CarbonLoadDataCommand(
Some(carbonRelation.identifier.getCarbonTableIdentifier.getDatabaseName),
carbonRelation.identifier.getCarbonTableIdentifier.getTableName,
null,
Seq(),
Map(("fileheader" -> header)),
false,
null,
Some(dataFrame),
Some(updateTableModel)).run(sparkSession)
executorErrors.errorMsg = updateTableModel.executorErrors.errorMsg
executorErrors.failureCauses = updateTableModel.executorErrors.failureCauses
Seq.empty
}
override protected def opName: String = "UPDATE DATA"
}
| manishgupta88/incubator-carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForUpdateCommand.scala | Scala | apache-2.0 | 11,030 |
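A dependency-free sketch of the header-building rule implemented by getHeader above: drop the implicit tuple-id column and strip the updated-column suffix before handing the column list to the loader. The two constants are illustrative stand-ins, not CarbonCommonConstants' actual values.

object UpdateHeaderSketch {
  val TupleIdColumn = "tupleId"          // stand-in for CARBON_IMPLICIT_COLUMN_TUPLEID
  val UpdatedSuffix = "-updatedColumn"   // stand-in for UPDATED_COL_EXTENSION
  def header(projected: Seq[String]): String =
    projected
      .filterNot(_.equalsIgnoreCase(TupleIdColumn))
      .map(c => if (c.endsWith(UpdatedSuffix)) c.substring(0, c.lastIndexOf(UpdatedSuffix)) else c)
      .mkString(",")
  def main(args: Array[String]): Unit =
    println(header(Seq("id", "status-updatedColumn", "tupleId"))) // prints: id,status
}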
package model
import upickle._
case class LoanApplicationRegister(
asOfDate: Int,
respondentId: String,
agencyCode: Int,
loanType: Int,
propertyType: Int,
purpose: Int,
occupancy: Int,
amount: String,
actionType: Int,
msa: String,
state: String,
county: String,
tract: String,
fips: String,
reason1: String,
reason2: String,
reason3: String,
editStatus: String,
preApprovals: Int,
ethnicity: Int,
coEthnicity: Int,
race1: Int,
race2: String,
race3: String,
race4: String,
race5: String,
coRace1: Int,
coRace2: String,
coRace3: String,
coRace4: String,
coRace5: String,
sex: Int,
coSex: Int,
purchaserType: Int,
rateSpread: String,
hoepaStatus: Int,
lienStatus: Int,
sequenceNumber: String)
object LoanApplicationRegister {
implicit val larWriter = upickle.Writer[LoanApplicationRegister] {
case t => Js.Obj(
("asOfDate",Js.Num(t.asOfDate)),
("respondentId", Js.Str(t.respondentId)),
("agencyCode",Js.Num(t.agencyCode)),
("loanType",Js.Num(t.loanType)),
("propertyType",Js.Num(t.propertyType)),
("purpose",Js.Num(t.purpose)),
("occupancy",Js.Num(t.occupancy)),
("amount",Js.Str(t.amount)),
("actionType",Js.Num(t.actionType)),
("msa",Js.Str(t.msa)),
("state",Js.Str(t.state)),
("county",Js.Str(t.county)),
("tract",Js.Str(t.tract)),
("fips",Js.Str(t.fips)),
("reason1",Js.Str(t.reason1)),
("reason2",Js.Str(t.reason2)),
("reason3",Js.Str(t.reason3)),
("editStatus",Js.Str(t.editStatus)),
("preApprovals",Js.Num(t.preApprovals)),
("ethnicity",Js.Num(t.ethnicity)),
("coEthnicity",Js.Num(t.ethnicity)),
("race1",Js.Num(t.race1)),
("race2",Js.Str(t.race2)),
("race3",Js.Str(t.race3)),
("race4",Js.Str(t.race4)),
("race5",Js.Str(t.race5)),
("coRace1",Js.Num(t.coRace1)),
("coRace2",Js.Str(t.coRace2)),
("coRace3",Js.Str(t.coRace3)),
("coRace4",Js.Str(t.coRace4)),
("coRace5",Js.Str(t.coRace5)),
("sex",Js.Num(t.sex)),
("coSex",Js.Num(t.coSex)),
("purchaserType",Js.Num(t.purchaserType)),
("rateSpread",Js.Str(t.rateSpread)),
("hoepaStatus",Js.Num(t.hoepaStatus)),
("lienStatus",Js.Num(t.lienStatus)),
("sequenceNumber",Js.Str(t.sequenceNumber))
)
}
implicit val larReader = upickle.Reader[LoanApplicationRegister] {
case Js.Obj(
asOfDate,
respondentId,
agencyCode,
loanType,
propertyType,
purpose,
occupancy,
amount,
actionType,
msa,
state,
county,
tract,
fips,
reason1,
reason2,
reason3,
editStatus,
preApprovals,
ethnicity,
coEthnicity,
race1,
race2,
race3,
race4,
race5,
coRace1,
coRace2,
coRace3,
coRace4,
coRace5,
sex,
coSex,
purchaserType,
rateSpread,
hoepaStatus,
lienStatus,
sequenceNumber) =>
LoanApplicationRegister(
asOfDate._2.value.toString.toDouble.toInt,
respondentId._2.value.toString,
agencyCode._2.value.toString.toDouble.toInt,
loanType._2.value.toString.toDouble.toInt,
propertyType._2.value.toString.toDouble.toInt,
purpose._2.value.toString.toDouble.toInt,
occupancy._2.value.toString.toDouble.toInt,
amount._2.value.toString,
actionType._2.value.toString.toDouble.toInt,
msa._2.value.toString,
state._2.value.toString,
county._2.value.toString,
tract._2.value.toString,
fips._2.value.toString,
reason1._2.value.toString,
reason2._2.value.toString,
reason3._2.value.toString,
editStatus._2.value.toString,
preApprovals._2.value.toString.toDouble.toInt,
ethnicity._2.value.toString.toDouble.toInt,
coEthnicity._2.value.toString.toDouble.toInt,
race1._2.value.toString.toDouble.toInt,
race2._2.value.toString,
race3._2.value.toString,
race4._2.value.toString,
race5._2.value.toString,
coRace1._2.value.toString.toDouble.toInt,
coRace2._2.value.toString,
coRace3._2.value.toString,
coRace4._2.value.toString,
coRace5._2.value.toString,
sex._2.value.toString.toDouble.toInt,
coSex._2.value.toString.toDouble.toInt,
purchaserType._2.value.toString.toDouble.toInt,
rateSpread._2.value.toString,
hoepaStatus._2.value.toString.toDouble.toInt,
lienStatus._2.value.toString.toDouble.toInt,
sequenceNumber._2.value.toString
)
}
} | jmarin/hmdapub | hmdapub/shared/src/main/scala/model/LoanApplicationRegister.scala | Scala | apache-2.0 | 4,826 |
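A hedged round-trip sketch for the pickler above. It assumes the legacy upickle API this file is written against, where upickle.write and upickle.read are reachable at the package level and pick up the hand-written Writer/Reader implicitly; every field value below is invented.

object LarPickleSketch {
  def main(args: Array[String]): Unit = {
    val lar = LoanApplicationRegister(
      asOfDate = 2014, respondentId = "0123456789", agencyCode = 9,
      loanType = 1, propertyType = 1, purpose = 1, occupancy = 1, amount = "120",
      actionType = 1, msa = "10100", state = "01", county = "001",
      tract = "0001.00", fips = "01001",
      reason1 = "", reason2 = "", reason3 = "", editStatus = "", preApprovals = 3,
      ethnicity = 2, coEthnicity = 5,
      race1 = 5, race2 = "", race3 = "", race4 = "", race5 = "",
      coRace1 = 8, coRace2 = "", coRace3 = "", coRace4 = "", coRace5 = "",
      sex = 1, coSex = 5, purchaserType = 0, rateSpread = "NA",
      hoepaStatus = 2, lienStatus = 1, sequenceNumber = "0000001")
    val json = upickle.write(lar)                           // uses the implicit larWriter
    val back = upickle.read[LoanApplicationRegister](json)  // uses the implicit larReader
    assert(back == lar)
    println(json)
  }
}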
package layer.module
import layer.configuration.Project
/**
* ModuleCompactor.scala
*
* Project: DesignRecovery
* Copyright (c) 2011 Chanjin Park
* License - GNU LESSER GENERAL PUBLIC LICENSE v3.0 (LGPL v3.0)
*
*/
abstract class ModuleCompactor[S <: ModuleStructure, T <: ModuleNode] {
protected def getDirectUpperNode(n: T): T
protected def getDirectLowerNode(n: T): T
private def subsumeIncomingEdges(n1: T, n2: T) = {
n2.inedges.map(_.getSource).filter(_ != n1).forall(node => n1.inedges.map(_.getSource).contains(node))
}
/**
* Constraint for edges of LP node n : outedges(n).forall(e => e.getTarget.level <= n.level)
* Iceberg pattern condition: n is hidden by iceberg pattern <=>
* 0. n has direct upper node of same package, upn
* 1-1. n has no incoming edges except from direct upper node
* 1-2. n has incoming edges, (subsumed by upn) n.inedges.map(_.getSource).forAll(node => upn.inedges.map(_.getSource).contains(node))
* -. n's outedges do not affect hiding algorithm
**/
private def iceberg(n: T): Boolean = {
val lun = getDirectUpperNode(n)
if (lun == n) false
else {
if (n.inedges.map(_.getSource).filter(_ != lun).length == 0) true
else
subsumeIncomingEdges(lun, n)
}
}
  /** Fall-through pattern condition: n is hidden by fall-through pattern <=>
   * 0. n has a greatest lower node of the same package, gln
   * 1. outedges(n).forall(e => e.getTarget.level <= gln.level) -- n's target nodes will become gln's target nodes
   * 2-1. n has no incoming edges
   * 2-2. n has incoming edges, (subsumed by gln) n.inedges.map(_.getSource).forall(node => gln.inedges.map(_.getSource).contains(node))
   * */
protected def satisfyLayerConstraint(n: T, gln: T): Boolean
private def fallthrough(n: T): Boolean = {
val gln = getDirectLowerNode(n)
if (gln == n) false
else {
if (!satisfyLayerConstraint(n, gln)) false
else {
if (n.inedges == 0) true
else subsumeIncomingEdges(gln, n)
}
}
}
protected def retains(key: String): Boolean
private def compact(ms: S): Map[T, T] = {
val ibpairs: Map[T, T] = ms.nodes.values.foldLeft(Map[T, T]())((result, n) => {
if (retains(n.value)) result
else {
val ln = n.asInstanceOf[T]
if (iceberg(ln)) result + (ln -> getDirectUpperNode(ln))
else result
}
})
def adjustPairs(pairs: Map[T, T]) = {
pairs.map(pair => {
var nTo: T = pair._2
while (pairs.contains(nTo)) nTo = pairs(nTo)
(pair._1, nTo)
})
}
// ib constraint: pair is increasing (1, 2) & (2, 3) => (1, 2, 3), but (1, 4) & (2, 3) impossible
// (1, 2) & (2, 3) & (3, 4) => (1, 3) & (2, 4) & (3, 4)
val newibpairs = adjustPairs(ibpairs)
val ftpairs: Map[T, T] = ms.nodes.values.foldLeft(Map[T, T]())((result, n) => {
if (retains(n.value)) result
else {
val ln = n.asInstanceOf[T]
if (fallthrough(ln)) result + (ln -> getDirectLowerNode(ln))
else result
}
})
// ft constraint: pair is decreasing (3, 2) & (2, 1) => (1, 2, 3), but (4, 1) & (3, 2) impossible ... =
val newftpairs = adjustPairs(ftpairs).foldLeft(Map[T, T]())((result, pair) => {
// newibparts' keys should not be included in its values
// condition:
assert(newibpairs.values.toList.forall(n => !newibpairs.contains(n)))
if (!newibpairs.contains(pair._2) && !newibpairs.contains(pair._1)) result + pair //&& !newibpairs.contains(pair._1)
else
result
})
print("iceburg compaction: ")
println(newibpairs.mkString(", "))
print("fallthrough compaction: ")
println(newftpairs.mkString(", "))
newftpairs.foldLeft(newibpairs)((result, pair) => {
assert(!newibpairs.contains(pair._1))
result + pair
})
}
protected def createNode(n: T): T
protected def createStructure(tdg: TypeStructure): S
private def merge(mergeList: Map[T, T], ms: S): S = {
val newlps = createStructure(ms.tdg) //new S(ms.tdg)
ms.nodes.values.foreach(n => {
val ln = n.asInstanceOf[T]
if (mergeList.contains(ln)) {
val lnTo = mergeList(ln)
val mergeTo = newlps.nodes.getOrElse(lnTo.value, newlps.addNode(createNode(lnTo)))
//newlps.addNode(new LayerModuleContainerNode(lnTo.value, lnTo.rank, lnTo.pkgname, List(lnTo))));
mergeTo.asInstanceOf[CompositeModuleNode[T]].addModule(ln)
} else {
if (!newlps.nodes.contains(ln.value))
newlps.addNode(createNode(ln)) // new LayerModuleContainerNode(ln.value, ln.rank, ln.pkgname, List(ln)))
}
})
newlps.nodes.values.foreach(n =>
newlps.addTypeNodes(n.asInstanceOf[T], n.asInstanceOf[CompositeModuleNode[T]].getTypeNodes));
newlps.liftEdges
assert(ms.mapT2M.size == newlps.mapT2M.size)
newlps
}
protected def reflectNewStructure(newstructure: S): Unit
def run(ms: S): S = {
reflectNewStructure(ms)
var cms = ms // compact module structure
var done = false
while (!done) {
val cms1 = merge(compact(cms), cms)
if (cms1.nodes.size == cms.nodes.size) done = true
else {
cms = cms1
reflectNewStructure(cms1)
}
}
cms
}
}
object ModuleCompactor {
def retainPackages = Project.get.retainPackages
}
| chanjin/DesignEvolution | src/main/scala/layer/module/ModuleCompactor.scala | Scala | apache-2.0 | 5,345 |
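A toy, self-contained sketch of the incoming-edge subsumption condition that both hiding patterns above rely on: node n may be folded into m only when every source reaching n, other than m itself, also reaches m. The Map-based graph and the node names are purely illustrative.

object SubsumptionSketch {
  // incoming edges keyed by target node; each value is the set of source nodes
  val incoming: Map[String, Set[String]] = Map(
    "upper" -> Set("client"),
    "lower" -> Set("client", "upper"))
  def subsumes(m: String, n: String): Boolean =
    (incoming.getOrElse(n, Set.empty[String]) - m)
      .forall(incoming.getOrElse(m, Set.empty[String]).contains)
  def main(args: Array[String]): Unit =
    println(subsumes("upper", "lower")) // true: "lower" can be hidden behind "upper"
}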
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import java.util.concurrent.{CountDownLatch, TimeUnit}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicLong, AtomicReference}
import scala.annotation.meta.param
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Map}
import scala.util.control.NonFatal
import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.exceptions.TestFailedException
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.broadcast.BroadcastManager
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.internal.config
import org.apache.spark.rdd.{DeterministicLevel, RDD}
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.shuffle.{FetchFailedException, MetadataFetchFailedException}
import org.apache.spark.storage.{BlockId, BlockManagerId, BlockManagerMaster}
import org.apache.spark.util.{AccumulatorContext, AccumulatorV2, CallSite, LongAccumulator, ThreadUtils, Utils}
class DAGSchedulerEventProcessLoopTester(dagScheduler: DAGScheduler)
extends DAGSchedulerEventProcessLoop(dagScheduler) {
override def post(event: DAGSchedulerEvent): Unit = {
try {
// Forward event to `onReceive` directly to avoid processing event asynchronously.
onReceive(event)
} catch {
case NonFatal(e) => onError(e)
}
}
override def onError(e: Throwable): Unit = {
logError("Error in DAGSchedulerEventLoop: ", e)
dagScheduler.stop()
throw e
}
}
class MyCheckpointRDD(
sc: SparkContext,
numPartitions: Int,
dependencies: List[Dependency[_]],
locations: Seq[Seq[String]] = Nil,
@(transient @param) tracker: MapOutputTrackerMaster = null,
indeterminate: Boolean = false)
extends MyRDD(sc, numPartitions, dependencies, locations, tracker, indeterminate) {
// Allow doCheckpoint() on this RDD.
override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] =
Iterator.empty
}
/**
* An RDD for passing to DAGScheduler. These RDDs will use the dependencies and
* preferredLocations (if any) that are passed to them. They are deliberately not executable
* so we can test that DAGScheduler does not try to execute RDDs locally.
*
* Optionally, one can pass in a list of locations to use as preferred locations for each task,
* and a MapOutputTrackerMaster to enable reduce task locality. We pass the tracker separately
* because, in this test suite, it won't be the same as sc.env.mapOutputTracker.
*/
class MyRDD(
sc: SparkContext,
numPartitions: Int,
dependencies: List[Dependency[_]],
locations: Seq[Seq[String]] = Nil,
@(transient @param) tracker: MapOutputTrackerMaster = null,
indeterminate: Boolean = false)
extends RDD[(Int, Int)](sc, dependencies) with Serializable {
override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] =
throw new RuntimeException("should not be reached")
override def getPartitions: Array[Partition] = (0 until numPartitions).map(i => new Partition {
override def index: Int = i
}).toArray
override protected def getOutputDeterministicLevel = {
if (indeterminate) DeterministicLevel.INDETERMINATE else super.getOutputDeterministicLevel
}
override def getPreferredLocations(partition: Partition): Seq[String] = {
if (locations.isDefinedAt(partition.index)) {
locations(partition.index)
} else if (tracker != null && dependencies.size == 1 &&
dependencies(0).isInstanceOf[ShuffleDependency[_, _, _]]) {
// If we have only one shuffle dependency, use the same code path as ShuffledRDD for locality
val dep = dependencies(0).asInstanceOf[ShuffleDependency[_, _, _]]
tracker.getPreferredLocationsForShuffle(dep, partition.index)
} else {
Nil
}
}
override def toString: String = "DAGSchedulerSuiteRDD " + id
}
class DAGSchedulerSuiteDummyException extends Exception
class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLimits {
import DAGSchedulerSuite._
// Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
implicit val defaultSignaler: Signaler = ThreadSignaler
val conf = new SparkConf
/** Set of TaskSets the DAGScheduler has requested executed. */
val taskSets = scala.collection.mutable.Buffer[TaskSet]()
/** Stages for which the DAGScheduler has called TaskScheduler.cancelTasks(). */
val cancelledStages = new HashSet[Int]()
val tasksMarkedAsCompleted = new ArrayBuffer[Task[_]]()
val taskScheduler = new TaskScheduler() {
override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
override def start() = {}
override def stop() = {}
override def executorHeartbeatReceived(
execId: String,
accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
blockManagerId: BlockManagerId,
executorUpdates: Map[(Int, Int), ExecutorMetrics]): Boolean = true
override def submitTasks(taskSet: TaskSet) = {
// normally done by TaskSetManager
taskSet.tasks.foreach(_.epoch = mapOutputTracker.getEpoch)
taskSets += taskSet
}
override def cancelTasks(stageId: Int, interruptThread: Boolean): Unit = {
cancelledStages += stageId
}
override def killTaskAttempt(
taskId: Long, interruptThread: Boolean, reason: String): Boolean = false
override def killAllTaskAttempts(
stageId: Int, interruptThread: Boolean, reason: String): Unit = {}
override def notifyPartitionCompletion(stageId: Int, partitionId: Int): Unit = {
taskSets.filter(_.stageId == stageId).lastOption.foreach { ts =>
val tasks = ts.tasks.filter(_.partitionId == partitionId)
assert(tasks.length == 1)
tasksMarkedAsCompleted += tasks.head
}
}
override def setDAGScheduler(dagScheduler: DAGScheduler) = {}
override def defaultParallelism() = 2
override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
override def applicationAttemptId(): Option[String] = None
}
/**
* Listeners which records some information to verify in UTs. Getter-kind methods in this class
* ensures the value is returned after ensuring there's no event to process, as well as the
* value is immutable: prevent showing odd result by race condition.
*/
class EventInfoRecordingListener extends SparkListener {
private val _submittedStageInfos = new HashSet[StageInfo]
private val _successfulStages = new HashSet[Int]
private val _failedStages = new ArrayBuffer[Int]
private val _stageByOrderOfExecution = new ArrayBuffer[Int]
private val _endedTasks = new HashSet[Long]
override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit = {
_submittedStageInfos += stageSubmitted.stageInfo
}
override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = {
val stageInfo = stageCompleted.stageInfo
_stageByOrderOfExecution += stageInfo.stageId
if (stageInfo.failureReason.isEmpty) {
_successfulStages += stageInfo.stageId
} else {
_failedStages += stageInfo.stageId
}
}
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
_endedTasks += taskEnd.taskInfo.taskId
}
def submittedStageInfos: Set[StageInfo] = {
waitForListeners()
_submittedStageInfos.toSet
}
def successfulStages: Set[Int] = {
waitForListeners()
_successfulStages.toSet
}
def failedStages: List[Int] = {
waitForListeners()
_failedStages.toList
}
def stageByOrderOfExecution: List[Int] = {
waitForListeners()
_stageByOrderOfExecution.toList
}
def endedTasks: Set[Long] = {
waitForListeners()
_endedTasks.toSet
}
private def waitForListeners(): Unit = sc.listenerBus.waitUntilEmpty()
}
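// The vars below are (re)initialized for each test case by beforeEach()/init().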
var sparkListener: EventInfoRecordingListener = null
var mapOutputTracker: MapOutputTrackerMaster = null
var broadcastManager: BroadcastManager = null
var securityMgr: SecurityManager = null
var scheduler: DAGScheduler = null
var dagEventProcessLoopTester: DAGSchedulerEventProcessLoop = null
/**
* Set of cache locations to return from our mock BlockManagerMaster.
* Keys are (rdd ID, partition ID). Anything not present will return an empty
* list of cache locations silently.
*/
val cacheLocations = new HashMap[(Int, Int), Seq[BlockManagerId]]
// stub out BlockManagerMaster.getLocations to use our cacheLocations
val blockManagerMaster = new BlockManagerMaster(null, null, conf, true) {
override def getLocations(blockIds: Array[BlockId]): IndexedSeq[Seq[BlockManagerId]] = {
blockIds.map {
_.asRDDId.map(id => (id.rddId -> id.splitIndex)).flatMap(key => cacheLocations.get(key)).
getOrElse(Seq())
}.toIndexedSeq
}
override def removeExecutor(execId: String): Unit = {
// don't need to propagate to the driver, which we don't have
}
}
/** The list of results that DAGScheduler has collected. */
val results = new HashMap[Int, Any]()
var failure: Exception = _
val jobListener = new JobListener() {
override def taskSucceeded(index: Int, result: Any) = results.put(index, result)
override def jobFailed(exception: Exception) = { failure = exception }
}
/** A simple helper class for creating custom JobListeners */
class SimpleListener extends JobListener {
val results = new HashMap[Int, Any]
var failure: Exception = null
override def taskSucceeded(index: Int, result: Any): Unit = results.put(index, result)
override def jobFailed(exception: Exception): Unit = { failure = exception }
}
override def beforeEach(): Unit = {
super.beforeEach()
init(new SparkConf())
}
private def init(testConf: SparkConf): Unit = {
sc = new SparkContext("local[2]", "DAGSchedulerSuite", testConf)
sparkListener = new EventInfoRecordingListener
failure = null
sc.addSparkListener(sparkListener)
taskSets.clear()
tasksMarkedAsCompleted.clear()
cancelledStages.clear()
cacheLocations.clear()
results.clear()
securityMgr = new SecurityManager(conf)
broadcastManager = new BroadcastManager(true, conf, securityMgr)
mapOutputTracker = new MapOutputTrackerMaster(conf, broadcastManager, true) {
override def sendTracker(message: Any): Unit = {
// no-op, just so we can stop this to avoid leaking threads
}
}
scheduler = new DAGScheduler(
sc,
taskScheduler,
sc.listenerBus,
mapOutputTracker,
blockManagerMaster,
sc.env)
dagEventProcessLoopTester = new DAGSchedulerEventProcessLoopTester(scheduler)
}
override def afterEach(): Unit = {
try {
scheduler.stop()
dagEventProcessLoopTester.stop()
mapOutputTracker.stop()
broadcastManager.stop()
} finally {
super.afterEach()
}
}
override def afterAll(): Unit = {
super.afterAll()
}
/**
* Type of RDD we use for testing. Note that we should never call the real RDD compute methods.
* This is a pair RDD type so it can always be used in ShuffleDependencies.
*/
type PairOfIntsRDD = RDD[(Int, Int)]
/**
* Process the supplied event as if it were the top of the DAGScheduler event queue, expecting
* the scheduler not to exit.
*
* After processing the event, submit waiting stages as is done on most iterations of the
* DAGScheduler event loop.
*/
private def runEvent(event: DAGSchedulerEvent): Unit = {
dagEventProcessLoopTester.post(event)
}
/**
* When we submit dummy Jobs, this is the compute function we supply. Except in a local test
* below, we do not expect this function to ever be executed; instead, we will return results
* directly through CompletionEvents.
*/
private val jobComputeFunc = (context: TaskContext, it: Iterator[(_)]) =>
it.next.asInstanceOf[Tuple2[_, _]]._1
/** Send the given CompletionEvent messages for the tasks in the TaskSet. */
private def complete(taskSet: TaskSet, results: Seq[(TaskEndReason, Any)]): Unit = {
assert(taskSet.tasks.size >= results.size)
for ((result, i) <- results.zipWithIndex) {
if (i < taskSet.tasks.size) {
runEvent(makeCompletionEvent(taskSet.tasks(i), result._1, result._2))
}
}
}
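/** Like complete(), but attaches a long accumulator with the given id to each CompletionEvent. */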
private def completeWithAccumulator(
accumId: Long,
taskSet: TaskSet,
results: Seq[(TaskEndReason, Any)]): Unit = {
assert(taskSet.tasks.size >= results.size)
for ((result, i) <- results.zipWithIndex) {
if (i < taskSet.tasks.size) {
runEvent(makeCompletionEvent(
taskSet.tasks(i),
result._1,
result._2,
Seq(AccumulatorSuite.createLongAccum("", initValue = 1, id = accumId))))
}
}
}
/** Submits a job to the scheduler and returns the job id. */
private def submit(
rdd: RDD[_],
partitions: Array[Int],
func: (TaskContext, Iterator[_]) => _ = jobComputeFunc,
listener: JobListener = jobListener,
properties: Properties = null): Int = {
val jobId = scheduler.nextJobId.getAndIncrement()
runEvent(JobSubmitted(jobId, rdd, func, partitions, CallSite("", ""), listener, properties))
jobId
}
/** Submits a map stage to the scheduler and returns the job id. */
private def submitMapStage(
shuffleDep: ShuffleDependency[_, _, _],
listener: JobListener = jobListener): Int = {
val jobId = scheduler.nextJobId.getAndIncrement()
runEvent(MapStageSubmitted(jobId, shuffleDep, CallSite("", ""), listener))
jobId
}
/** Sends TaskSetFailed to the scheduler. */
private def failed(taskSet: TaskSet, message: String): Unit = {
runEvent(TaskSetFailed(taskSet, message, None))
}
/** Sends JobCancelled to the DAG scheduler. */
private def cancel(jobId: Int): Unit = {
runEvent(JobCancelled(jobId, None))
}
test("[SPARK-3353] parent stage should have lower stage id") {
sc.parallelize(1 to 10).map(x => (x, x)).reduceByKey(_ + _, 4).count()
val stageByOrderOfExecution = sparkListener.stageByOrderOfExecution
assert(stageByOrderOfExecution.length === 2)
assert(stageByOrderOfExecution(0) < stageByOrderOfExecution(1))
}
/**
* This test ensures that the DAGScheduler builds the stage graph correctly.
*
* Suppose you have the following DAG:
*
*   [A] <--(s_A)-- [B] <--(s_B)-- [C] <--(s_C)-- [D]
*       \                        /
*        <----------------------
*
* Here, RDD B has a shuffle dependency on RDD A, and RDD C has a shuffle dependency on both
* B and A. The shuffle dependency IDs are numbers in the DAGScheduler, but to make the example
* easier to understand, let's call the shuffled data from A shuffle dependency ID s_A and the
* shuffled data from B shuffle dependency ID s_B.
*
* Note: [] means an RDD, () means a shuffle dependency.
*/
test("[SPARK-13902] Ensure no duplicate stages are created") {
val rddA = new MyRDD(sc, 1, Nil)
val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(1))
val s_A = shuffleDepA.shuffleId
val rddB = new MyRDD(sc, 1, List(shuffleDepA), tracker = mapOutputTracker)
val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(1))
val s_B = shuffleDepB.shuffleId
val rddC = new MyRDD(sc, 1, List(shuffleDepA, shuffleDepB), tracker = mapOutputTracker)
val shuffleDepC = new ShuffleDependency(rddC, new HashPartitioner(1))
val s_C = shuffleDepC.shuffleId
val rddD = new MyRDD(sc, 1, List(shuffleDepC), tracker = mapOutputTracker)
submit(rddD, Array(0))
assert(scheduler.shuffleIdToMapStage.size === 3)
assert(scheduler.activeJobs.size === 1)
val mapStageA = scheduler.shuffleIdToMapStage(s_A)
val mapStageB = scheduler.shuffleIdToMapStage(s_B)
val mapStageC = scheduler.shuffleIdToMapStage(s_C)
val finalStage = scheduler.activeJobs.head.finalStage
assert(mapStageA.parents.isEmpty)
assert(mapStageB.parents === List(mapStageA))
assert(mapStageC.parents === List(mapStageA, mapStageB))
assert(finalStage.parents === List(mapStageC))
complete(taskSets(0), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(1), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(3), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("All shuffle files on the slave should be cleaned up when slave lost") {
// reset the test context with the right shuffle service config
afterEach()
val conf = new SparkConf()
conf.set(config.SHUFFLE_SERVICE_ENABLED.key, "true")
conf.set("spark.files.fetchFailure.unRegisterOutputOnHost", "true")
init(conf)
runEvent(ExecutorAdded("exec-hostA1", "hostA"))
runEvent(ExecutorAdded("exec-hostA2", "hostA"))
runEvent(ExecutorAdded("exec-hostB", "hostB"))
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(3))
val firstShuffleId = firstShuffleDep.shuffleId
val shuffleMapRdd = new MyRDD(sc, 3, List(firstShuffleDep))
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(3))
val secondShuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep))
submit(reduceRdd, Array(0))
// map stage1 completes successfully, with one task on each executor
complete(taskSets(0), Seq(
(Success,
MapStatus(
BlockManagerId("exec-hostA1", "hostA", 12345), Array.fill[Long](1)(2), mapTaskId = 5)),
(Success,
MapStatus(
BlockManagerId("exec-hostA2", "hostA", 12345), Array.fill[Long](1)(2), mapTaskId = 6)),
(Success, makeMapStatus("hostB", 1, mapTaskId = 7))
))
// map stage2 completes successfully, with one task on each executor
complete(taskSets(1), Seq(
(Success,
MapStatus(
BlockManagerId("exec-hostA1", "hostA", 12345), Array.fill[Long](1)(2), mapTaskId = 8)),
(Success,
MapStatus(
BlockManagerId("exec-hostA2", "hostA", 12345), Array.fill[Long](1)(2), mapTaskId = 9)),
(Success, makeMapStatus("hostB", 1, mapTaskId = 10))
))
// make sure our test setup is correct
val initialMapStatus1 = mapOutputTracker.shuffleStatuses(firstShuffleId).mapStatuses
// val initialMapStatus1 = mapOutputTracker.mapStatuses.get(0).get
assert(initialMapStatus1.count(_ != null) === 3)
assert(initialMapStatus1.map{_.location.executorId}.toSet ===
Set("exec-hostA1", "exec-hostA2", "exec-hostB"))
assert(initialMapStatus1.map{_.mapId}.toSet === Set(5, 6, 7))
val initialMapStatus2 = mapOutputTracker.shuffleStatuses(secondShuffleId).mapStatuses
assert(initialMapStatus2.count(_ != null) === 3)
assert(initialMapStatus2.map{_.location.executorId}.toSet ===
Set("exec-hostA1", "exec-hostA2", "exec-hostB"))
assert(initialMapStatus2.map{_.mapId}.toSet === Set(8, 9, 10))
// reduce stage fails with a fetch failure from one host
complete(taskSets(2), Seq(
(FetchFailed(BlockManagerId("exec-hostA2", "hostA", 12345),
firstShuffleId, 0L, 0, 0, "ignored"),
null)
))
// Here is the main assertion -- make sure that we de-register
// the map outputs for both map stages from both executors on hostA
val mapStatus1 = mapOutputTracker.shuffleStatuses(firstShuffleId).mapStatuses
assert(mapStatus1.count(_ != null) === 1)
assert(mapStatus1(2).location.executorId === "exec-hostB")
assert(mapStatus1(2).location.host === "hostB")
val mapStatus2 = mapOutputTracker.shuffleStatuses(secondShuffleId).mapStatuses
assert(mapStatus2.count(_ != null) === 1)
assert(mapStatus2(2).location.executorId === "exec-hostB")
assert(mapStatus2(2).location.host === "hostB")
}
test("zero split job") {
var numResults = 0
var failureReason: Option[Exception] = None
val fakeListener = new JobListener() {
override def taskSucceeded(partition: Int, value: Any): Unit = numResults += 1
override def jobFailed(exception: Exception): Unit = {
failureReason = Some(exception)
}
}
val jobId = submit(new MyRDD(sc, 0, Nil), Array(), listener = fakeListener)
assert(numResults === 0)
cancel(jobId)
assert(failureReason.isDefined)
assert(failureReason.get.getMessage() === "Job 0 cancelled ")
}
test("run trivial job") {
submit(new MyRDD(sc, 1, Nil), Array(0))
complete(taskSets(0), List((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("run trivial job w/ dependency") {
val baseRdd = new MyRDD(sc, 1, Nil)
val finalRdd = new MyRDD(sc, 1, List(new OneToOneDependency(baseRdd)))
submit(finalRdd, Array(0))
complete(taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("equals and hashCode AccumulableInfo") {
val accInfo1 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = true, countFailedValues = false)
val accInfo2 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = false, countFailedValues = false)
val accInfo3 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = false, countFailedValues = false)
assert(accInfo1 !== accInfo2)
assert(accInfo2 === accInfo3)
assert(accInfo2.hashCode() === accInfo3.hashCode())
}
test("cache location preferences w/ dependency") {
val baseRdd = new MyRDD(sc, 1, Nil).cache()
val finalRdd = new MyRDD(sc, 1, List(new OneToOneDependency(baseRdd)))
cacheLocations(baseRdd.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
submit(finalRdd, Array(0))
val taskSet = taskSets(0)
assertLocations(taskSet, Seq(Seq("hostA", "hostB")))
complete(taskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("regression test for getCacheLocs") {
val rdd = new MyRDD(sc, 3, Nil).cache()
cacheLocations(rdd.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
cacheLocations(rdd.id -> 1) =
Seq(makeBlockManagerId("hostB"), makeBlockManagerId("hostC"))
cacheLocations(rdd.id -> 2) =
Seq(makeBlockManagerId("hostC"), makeBlockManagerId("hostD"))
val locs = scheduler.getCacheLocs(rdd).map(_.map(_.host))
assert(locs === Seq(Seq("hostA", "hostB"), Seq("hostB", "hostC"), Seq("hostC", "hostD")))
}
/**
* This test ensures that if a particular RDD is cached, RDDs earlier in the dependency chain
* are not computed. It constructs the following chain of dependencies:
*   +---+ shuffle +---+    +---+    +---+
*   | A |<--------| B |<---| C |<---| D |
*   +---+         +---+    +---+    +---+
* Here, B is derived from A by performing a shuffle, C has a one-to-one dependency on B,
* and D similarly has a one-to-one dependency on C. If none of the RDDs were cached, this
* set of RDDs would result in a two stage job: one ShuffleMapStage, and a ResultStage that
* reads the shuffled data from RDD A. This test ensures that if C is cached, the scheduler
* doesn't perform a shuffle, and instead computes the result using a single ResultStage
* that reads C's cached data.
*/
test("getMissingParentStages should consider all ancestor RDDs' cache statuses") {
val rddA = new MyRDD(sc, 1, Nil)
val rddB = new MyRDD(sc, 1, List(new ShuffleDependency(rddA, new HashPartitioner(1))),
tracker = mapOutputTracker)
val rddC = new MyRDD(sc, 1, List(new OneToOneDependency(rddB))).cache()
val rddD = new MyRDD(sc, 1, List(new OneToOneDependency(rddC)))
cacheLocations(rddC.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
submit(rddD, Array(0))
assert(scheduler.runningStages.size === 1)
// Make sure that the scheduler is running the final result stage.
// Because C is cached, the shuffle map stage to compute A does not need to be run.
assert(scheduler.runningStages.head.isInstanceOf[ResultStage])
}
test("avoid exponential blowup when getting preferred locs list") {
// Build up a complex dependency graph with repeated zip operations, without preferred locations
var rdd: RDD[_] = new MyRDD(sc, 1, Nil)
(1 to 30).foreach(_ => rdd = rdd.zip(rdd))
// getPreferredLocs runs quickly, indicating that exponential graph traversal is avoided.
failAfter(10.seconds) {
val preferredLocs = scheduler.getPreferredLocs(rdd, 0)
// No preferred locations are returned.
assert(preferredLocs.length === 0)
}
}
test("unserializable task") {
val unserializableRdd = new MyRDD(sc, 1, Nil) {
class UnserializableClass
val unserializable = new UnserializableClass
}
submit(unserializableRdd, Array(0))
assert(failure.getMessage.startsWith(
"Job aborted due to stage failure: Task not serializable:"))
assert(sparkListener.failedStages === Seq(0))
assertDataStructuresEmpty()
}
test("trivial job failure") {
submit(new MyRDD(sc, 1, Nil), Array(0))
failed(taskSets(0), "some failure")
assert(failure.getMessage === "Job aborted due to stage failure: some failure")
assert(sparkListener.failedStages === Seq(0))
assertDataStructuresEmpty()
}
test("trivial job cancellation") {
val rdd = new MyRDD(sc, 1, Nil)
val jobId = submit(rdd, Array(0))
cancel(jobId)
assert(failure.getMessage === s"Job $jobId cancelled ")
assert(sparkListener.failedStages === Seq(0))
assertDataStructuresEmpty()
}
test("job cancellation no-kill backend") {
// make sure that the DAGScheduler doesn't crash when the TaskScheduler
// doesn't implement killTask()
val noKillTaskScheduler = new TaskScheduler() {
override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
override def start(): Unit = {}
override def stop(): Unit = {}
override def submitTasks(taskSet: TaskSet): Unit = {
taskSets += taskSet
}
override def cancelTasks(stageId: Int, interruptThread: Boolean): Unit = {
throw new UnsupportedOperationException
}
override def killTaskAttempt(
taskId: Long, interruptThread: Boolean, reason: String): Boolean = {
throw new UnsupportedOperationException
}
override def killAllTaskAttempts(
stageId: Int, interruptThread: Boolean, reason: String): Unit = {
throw new UnsupportedOperationException
}
override def notifyPartitionCompletion(stageId: Int, partitionId: Int): Unit = {
throw new UnsupportedOperationException
}
override def setDAGScheduler(dagScheduler: DAGScheduler): Unit = {}
override def defaultParallelism(): Int = 2
override def executorHeartbeatReceived(
execId: String,
accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
blockManagerId: BlockManagerId,
executorUpdates: Map[(Int, Int), ExecutorMetrics]): Boolean = true
override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
override def applicationAttemptId(): Option[String] = None
}
val noKillScheduler = new DAGScheduler(
sc,
noKillTaskScheduler,
sc.listenerBus,
mapOutputTracker,
blockManagerMaster,
sc.env)
dagEventProcessLoopTester = new DAGSchedulerEventProcessLoopTester(noKillScheduler)
val jobId = submit(new MyRDD(sc, 1, Nil), Array(0))
cancel(jobId)
// Because the job wasn't actually cancelled, we shouldn't have received a failure message.
assert(failure === null)
// When the task set completes normally, state should be correctly updated.
complete(taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
assert(sparkListener.failedStages.isEmpty)
assert(sparkListener.successfulStages.contains(0))
}
test("run trivial shuffle") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
complete(taskSets(1), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("run trivial shuffle with fetch failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
// the 2nd ResultTask failed
complete(taskSets(1), Seq(
(Success, 42),
(FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"), null)))
// this will get called
// blockManagerMaster.removeExecutor("exec-hostA")
// ask the scheduler to try it again
scheduler.resubmitFailedStages()
// have the 2nd attempt pass
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
// we can see both result blocks now
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
complete(taskSets(3), Seq((Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
assertDataStructuresEmpty()
}
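// Each tuple below is (test name, executor loss event, whether the external shuffle service is
// enabled, whether shuffle files are expected to be lost).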
private val shuffleFileLossTests = Seq(
("slave lost with shuffle service", SlaveLost("", false), true, false),
("worker lost with shuffle service", SlaveLost("", true), true, true),
("worker lost without shuffle service", SlaveLost("", true), false, true),
("executor failure with shuffle service", ExecutorKilled, true, false),
("executor failure without shuffle service", ExecutorKilled, false, true))
for ((eventDescription, event, shuffleServiceOn, expectFileLoss) <- shuffleFileLossTests) {
val maybeLost = if (expectFileLoss) {
"lost"
} else {
"not lost"
}
test(s"shuffle files $maybeLost when $eventDescription") {
// reset the test context with the right shuffle service config
afterEach()
val conf = new SparkConf()
conf.set(config.SHUFFLE_SERVICE_ENABLED.key, shuffleServiceOn.toString)
init(conf)
assert(sc.env.blockManager.externalShuffleServiceEnabled == shuffleServiceOn)
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
runEvent(ExecutorLost("exec-hostA", event))
if (expectFileLoss) {
intercept[MetadataFetchFailedException] {
mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0)
}
} else {
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
}
}
}
test("SPARK-28967 properties must be cloned before posting to listener bus for 0 partition") {
val properties = new Properties()
val func = (context: TaskContext, it: Iterator[(_)]) => 1
val resultHandler = (taskIndex: Int, result: Int) => {}
val assertionError = new AtomicReference[TestFailedException](
new TestFailedException("Listener didn't receive expected JobStart event", 0))
val listener = new SparkListener() {
override def onJobStart(event: SparkListenerJobStart): Unit = {
try {
// spark.job.description can be implicitly set for 0 partition jobs.
// So event.properties and properties can be different. See SPARK-29997.
event.properties.remove(SparkContext.SPARK_JOB_DESCRIPTION)
properties.remove(SparkContext.SPARK_JOB_DESCRIPTION)
assert(event.properties.equals(properties), "Expected same content of properties, " +
s"but got properties with different content. props in caller ${properties} /" +
s" props in event ${event.properties}")
assert(event.properties.ne(properties), "Expected instance with different identity, " +
"but got same instance.")
assertionError.set(null)
} catch {
case e: TestFailedException => assertionError.set(e)
}
}
}
sc.addSparkListener(listener)
// 0 partition
val testRdd = new MyRDD(sc, 0, Nil)
val waiter = scheduler.submitJob(testRdd, func, Seq.empty, CallSite.empty,
resultHandler, properties)
sc.listenerBus.waitUntilEmpty()
assert(assertionError.get() === null)
}
// Helper function to validate state when creating tests for task failures
private def checkStageId(stageId: Int, attempt: Int, stageAttempt: TaskSet): Unit = {
assert(stageAttempt.stageId === stageId)
assert(stageAttempt.stageAttemptId == attempt)
}
// Helper functions to extract commonly used code in Fetch Failure test cases
private def setupStageAbortTest(sc: SparkContext): Unit = {
sc.listenerBus.addToSharedQueue(new EndListener())
ended = false
jobResult = null
}
// Create a new Listener to confirm that the listenerBus sees the JobEnd message
// when we abort the stage. This message will also be consumed by the EventLoggingListener
// so this will propagate up to the user.
var ended = false
var jobResult : JobResult = null
class EndListener extends SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
jobResult = jobEnd.jobResult
ended = true
}
}
/**
* Common code to get the next stage attempt, confirm it's the one we expect, and complete it
* successfully.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
* @param numShufflePartitions - The number of partitions in the next stage
*/
private def completeShuffleMapStageSuccessfully(
stageId: Int,
attemptIdx: Int,
numShufflePartitions: Int): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
complete(stageAttempt, stageAttempt.tasks.zipWithIndex.map {
case (task, idx) =>
(Success, makeMapStatus("host" + ('A' + idx).toChar, numShufflePartitions))
}.toSeq)
}
/**
* Common code to get the next stage attempt, confirm it's the one we expect, and complete it
* with a FetchFailure for every task.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
* @param shuffleDep - The shuffle dependency of the stage with a fetch failure
*/
private def completeNextStageWithFetchFailure(
stageId: Int,
attemptIdx: Int,
shuffleDep: ShuffleDependency[_, _, _]): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
complete(stageAttempt, stageAttempt.tasks.zipWithIndex.map { case (task, idx) =>
(FetchFailed(makeBlockManagerId("hostA"), shuffleDep.shuffleId, 0L, 0, idx, "ignored"), null)
}.toSeq)
}
/**
* Common code to get the next result stage attempt, confirm it's the one we expect, and
* complete it with a success where we return 42.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
*/
private def completeNextResultStageWithSuccess(
stageId: Int,
attemptIdx: Int,
partitionToResult: Int => Int = _ => 42): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
assert(scheduler.stageIdToStage(stageId).isInstanceOf[ResultStage])
val taskResults = stageAttempt.tasks.zipWithIndex.map { case (task, idx) =>
(Success, partitionToResult(idx))
}
complete(stageAttempt, taskResults.toSeq)
}
/**
* In this test, we simulate a job where many tasks in the same stage fail. We want to show
* that many fetch failures inside a single stage attempt do not trigger an abort
* on their own, but only when there are enough consecutive failed stage attempts.
*/
test("Single stage fetch failure should not abort the stage.") {
setupStageAbortTest(sc)
val parts = 8
val shuffleMapRdd = new MyRDD(sc, parts, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(parts))
val reduceRdd = new MyRDD(sc, parts, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, (0 until parts).toArray)
completeShuffleMapStageSuccessfully(0, 0, numShufflePartitions = parts)
completeNextStageWithFetchFailure(1, 0, shuffleDep)
// Resubmit and confirm that now all is well
scheduler.resubmitFailedStages()
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
// Complete stage 0 and then stage 1 with a "42"
completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = parts)
completeNextResultStageWithSuccess(1, 1)
// Confirm job finished successfully
sc.listenerBus.waitUntilEmpty()
assert(ended)
assert(results === (0 until parts).map { idx => idx -> 42 }.toMap)
assertDataStructuresEmpty()
}
/**
* In this test we simulate a job failure where the first stage completes successfully and
* the second stage fails due to a fetch failure. Multiple successive fetch failures of a stage
* trigger an overall job abort to avoid endless retries.
*/
test("Multiple consecutive stage fetch failures should lead to job being aborted.") {
setupStageAbortTest(sc)
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts) {
// Complete all the tasks for the current attempt of stage 0 successfully
completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
// Now we should have a new taskSet, for a new attempt of stage 1.
// Fail all these tasks with FetchFailure
completeNextStageWithFetchFailure(1, attempt, shuffleDep)
// this will trigger a resubmission of stage 0, since we've lost some of its
// map output, for the next iteration through the loop
scheduler.resubmitFailedStages()
if (attempt < scheduler.maxConsecutiveStageAttempts - 1) {
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
} else {
// Stage should have been aborted and removed from running stages
assertDataStructuresEmpty()
sc.listenerBus.waitUntilEmpty()
assert(ended)
jobResult match {
case JobFailed(reason) =>
assert(reason.getMessage.contains("ResultStage 1 () has failed the maximum"))
case other => fail(s"expected JobFailed, not $other")
}
}
}
}
/**
* In this test, we create a job with two consecutive shuffles, and simulate 2 failures for each
* shuffle fetch. In total, the job has four failures overall, but not four failures
* for a particular stage, and as such should not be aborted.
*/
test("Failures in different stages should not trigger an overall abort") {
setupStageAbortTest(sc)
val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// In the first two iterations, Stage 0 succeeds and stage 1 fails. In the next two iterations,
// stage 2 fails.
for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts) {
// Complete all the tasks for the current attempt of stage 0 successfully
completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
if (attempt < scheduler.maxConsecutiveStageAttempts / 2) {
// Now we should have a new taskSet, for a new attempt of stage 1.
// Fail all these tasks with FetchFailure
completeNextStageWithFetchFailure(1, attempt, shuffleDepOne)
} else {
completeShuffleMapStageSuccessfully(1, attempt, numShufflePartitions = 1)
// Fail stage 2
completeNextStageWithFetchFailure(2,
attempt - scheduler.maxConsecutiveStageAttempts / 2, shuffleDepTwo)
}
// this will trigger a resubmission of stage 0, since we've lost some of its
// map output, for the next iteration through the loop
scheduler.resubmitFailedStages()
}
completeShuffleMapStageSuccessfully(0, 4, numShufflePartitions = 2)
completeShuffleMapStageSuccessfully(1, 4, numShufflePartitions = 1)
// Succeed stage2 with a "42"
completeNextResultStageWithSuccess(2, scheduler.maxConsecutiveStageAttempts / 2)
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
/**
* In this test we demonstrate that only consecutive failures trigger a stage abort. A stage may
* fail multiple times, succeed, then fail a few more times (because it is run again by downstream
* dependencies). The total number of failed attempts for one stage will go over the limit,
* but that doesn't matter, since there are successes in between.
*/
test("Non-consecutive stage failures don't trigger abort") {
setupStageAbortTest(sc)
val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// First, execute stages 0 and 1, failing stage 1 up to MAX-1 times.
for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts - 1) {
// Make each task in stage 0 success
completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
// Now we should have a new taskSet, for a new attempt of stage 1.
// Fail these tasks with FetchFailure
completeNextStageWithFetchFailure(1, attempt, shuffleDepOne)
scheduler.resubmitFailedStages()
// Confirm we have not yet aborted
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
}
// Rerun stage 0 and 1 to step through the task set
completeShuffleMapStageSuccessfully(0, 3, numShufflePartitions = 2)
completeShuffleMapStageSuccessfully(1, 3, numShufflePartitions = 1)
// Fail stage 2 so that stage 1 is resubmitted when we call scheduler.resubmitFailedStages()
completeNextStageWithFetchFailure(2, 0, shuffleDepTwo)
scheduler.resubmitFailedStages()
// Rerun stage 0 to step through the task set
completeShuffleMapStageSuccessfully(0, 4, numShufflePartitions = 2)
// Now again, fail stage 1 (up to MAX_FAILURES) but confirm that this doesn't trigger an abort
// since we succeeded in between.
completeNextStageWithFetchFailure(1, 4, shuffleDepOne)
scheduler.resubmitFailedStages()
// Confirm we have not yet aborted
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
// Next, succeed all and confirm output
// Rerun stage 0 + 1
completeShuffleMapStageSuccessfully(0, 5, numShufflePartitions = 2)
completeShuffleMapStageSuccessfully(1, 5, numShufflePartitions = 1)
// Succeed stage 2 and verify results
completeNextResultStageWithSuccess(2, 1)
assertDataStructuresEmpty()
sc.listenerBus.waitUntilEmpty()
assert(ended)
assert(results === Map(0 -> 42))
}
test("trivial shuffle with multiple fetch failures") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
// The MapOutputTracker should know about both map output locations.
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
null))
assert(sparkListener.failedStages.contains(1))
// The second ResultTask fails, with a fetch failure for the output from the second mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 1L, 1, 1, "ignored"),
null))
// The SparkListener should not receive redundant failure events.
assert(sparkListener.failedStages.size === 1)
}
test("Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
null))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq(0, 1)))
scheduler.resubmitFailedStages()
// Complete the map stage.
completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = 2)
// Complete the result stage.
completeNextResultStageWithSuccess(1, 1)
sc.listenerBus.waitUntilEmpty()
assertDataStructuresEmpty()
}
test("Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq(1)))
// The second map task fails with TaskKilled.
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
TaskKilled("test"),
null))
assert(sparkListener.failedStages === Seq(0))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq(0, 1)))
scheduler.resubmitFailedStages()
// Complete the map stage.
completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = 2)
// Complete the result stage.
completeNextResultStageWithSuccess(1, 0)
sc.listenerBus.waitUntilEmpty()
assertDataStructuresEmpty()
}
test("Fail the job if a barrier ResultTask failed") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
.barrier()
.mapPartitions(iter => iter)
submit(reduceRdd, Array(0, 1))
// Complete the map stage.
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostA", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
// The first ResultTask fails
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
TaskKilled("test"),
null))
// Assert the stage has been cancelled.
sc.listenerBus.waitUntilEmpty()
assert(failure.getMessage.startsWith("Job aborted due to stage failure: Could not recover " +
"from a failed barrier ResultStage."))
}
/**
* This tests the case where another FetchFailed comes in while the map stage is getting
* re-run.
*/
test("late fetch failures don't cause multiple concurrent attempts for the same map stage") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
val mapStageId = 0
def countSubmittedMapStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == mapStageId)
}
// The map stage should have been submitted.
assert(countSubmittedMapStageAttempts() === 1)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// The MapOutputTracker should know about both map output locations.
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 1).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
null))
assert(sparkListener.failedStages.contains(1))
// Trigger resubmission of the failed map stage.
runEvent(ResubmitFailedStages)
// Another attempt for the map stage should have been submitted, resulting in 2 total attempts.
assert(countSubmittedMapStageAttempts() === 2)
// The second ResultTask fails, with a fetch failure for the output from the second mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1),
FetchFailed(makeBlockManagerId("hostB"), shuffleId, 1L, 1, 1, "ignored"),
null))
// Another ResubmitFailedStages event should not result in another attempt for the map
// stage being run concurrently.
// NOTE: the actual ResubmitFailedStages may get called at any time during this, but it
// shouldn't affect anything -- our calling it just makes *SURE* it gets called between the
// desired event and our check.
runEvent(ResubmitFailedStages)
assert(countSubmittedMapStageAttempts() === 2)
}
/**
* This tests the case where a late FetchFailed comes in after the map stage has finished getting
* retried and a new reduce stage starts running.
*/
test("extremely late fetch failures don't cause multiple concurrent attempts for " +
"the same stage") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
def countSubmittedReduceStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == 1)
}
def countSubmittedMapStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == 0)
}
// The map stage should have been submitted.
assert(countSubmittedMapStageAttempts() === 1)
// Complete the map stage.
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// The reduce stage should have been submitted.
assert(countSubmittedReduceStageAttempts() === 1)
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
null))
// Trigger resubmission of the failed map stage and finish the re-started map task.
runEvent(ResubmitFailedStages)
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
// Because the map stage finished, another attempt for the reduce stage should have been
// submitted, resulting in 2 total attempts for both the map and the reduce stage.
assert(countSubmittedMapStageAttempts() === 2)
assert(countSubmittedReduceStageAttempts() === 2)
// A late FetchFailed arrives from the second task in the original reduce stage.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1),
FetchFailed(makeBlockManagerId("hostB"), shuffleId, 1L, 1, 1, "ignored"),
null))
// Running ResubmitFailedStages shouldn't result in any more attempts for the map stage, because
// the FetchFailed should have been ignored
runEvent(ResubmitFailedStages)
// The FetchFailed from the original reduce stage should be ignored.
assert(countSubmittedMapStageAttempts() === 2)
}
test("task events always posted in speculation / when stage is killed") {
val baseRdd = new MyRDD(sc, 4, Nil)
val finalRdd = new MyRDD(sc, 4, List(new OneToOneDependency(baseRdd)))
submit(finalRdd, Array(0, 1, 2, 3))
// complete two tasks
runEvent(makeCompletionEvent(
taskSets(0).tasks(0), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(1), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(1)))
// verify stage exists
assert(scheduler.stageIdToStage.contains(0))
assert(sparkListener.endedTasks.size === 2)
// finish other 2 tasks
runEvent(makeCompletionEvent(
taskSets(0).tasks(2), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(2)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(3)))
assert(sparkListener.endedTasks.size === 4)
// verify the stage is done
assert(!scheduler.stageIdToStage.contains(0))
// Stage should be complete. Finish one more successful task to simulate what can happen
// with a speculative task, and make sure the event is sent out
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(5)))
assert(sparkListener.endedTasks.size === 5)
// make sure non-successful tasks also send out an event
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), UnknownReason, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(6)))
assert(sparkListener.endedTasks.size === 6)
}
test("ignore late map task completions") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
// pretend we were told hostA went away
val oldEpoch = mapOutputTracker.getEpoch
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
val newEpoch = mapOutputTracker.getEpoch
assert(newEpoch > oldEpoch)
// now start completing some tasks in the shuffle map stage, under different hosts
// and epochs, and make sure scheduler updates its state correctly
val taskSet = taskSets(0)
val shuffleStage = scheduler.stageIdToStage(taskSet.stageId).asInstanceOf[ShuffleMapStage]
assert(shuffleStage.numAvailableOutputs === 0)
// should be ignored for being too old
runEvent(makeCompletionEvent(
taskSet.tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 0)
// should work because it's a non-failed host (so the available map outputs will increase)
runEvent(makeCompletionEvent(
taskSet.tasks(0),
Success,
makeMapStatus("hostB", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 1)
// should be ignored for being too old
runEvent(makeCompletionEvent(
taskSet.tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 1)
// should work because it's a new epoch, which will increase the number of available map
// outputs, and also finish the stage
taskSet.tasks(1).epoch = newEpoch
runEvent(makeCompletionEvent(
taskSet.tasks(1),
Success,
makeMapStatus("hostA", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 2)
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostB"), makeBlockManagerId("hostA")))
// finish the next stage normally, which completes the job
complete(taskSets(1), Seq((Success, 42), (Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
assertDataStructuresEmpty()
}
test("run shuffle with map stage failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
// Fail the map stage. This should cause the entire job to fail.
val stageFailureMessage = "Exception failure in map stage"
failed(taskSets(0), stageFailureMessage)
assert(failure.getMessage === s"Job aborted due to stage failure: $stageFailureMessage")
// Listener bus should get told about the map stage failing, but not the reduce stage
// (since the reduce stage hasn't been started yet).
assert(sparkListener.failedStages.toSet === Set(0))
assertDataStructuresEmpty()
}
/**
* Run two jobs, with a shared dependency. We simulate a fetch failure in the second job, which
* requires regenerating some outputs of the shared dependency. One key aspect of this test is
* that the second job actually uses a different stage for the shared dependency (a "skipped"
* stage).
*/
test("shuffle fetch failure in a reused shuffle dependency") {
// Run the first job successfully, which creates one shuffle dependency
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep))
submit(reduceRdd, Array(0, 1))
completeShuffleMapStageSuccessfully(0, 0, 2)
completeNextResultStageWithSuccess(1, 0)
assert(results === Map(0 -> 42, 1 -> 42))
assertDataStructuresEmpty()
// submit another job w/ the shared dependency, and have a fetch failure
val reduce2 = new MyRDD(sc, 2, List(shuffleDep))
submit(reduce2, Array(0, 1))
// Note that the stage numbering here is only b/c the shared dependency produces a new, skipped
// stage. If instead it reused the existing stage, then this would be stage 2
completeNextStageWithFetchFailure(3, 0, shuffleDep)
scheduler.resubmitFailedStages()
// the scheduler now creates a new task set to regenerate the missing map output, but this time
// using a different stage, the "skipped" one
// SPARK-9809 -- this stage is submitted without a task for each partition (because some of
// the shuffle map output is still available from stage 0); make sure we've still got internal
// accumulators setup
assert(scheduler.stageIdToStage(2).latestInfo.taskMetrics != null)
completeShuffleMapStageSuccessfully(2, 0, 2)
completeNextResultStageWithSuccess(3, 1, idx => idx + 1234)
assert(results === Map(0 -> 1234, 1 -> 1235))
assertDataStructuresEmpty()
}
/**
* This test runs a three-stage job with a fetch failure in stage 1, but during the retry we
* receive completions from both the first and second attempts of stage 1. So all the map output
* is available before we finish any task set for stage 1. We want to make sure that we don't
* submit stage 2 until the map output for stage 1 is registered.
*/
test("don't submit stage until its dependencies map outputs are registered (SPARK-5259)") {
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(3))
val firstShuffleId = firstShuffleDep.shuffleId
val shuffleMapRdd = new MyRDD(sc, 3, List(firstShuffleDep))
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep))
submit(reduceRdd, Array(0))
// things start out smoothly, stage 0 completes with no issues
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostB", shuffleMapRdd.partitions.length)),
(Success, makeMapStatus("hostB", shuffleMapRdd.partitions.length)),
(Success, makeMapStatus("hostA", shuffleMapRdd.partitions.length))
))
// then one executor dies, and a task fails in stage 1
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(null, firstShuffleId, 2L, 2, 0, "Fetch failed"),
null))
// so we resubmit stage 0, which completes happily
scheduler.resubmitFailedStages()
val stage0Resubmit = taskSets(2)
assert(stage0Resubmit.stageId == 0)
assert(stage0Resubmit.stageAttemptId === 1)
val task = stage0Resubmit.tasks(0)
assert(task.partitionId === 2)
runEvent(makeCompletionEvent(
task,
Success,
makeMapStatus("hostC", shuffleMapRdd.partitions.length)))
// now here is where things get tricky: we will now have a task set representing
// the second attempt for stage 1, but we *also* have some tasks for the first attempt for
// stage 1 still going
val stage1Resubmit = taskSets(3)
assert(stage1Resubmit.stageId == 1)
assert(stage1Resubmit.stageAttemptId === 1)
assert(stage1Resubmit.tasks.length === 3)
// we'll have some tasks finish from the first attempt, and some finish from the second attempt,
// so that we actually have all stage outputs, though no attempt has completed all its
// tasks
runEvent(makeCompletionEvent(
taskSets(3).tasks(0),
Success,
makeMapStatus("hostC", reduceRdd.partitions.length)))
runEvent(makeCompletionEvent(
taskSets(3).tasks(1),
Success,
makeMapStatus("hostC", reduceRdd.partitions.length)))
// late task finish from the first attempt
runEvent(makeCompletionEvent(
taskSets(1).tasks(2),
Success,
makeMapStatus("hostB", reduceRdd.partitions.length)))
// What should happen now is that we submit stage 2. However, we might not see an error
// b/c of DAGScheduler's error handling (it tends to swallow errors and just log them). But
// we can check some conditions.
// Note that the really important thing here is not so much that we submit stage 2 *immediately*
// but that we don't end up with some error from these interleaved completions. It would also
// be OK (though sub-optimal) if stage 2 simply waited until the resubmission of stage 1 had
// all its tasks complete
// check that we have all the map output for stage 0 (it should have been there even before
// the last round of completions from stage 1, but just to double check it hasn't been messed
// up) and also the newly available stage 1
val stageToReduceIdxs = Seq(
0 -> (0 until 3),
1 -> (0 until 1)
)
for {
(stage, reduceIdxs) <- stageToReduceIdxs
reduceIdx <- reduceIdxs
} {
// this would throw an exception if the map status hadn't been registered
val statuses = mapOutputTracker.getMapSizesByExecutorId(stage, reduceIdx)
// really we should have already thrown an exception rather than fail either of these
// asserts, but just to be extra defensive let's double check the statuses are OK
assert(statuses != null)
assert(statuses.nonEmpty)
}
// and check that stage 2 has been submitted
assert(taskSets.size == 5)
val stage2TaskSet = taskSets(4)
assert(stage2TaskSet.stageId == 2)
assert(stage2TaskSet.stageAttemptId == 0)
}
/**
* We lose an executor after completing some shuffle map tasks on it. Those tasks get
* resubmitted, and when they finish, the job completes normally.
*/
test("register map outputs correctly after ExecutorLost and task Resubmitted") {
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 5, List(firstShuffleDep))
submit(reduceRdd, Array(0))
// complete some of the tasks from the first stage, on one host
runEvent(makeCompletionEvent(
taskSets(0).tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.length)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
Success,
makeMapStatus("hostA", reduceRdd.partitions.length)))
// now that host goes down
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
// so we resubmit those tasks
runEvent(makeCompletionEvent(taskSets(0).tasks(0), Resubmitted, null))
runEvent(makeCompletionEvent(taskSets(0).tasks(1), Resubmitted, null))
// now complete everything on a different host
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostB", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))
))
// now we should submit stage 1, and the map output from stage 0 should be registered
// check that we have all the map output for stage 0
(0 until reduceRdd.partitions.length).foreach { reduceIdx =>
val statuses = mapOutputTracker.getMapSizesByExecutorId(0, reduceIdx)
// really we should have already thrown an exception rather than fail either of these
// asserts, but just to be extra defensive let's double check the statuses are OK
assert(statuses != null)
assert(statuses.nonEmpty)
}
// and check that stage 1 has been submitted
assert(taskSets.size == 2)
val stage1TaskSet = taskSets(1)
assert(stage1TaskSet.stageId == 1)
assert(stage1TaskSet.stageAttemptId == 0)
}
/**
* Makes sure that failures of a stage used by multiple jobs are correctly handled.
*
* This test creates the following dependency graph:
*
*   shuffleMapRdd1     shuffleMapRDD2
*        |     \        |
*        |      \       |
*        |       \      |
*        |        \     |
*   reduceRdd1    reduceRdd2
*
* We start both shuffleMapRdds and then fail shuffleMapRdd1. As a result, the job listeners for
* reduceRdd1 and reduceRdd2 should both be informed that the job failed. shuffleMapRDD2 should
* also be cancelled, because it is only used by reduceRdd2 and reduceRdd2 cannot complete
* without shuffleMapRdd1.
*/
test("failure of stage used by two jobs") {
val shuffleMapRdd1 = new MyRDD(sc, 2, Nil)
val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
val shuffleMapRdd2 = new MyRDD(sc, 2, Nil)
val shuffleDep2 = new ShuffleDependency(shuffleMapRdd2, new HashPartitioner(2))
val reduceRdd1 = new MyRDD(sc, 2, List(shuffleDep1), tracker = mapOutputTracker)
val reduceRdd2 = new MyRDD(sc, 2, List(shuffleDep1, shuffleDep2), tracker = mapOutputTracker)
// We need to make our own listeners for this test, since by default submit uses the same
// listener for all jobs, and here we want to capture the failure for each job separately.
class FailureRecordingJobListener() extends JobListener {
var failureMessage: String = _
override def taskSucceeded(index: Int, result: Any): Unit = {}
override def jobFailed(exception: Exception): Unit = { failureMessage = exception.getMessage }
}
val listener1 = new FailureRecordingJobListener()
val listener2 = new FailureRecordingJobListener()
submit(reduceRdd1, Array(0, 1), listener = listener1)
submit(reduceRdd2, Array(0, 1), listener = listener2)
val stageFailureMessage = "Exception failure in map stage"
failed(taskSets(0), stageFailureMessage)
assert(cancelledStages.toSet === Set(0, 2))
// Make sure the listeners got told about both failed stages.
assert(sparkListener.successfulStages.isEmpty)
assert(sparkListener.failedStages.toSet === Set(0, 2))
assert(listener1.failureMessage === s"Job aborted due to stage failure: $stageFailureMessage")
assert(listener2.failureMessage === s"Job aborted due to stage failure: $stageFailureMessage")
assertDataStructuresEmpty()
}
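  /** Checks that the TaskSet carries the expected "testProperty" value and scheduling priority. */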
def checkJobPropertiesAndPriority(taskSet: TaskSet, expected: String, priority: Int): Unit = {
assert(taskSet.properties != null)
assert(taskSet.properties.getProperty("testProperty") === expected)
assert(taskSet.priority === priority)
}
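  /**
   * Submits two jobs whose final RDDs share the same two-stage ancestry, cancels the first job,
   * completes the shared first stage, and returns the first shuffle dependency so callers can
   * later inject failures against it.
   */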
def launchJobsThatShareStageAndCancelFirst(): ShuffleDependency[Int, Int, Nothing] = {
val baseRdd = new MyRDD(sc, 1, Nil)
val shuffleDep1 = new ShuffleDependency(baseRdd, new HashPartitioner(1))
val intermediateRdd = new MyRDD(sc, 1, List(shuffleDep1))
val shuffleDep2 = new ShuffleDependency(intermediateRdd, new HashPartitioner(1))
val finalRdd1 = new MyRDD(sc, 1, List(shuffleDep2))
val finalRdd2 = new MyRDD(sc, 1, List(shuffleDep2))
val job1Properties = new Properties()
val job2Properties = new Properties()
job1Properties.setProperty("testProperty", "job1")
job2Properties.setProperty("testProperty", "job2")
// Run jobs 1 & 2, both referencing the same stage, then cancel job1.
// Note that we have to submit job2 before we cancel job1 to have them actually share
// *Stages*, and not just shuffle dependencies, due to skipped stages (at least until
// we address SPARK-10193.)
val jobId1 = submit(finalRdd1, Array(0), properties = job1Properties)
val jobId2 = submit(finalRdd2, Array(0), properties = job2Properties)
assert(scheduler.activeJobs.nonEmpty)
val testProperty1 = scheduler.jobIdToActiveJob(jobId1).properties.getProperty("testProperty")
// remove job1 as an ActiveJob
cancel(jobId1)
// job2 should still be running
assert(scheduler.activeJobs.nonEmpty)
val testProperty2 = scheduler.jobIdToActiveJob(jobId2).properties.getProperty("testProperty")
assert(testProperty1 != testProperty2)
// NB: This next assert isn't necessarily the "desired" behavior; it's just to document
// the current behavior. We've already submitted the TaskSet for stage 0 based on job1, but
// even though we have cancelled that job and are now running it because of job2, we haven't
// updated the TaskSet's properties. Changing the properties to "job2" is likely the more
// correct behavior.
val job1Id = 0 // TaskSet priority for Stages run with "job1" as the ActiveJob
checkJobPropertiesAndPriority(taskSets(0), "job1", job1Id)
complete(taskSets(0), Seq((Success, makeMapStatus("hostA", 1))))
shuffleDep1
}
/**
* Makes sure that tasks for a stage used by multiple jobs are submitted with the properties of a
* later, active job if they were previously run under a job that is no longer active
*/
test("stage used by two jobs, the first no longer active (SPARK-6880)") {
launchJobsThatShareStageAndCancelFirst()
// The next check is the key for SPARK-6880. For the stage which was shared by both job1 and
// job2 but never had any tasks submitted for job1, the properties of job2 are now used to run
// the stage.
checkJobPropertiesAndPriority(taskSets(1), "job2", 1)
complete(taskSets(1), Seq((Success, makeMapStatus("hostA", 1))))
assert(taskSets(2).properties != null)
complete(taskSets(2), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assert(scheduler.activeJobs.isEmpty)
assertDataStructuresEmpty()
}
/**
* Makes sure that tasks for a stage used by multiple jobs are submitted with the properties of a
* later, active job if they were previously run under a job that is no longer active, even when
* there are fetch failures
*/
test("stage used by two jobs, some fetch failures, and the first job no longer active " +
"(SPARK-6880)") {
val shuffleDep1 = launchJobsThatShareStageAndCancelFirst()
val job2Id = 1 // TaskSet priority for Stages run with "job2" as the ActiveJob
    // let's say there is a fetch failure in this task set, which makes us go back and
// run stage 0, attempt 1
complete(taskSets(1), Seq(
(FetchFailed(makeBlockManagerId("hostA"),
shuffleDep1.shuffleId, 0L, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
// stage 0, attempt 1 should have the properties of job2
assert(taskSets(2).stageId === 0)
assert(taskSets(2).stageAttemptId === 1)
checkJobPropertiesAndPriority(taskSets(2), "job2", job2Id)
// run the rest of the stages normally, checking that they have the correct properties
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
checkJobPropertiesAndPriority(taskSets(3), "job2", job2Id)
complete(taskSets(3), Seq((Success, makeMapStatus("hostA", 1))))
checkJobPropertiesAndPriority(taskSets(4), "job2", job2Id)
complete(taskSets(4), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assert(scheduler.activeJobs.isEmpty)
assertDataStructuresEmpty()
}
/**
* In this test, we run a map stage where one of the executors fails but we still receive a
* "zombie" complete message from a task that ran on that executor. We want to make sure the
* stage is resubmitted so that the task that ran on the failed executor is re-executed, and
* that the stage is only marked as finished once that task completes.
*/
test("run trivial shuffle with out-of-band executor failure and retry") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
// Tell the DAGScheduler that hostA was lost.
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
// At this point, no more tasks are running for the stage (and the TaskSetManager considers the
// stage complete), but the tasks that ran on HostA need to be re-run, so the DAGScheduler
// should re-submit the stage with one task (the task that originally ran on HostA).
assert(taskSets.size === 2)
assert(taskSets(1).tasks.size === 1)
// Make sure that the stage that was re-submitted was the ShuffleMapStage (not the reduce
// stage, which shouldn't be run until all of the tasks in the ShuffleMapStage complete on
// alive executors).
assert(taskSets(1).tasks(0).isInstanceOf[ShuffleMapTask])
// have hostC complete the resubmitted task
complete(taskSets(1), Seq((Success, makeMapStatus("hostC", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
// Make sure that the reduce stage was now submitted.
assert(taskSets.size === 3)
assert(taskSets(2).tasks(0).isInstanceOf[ResultTask[_, _]])
// Complete the reduce stage.
complete(taskSets(2), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("recursive shuffle failures") {
val shuffleOneRdd = new MyRDD(sc, 2, Nil)
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker)
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// have the first stage complete normally
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// have the second stage complete normally
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostC", 1))))
// fail the third stage because hostA went down
complete(taskSets(2), Seq(
(FetchFailed(makeBlockManagerId("hostA"),
shuffleDepTwo.shuffleId, 0L, 0, 0, "ignored"), null)))
// TODO assert this:
// blockManagerMaster.removeExecutor("exec-hostA")
// have DAGScheduler try again
scheduler.resubmitFailedStages()
complete(taskSets(3), Seq((Success, makeMapStatus("hostA", 2))))
complete(taskSets(4), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(5), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("cached post-shuffle") {
val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
cacheLocations(shuffleTwoRdd.id -> 0) = Seq(makeBlockManagerId("hostD"))
cacheLocations(shuffleTwoRdd.id -> 1) = Seq(makeBlockManagerId("hostC"))
// complete stage 0
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// complete stage 1
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
// pretend stage 2 failed because hostA went down
complete(taskSets(2), Seq(
(FetchFailed(makeBlockManagerId("hostA"),
shuffleDepTwo.shuffleId, 0L, 0, 0, "ignored"), null)))
// TODO assert this:
// blockManagerMaster.removeExecutor("exec-hostA")
// DAGScheduler should notice the cached copy of the second shuffle and try to get it rerun.
scheduler.resubmitFailedStages()
assertLocations(taskSets(3), Seq(Seq("hostD")))
// allow hostD to recover
complete(taskSets(3), Seq((Success, makeMapStatus("hostD", 1))))
complete(taskSets(4), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("misbehaved accumulator should not crash DAGScheduler and SparkContext") {
val acc = new LongAccumulator {
override def add(v: java.lang.Long): Unit = throw new DAGSchedulerSuiteDummyException
override def add(v: Long): Unit = throw new DAGSchedulerSuiteDummyException
}
sc.register(acc)
// Run this on executors
sc.parallelize(1 to 10, 2).foreach { item => acc.add(1) }
// Make sure we can still run commands
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("misbehaved accumulator should not impact other accumulators") {
val bad = new LongAccumulator {
override def merge(other: AccumulatorV2[java.lang.Long, java.lang.Long]): Unit = {
throw new DAGSchedulerSuiteDummyException
}
}
sc.register(bad, "bad")
val good = sc.longAccumulator("good")
sc.parallelize(1 to 10, 2).foreach { item =>
bad.add(1)
good.add(1)
}
// This is to ensure the `bad` accumulator did fail to update its value
assert(bad.value == 0L)
// Should be able to update the "good" accumulator
assert(good.value == 10L)
}
/**
   * The job will fail once the first task throws an error.
   * Any subsequent task WILL throw a legitimate java.lang.UnsupportedOperationException.
   * If there are multiple tasks, there is a race between the SparkDriverExecutionExceptions
   * and their differing causes as to which one will represent the result for the job.
*/
test("misbehaved resultHandler should not crash DAGScheduler and SparkContext") {
failAfter(1.minute) { // If DAGScheduler crashes, the following test will hang forever
for (error <- Seq(
new DAGSchedulerSuiteDummyException,
new AssertionError, // E.g., assert(foo == bar) fails
new NotImplementedError // E.g., call a method with `???` implementation.
)) {
val e = intercept[SparkDriverExecutionException] {
// Number of parallelized partitions implies number of tasks of job
val rdd = sc.parallelize(1 to 10, 2)
sc.runJob[Int, Int](
rdd,
(context: TaskContext, iter: Iterator[Int]) => iter.size,
        // For a robust test assertion, limit the number of job tasks to 1; that is, if there
        // are multiple RDD partitions, use the id of any one partition, say, the first (id = 0).
Seq(0),
(part: Int, result: Int) => throw error)
}
assert(e.getCause eq error)
// Make sure we can still run commands on our SparkContext
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
}
}
test(s"invalid ${SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL} should not crash DAGScheduler") {
sc.setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, "invalid")
try {
intercept[SparkException] {
sc.parallelize(1 to 1, 1).foreach { _ =>
throw new DAGSchedulerSuiteDummyException
}
}
// Verify the above job didn't crash DAGScheduler by running a simple job
assert(sc.parallelize(1 to 10, 2).count() === 10)
} finally {
sc.setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null)
}
}
test("getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)") {
val e1 = intercept[DAGSchedulerSuiteDummyException] {
val rdd = new MyRDD(sc, 2, Nil) {
override def getPartitions: Array[Partition] = {
throw new DAGSchedulerSuiteDummyException
}
}
rdd.reduceByKey(_ + _, 1).count()
}
// Make sure we can still run commands
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)") {
val e1 = intercept[SparkException] {
val rdd = new MyRDD(sc, 2, Nil) {
override def getPreferredLocations(split: Partition): Seq[String] = {
throw new DAGSchedulerSuiteDummyException
}
}
rdd.count()
}
assert(e1.getMessage.contains(classOf[DAGSchedulerSuiteDummyException].getName))
// Make sure we can still run commands
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("accumulator not calculated for resubmitted result stage") {
    // just for registration
val accum = AccumulatorSuite.createLongAccum("a")
val finalRdd = new MyRDD(sc, 1, Nil)
submit(finalRdd, Array(0))
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assert(accum.value === 1)
assertDataStructuresEmpty()
}
test("accumulator not calculated for resubmitted task in result stage") {
val accum = AccumulatorSuite.createLongAccum("a")
val finalRdd = new MyRDD(sc, 2, Nil)
submit(finalRdd, Array(0, 1))
// finish the first task
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
// verify stage exists
assert(scheduler.stageIdToStage.contains(0))
// finish the first task again (simulate a speculative task or a resubmitted task)
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
// The accumulator should only be updated once.
assert(accum.value === 1)
runEvent(makeCompletionEvent(taskSets(0).tasks(1), Success, 42))
assertDataStructuresEmpty()
}
test("accumulators are updated on exception failures and task killed") {
val acc1 = AccumulatorSuite.createLongAccum("ingenieur")
val acc2 = AccumulatorSuite.createLongAccum("boulanger")
val acc3 = AccumulatorSuite.createLongAccum("agriculteur")
assert(AccumulatorContext.get(acc1.id).isDefined)
assert(AccumulatorContext.get(acc2.id).isDefined)
assert(AccumulatorContext.get(acc3.id).isDefined)
val accUpdate1 = new LongAccumulator
accUpdate1.metadata = acc1.metadata
accUpdate1.setValue(15)
val accUpdate2 = new LongAccumulator
accUpdate2.metadata = acc2.metadata
accUpdate2.setValue(13)
val accUpdate3 = new LongAccumulator
accUpdate3.metadata = acc3.metadata
accUpdate3.setValue(18)
val accumUpdates1 = Seq(accUpdate1, accUpdate2)
val accumInfo1 = accumUpdates1.map(AccumulatorSuite.makeInfo)
val exceptionFailure = new ExceptionFailure(
new SparkException("fondue?"),
accumInfo1).copy(accums = accumUpdates1)
submit(new MyRDD(sc, 1, Nil), Array(0))
runEvent(makeCompletionEvent(taskSets.head.tasks.head, exceptionFailure, "result"))
assert(AccumulatorContext.get(acc1.id).get.value === 15L)
assert(AccumulatorContext.get(acc2.id).get.value === 13L)
val accumUpdates2 = Seq(accUpdate3)
val accumInfo2 = accumUpdates2.map(AccumulatorSuite.makeInfo)
val taskKilled = new TaskKilled( "test", accumInfo2, accums = accumUpdates2)
runEvent(makeCompletionEvent(taskSets.head.tasks.head, taskKilled, "result"))
assert(AccumulatorContext.get(acc3.id).get.value === 18L)
}
test("reduce tasks should be placed locally with map output") {
// Create a shuffleMapRdd with 1 partition
val shuffleMapRdd = new MyRDD(sc, 1, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA")))
    // Reducer should run on the same host that the map task ran on
val reduceTaskSet = taskSets(1)
assertLocations(reduceTaskSet, Seq(Seq("hostA")))
complete(reduceTaskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("reduce task locality preferences should only include machines with largest map outputs") {
val numMapTasks = 4
// Create a shuffleMapRdd with more partitions
val shuffleMapRdd = new MyRDD(sc, numMapTasks, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
val statuses = (1 to numMapTasks).map { i =>
(Success, makeMapStatus("host" + i, 1, (10*i).toByte))
}
complete(taskSets(0), statuses)
// Reducer should prefer the last 3 hosts as they have 20%, 30% and 40% of data
val hosts = (1 to numMapTasks).map(i => "host" + i).reverse.take(numMapTasks - 1)
val reduceTaskSet = taskSets(1)
assertLocations(reduceTaskSet, Seq(hosts))
complete(reduceTaskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("stages with both narrow and shuffle dependencies use narrow ones for locality") {
// Create an RDD that has both a shuffle dependency and a narrow dependency (e.g. for a join)
val rdd1 = new MyRDD(sc, 1, Nil)
val rdd2 = new MyRDD(sc, 1, Nil, locations = Seq(Seq("hostB")))
val shuffleDep = new ShuffleDependency(rdd1, new HashPartitioner(1))
val narrowDep = new OneToOneDependency(rdd2)
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep, narrowDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA")))
// Reducer should run where RDD 2 has preferences, even though it also has a shuffle dep
val reduceTaskSet = taskSets(1)
assertLocations(reduceTaskSet, Seq(Seq("hostB")))
complete(reduceTaskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("Spark exceptions should include call site in stack trace") {
val e = intercept[SparkException] {
sc.parallelize(1 to 10, 2).map { _ => throw new RuntimeException("uh-oh!") }.count()
}
// Does not include message, ONLY stack trace.
val stackTraceString = Utils.exceptionString(e)
// should actually include the RDD operation that invoked the method:
assert(stackTraceString.contains("org.apache.spark.rdd.RDD.count"))
// should include the FunSuite setup:
assert(stackTraceString.contains("org.scalatest.FunSuite"))
}
test("catch errors in event loop") {
// this is a test of our testing framework -- make sure errors in event loop don't get ignored
// just run some bad event that will throw an exception -- we'll give a null TaskEndReason
val rdd1 = new MyRDD(sc, 1, Nil)
submit(rdd1, Array(0))
intercept[Exception] {
complete(taskSets(0), Seq(
(null, makeMapStatus("hostA", 1))))
}
}
test("simple map stage submission") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
// Submit a map stage by itself
submitMapStage(shuffleDep)
assert(results.size === 0) // No results yet
completeShuffleMapStageSuccessfully(0, 0, 1)
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
// Submit a reduce job that depends on this map stage; it should directly do the reduce
submit(reduceRdd, Array(0))
completeNextResultStageWithSuccess(2, 0)
assert(results === Map(0 -> 42))
results.clear()
assertDataStructuresEmpty()
// Check that if we submit the map stage again, no tasks run
submitMapStage(shuffleDep)
assert(results.size === 1)
assertDataStructuresEmpty()
}
test("map stage submission with reduce stage also depending on the data") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
// Submit the map stage by itself
submitMapStage(shuffleDep)
// Submit a reduce job that depends on this map stage
submit(reduceRdd, Array(0))
// Complete tasks for the map stage
completeShuffleMapStageSuccessfully(0, 0, 1)
assert(results.size === 1)
results.clear()
// Complete tasks for the reduce stage
completeNextResultStageWithSuccess(1, 0)
assert(results === Map(0 -> 42))
results.clear()
assertDataStructuresEmpty()
// Check that if we submit the map stage again, no tasks run
submitMapStage(shuffleDep)
assert(results.size === 1)
assertDataStructuresEmpty()
}
test("map stage submission with fetch failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
// Submit a map stage by itself
submitMapStage(shuffleDep)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
// Submit a reduce job that depends on this map stage, but where one reduce will fail a fetch
submit(reduceRdd, Array(0, 1))
complete(taskSets(1), Seq(
(Success, 42),
(FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"), null)))
// Ask the scheduler to try it again; TaskSet 2 will rerun the map task that we couldn't fetch
// from, then TaskSet 3 will run the reduce stage
scheduler.resubmitFailedStages()
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
complete(taskSets(3), Seq((Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
results.clear()
assertDataStructuresEmpty()
// Run another reduce job without a failure; this should just work
submit(reduceRdd, Array(0, 1))
complete(taskSets(4), Seq(
(Success, 44),
(Success, 45)))
assert(results === Map(0 -> 44, 1 -> 45))
results.clear()
assertDataStructuresEmpty()
// Resubmit the map stage; this should also just work
submitMapStage(shuffleDep)
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
}
/**
* In this test, we have three RDDs with shuffle dependencies, and we submit map stage jobs
* that are waiting on each one, as well as a reduce job on the last one. We test that all of
* these jobs complete even if there are some fetch failures in both shuffles.
*/
test("map stage submission with multiple shared stages and failures") {
val rdd1 = new MyRDD(sc, 2, Nil)
val dep1 = new ShuffleDependency(rdd1, new HashPartitioner(2))
val rdd2 = new MyRDD(sc, 2, List(dep1), tracker = mapOutputTracker)
val dep2 = new ShuffleDependency(rdd2, new HashPartitioner(2))
val rdd3 = new MyRDD(sc, 2, List(dep2), tracker = mapOutputTracker)
val listener1 = new SimpleListener
val listener2 = new SimpleListener
val listener3 = new SimpleListener
submitMapStage(dep1, listener1)
submitMapStage(dep2, listener2)
submit(rdd3, Array(0, 1), listener = listener3)
// Complete the first stage
assert(taskSets(0).stageId === 0)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", rdd1.partitions.length)),
(Success, makeMapStatus("hostB", rdd1.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
assert(listener1.results.size === 1)
// When attempting the second stage, show a fetch failure
assert(taskSets(1).stageId === 1)
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", rdd2.partitions.length)),
(FetchFailed(makeBlockManagerId("hostA"), dep1.shuffleId, 0L, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
assert(listener2.results.size === 0) // Second stage listener should not have a result yet
// Stage 0 should now be running as task set 2; make its task succeed
assert(taskSets(2).stageId === 0)
complete(taskSets(2), Seq(
(Success, makeMapStatus("hostC", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
assert(listener2.results.size === 0) // Second stage listener should still not have a result
// Stage 1 should now be running as task set 3; make its first task succeed
assert(taskSets(3).stageId === 1)
complete(taskSets(3), Seq(
(Success, makeMapStatus("hostB", rdd2.partitions.length)),
(Success, makeMapStatus("hostD", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep2.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostB"), makeBlockManagerId("hostD")))
assert(listener2.results.size === 1)
// Finally, the reduce job should be running as task set 4; make it see a fetch failure,
// then make it run again and succeed
assert(taskSets(4).stageId === 2)
complete(taskSets(4), Seq(
(Success, 52),
(FetchFailed(makeBlockManagerId("hostD"), dep2.shuffleId, 0L, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
// TaskSet 5 will rerun stage 1's lost task, then TaskSet 6 will rerun stage 2
assert(taskSets(5).stageId === 1)
complete(taskSets(5), Seq(
(Success, makeMapStatus("hostE", rdd2.partitions.length))))
complete(taskSets(6), Seq(
(Success, 53)))
assert(listener3.results === Map(0 -> 52, 1 -> 53))
assertDataStructuresEmpty()
}
test("Trigger mapstage's job listener in submitMissingTasks") {
val rdd1 = new MyRDD(sc, 2, Nil)
val dep1 = new ShuffleDependency(rdd1, new HashPartitioner(2))
val rdd2 = new MyRDD(sc, 2, List(dep1), tracker = mapOutputTracker)
val dep2 = new ShuffleDependency(rdd2, new HashPartitioner(2))
val listener1 = new SimpleListener
val listener2 = new SimpleListener
submitMapStage(dep1, listener1)
submitMapStage(dep2, listener2)
// Complete the stage0.
assert(taskSets(0).stageId === 0)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", rdd1.partitions.length)),
(Success, makeMapStatus("hostB", rdd1.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
assert(listener1.results.size === 1)
// When attempting stage1, trigger a fetch failure.
assert(taskSets(1).stageId === 1)
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostC", rdd2.partitions.length)),
(FetchFailed(makeBlockManagerId("hostA"), dep1.shuffleId, 0L, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
// Stage1 listener should not have a result yet
assert(listener2.results.size === 0)
// Speculative task succeeded in stage1.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1),
Success,
makeMapStatus("hostD", rdd2.partitions.length)))
    // The stage1 listener still should not have a result, even though there are no missing
    // partitions in it, because stage1 has failed and is not inside `runningStages` at this moment.
assert(listener2.results.size === 0)
// Stage0 should now be running as task set 2; make its task succeed
assert(taskSets(2).stageId === 0)
complete(taskSets(2), Seq(
(Success, makeMapStatus("hostC", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
Set(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
    // After stage0 is finished, stage1 will be submitted and found to have no missing
    // partitions. The listener is then triggered.
assert(listener2.results.size === 1)
assertDataStructuresEmpty()
}
/**
* In this test, we run a map stage where one of the executors fails but we still receive a
* "zombie" complete message from that executor. We want to make sure the stage is not reported
* as done until all tasks have completed.
*
* Most of the functionality in this test is tested in "run trivial shuffle with out-of-band
* executor failure and retry". However, that test uses ShuffleMapStages that are followed by
* a ResultStage, whereas in this test, the ShuffleMapStage is tested in isolation, without a
* ResultStage after it.
*/
test("map stage submission with executor failure late map task completions") {
val shuffleMapRdd = new MyRDD(sc, 3, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
submitMapStage(shuffleDep)
val oldTaskSet = taskSets(0)
runEvent(makeCompletionEvent(oldTaskSet.tasks(0), Success, makeMapStatus("hostA", 2)))
assert(results.size === 0) // Map stage job should not be complete yet
// Pretend host A was lost. This will cause the TaskSetManager to resubmit task 0, because it
// completed on hostA.
val oldEpoch = mapOutputTracker.getEpoch
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
val newEpoch = mapOutputTracker.getEpoch
assert(newEpoch > oldEpoch)
// Suppose we also get a completed event from task 1 on the same host; this should be ignored
runEvent(makeCompletionEvent(oldTaskSet.tasks(1), Success, makeMapStatus("hostA", 2)))
assert(results.size === 0) // Map stage job should not be complete yet
// A completion from another task should work because it's a non-failed host
runEvent(makeCompletionEvent(oldTaskSet.tasks(2), Success, makeMapStatus("hostB", 2)))
// At this point, no more tasks are running for the stage (and the TaskSetManager considers
// the stage complete), but the task that ran on hostA needs to be re-run, so the map stage
// shouldn't be marked as complete, and the DAGScheduler should re-submit the stage.
assert(results.size === 0)
assert(taskSets.size === 2)
// Now complete tasks in the second task set
val newTaskSet = taskSets(1)
// 2 tasks should have been re-submitted, for tasks 0 and 1 (which ran on hostA).
assert(newTaskSet.tasks.size === 2)
    // Complete task 0 from the original task set (i.e., not the one that's currently active).
// This should still be counted towards the job being complete (but there's still one
// outstanding task).
runEvent(makeCompletionEvent(newTaskSet.tasks(0), Success, makeMapStatus("hostB", 2)))
assert(results.size === 0)
// Complete the final task, from the currently active task set. There's still one
// running task, task 0 in the currently active stage attempt, but the success of task 0 means
// the DAGScheduler can mark the stage as finished.
runEvent(makeCompletionEvent(newTaskSet.tasks(1), Success, makeMapStatus("hostB", 2)))
assert(results.size === 1) // Map stage job should now finally be complete
assertDataStructuresEmpty()
// Also test that a reduce stage using this shuffled data can immediately run
val reduceRDD = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
results.clear()
submit(reduceRDD, Array(0, 1))
complete(taskSets(2), Seq((Success, 42), (Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
results.clear()
assertDataStructuresEmpty()
}
/**
* Checks the DAGScheduler's internal logic for traversing an RDD DAG by making sure that
* getShuffleDependencies correctly returns the direct shuffle dependencies of a particular
* RDD. The test creates the following RDD graph (where n denotes a narrow dependency and s
* denotes a shuffle dependency):
*
* A <------------s---------,
   *                           \
* B <--s-- C <--s-- D <--n------ E
*
* Here, the direct shuffle dependency of C is just the shuffle dependency on B. The direct
* shuffle dependencies of E are the shuffle dependency on A and the shuffle dependency on C.
*/
test("getShuffleDependencies correctly returns only direct shuffle parents") {
val rddA = new MyRDD(sc, 2, Nil)
val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(1))
val rddB = new MyRDD(sc, 2, Nil)
val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(1))
val rddC = new MyRDD(sc, 1, List(shuffleDepB))
val shuffleDepC = new ShuffleDependency(rddC, new HashPartitioner(1))
val rddD = new MyRDD(sc, 1, List(shuffleDepC))
val narrowDepD = new OneToOneDependency(rddD)
val rddE = new MyRDD(sc, 1, List(shuffleDepA, narrowDepD), tracker = mapOutputTracker)
assert(scheduler.getShuffleDependencies(rddA) === Set())
assert(scheduler.getShuffleDependencies(rddB) === Set())
assert(scheduler.getShuffleDependencies(rddC) === Set(shuffleDepB))
assert(scheduler.getShuffleDependencies(rddD) === Set(shuffleDepC))
assert(scheduler.getShuffleDependencies(rddE) === Set(shuffleDepA, shuffleDepC))
}
test("SPARK-17644: After one stage is aborted for too many failed attempts, subsequent stages" +
"still behave correctly on fetch failures") {
// Runs a job that always encounters a fetch failure, so should eventually be aborted
def runJobWithPersistentFetchFailure: Unit = {
val rdd1 = sc.makeRDD(Array(1, 2, 3, 4), 2).map(x => (x, 1)).groupByKey()
val shuffleHandle =
rdd1.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]].shuffleHandle
rdd1.map {
case (x, _) if (x == 1) =>
throw new FetchFailedException(
BlockManagerId("1", "1", 1), shuffleHandle.shuffleId, 0L, 0, 0, "test")
case (x, _) => x
}.count()
}
// Runs a job that encounters a single fetch failure but succeeds on the second attempt
def runJobWithTemporaryFetchFailure: Unit = {
val rdd1 = sc.makeRDD(Array(1, 2, 3, 4), 2).map(x => (x, 1)).groupByKey()
val shuffleHandle =
rdd1.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]].shuffleHandle
      rdd1.map {
        case (x, _) if (x == 1) && FailThisAttempt._fail.getAndSet(false) =>
          throw new FetchFailedException(
            BlockManagerId("1", "1", 1), shuffleHandle.shuffleId, 0L, 0, 0, "test")
        // Fall through to a successful value and force evaluation so the helper actually runs a
        // job that can succeed once the single injected fetch failure has been consumed.
        case (x, _) => x
      }.count()
}
failAfter(10.seconds) {
val e = intercept[SparkException] {
runJobWithPersistentFetchFailure
}
assert(e.getMessage.contains("org.apache.spark.shuffle.FetchFailedException"))
}
// Run a second job that will fail due to a fetch failure.
// This job will hang without the fix for SPARK-17644.
failAfter(10.seconds) {
val e = intercept[SparkException] {
runJobWithPersistentFetchFailure
}
assert(e.getMessage.contains("org.apache.spark.shuffle.FetchFailedException"))
}
failAfter(10.seconds) {
try {
runJobWithTemporaryFetchFailure
} catch {
case e: Throwable => fail("A job with one fetch failure should eventually succeed")
}
}
}
test("[SPARK-19263] DAGScheduler should not submit multiple active tasksets," +
" even with late completions from earlier stage attempts") {
// Create 3 RDDs with shuffle dependencies on each other: rddA <--- rddB <--- rddC
val rddA = new MyRDD(sc, 2, Nil)
val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(2))
val shuffleIdA = shuffleDepA.shuffleId
val rddB = new MyRDD(sc, 2, List(shuffleDepA), tracker = mapOutputTracker)
val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(2))
val rddC = new MyRDD(sc, 2, List(shuffleDepB), tracker = mapOutputTracker)
submit(rddC, Array(0, 1))
// Complete both tasks in rddA.
assert(taskSets(0).stageId === 0 && taskSets(0).stageAttemptId === 0)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostA", 2))))
// Fetch failed for task(stageId=1, stageAttemptId=0, partitionId=0) running on hostA
// and task(stageId=1, stageAttemptId=0, partitionId=1) is still running.
assert(taskSets(1).stageId === 1 && taskSets(1).stageAttemptId === 0)
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleIdA, 0L, 0, 0,
"Fetch failure of task: stageId=1, stageAttempt=0, partitionId=0"),
result = null))
// Both original tasks in rddA should be marked as failed, because they ran on the
// failed hostA, so both should be resubmitted. Complete them on hostB successfully.
scheduler.resubmitFailedStages()
assert(taskSets(2).stageId === 0 && taskSets(2).stageAttemptId === 1
&& taskSets(2).tasks.size === 2)
complete(taskSets(2), Seq(
(Success, makeMapStatus("hostB", 2)),
(Success, makeMapStatus("hostB", 2))))
// Complete task(stageId=1, stageAttemptId=0, partitionId=1) running on failed hostA
// successfully. The success should be ignored because the task started before the
// executor failed, so the output may have been lost.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1), Success, makeMapStatus("hostA", 2)))
    // task(stageId=1, stageAttemptId=1, partitionId=1) should be marked completed when
    // task(stageId=1, stageAttemptId=0, partitionId=1) finished; ideally we would verify that,
    // but there is no way to reach into the task scheduler to check it.
    // Both tasks in rddB should be resubmitted, because neither of them has truly succeeded.
    // Complete task(stageId=1, stageAttemptId=1, partitionId=0) successfully.
    // Task(stageId=1, stageAttemptId=1, partitionId=1) of this new active stage attempt
    // is still running.
assert(taskSets(3).stageId === 1 && taskSets(3).stageAttemptId === 1
&& taskSets(3).tasks.size === 2)
runEvent(makeCompletionEvent(
taskSets(3).tasks(0), Success, makeMapStatus("hostB", 2)))
// At this point there should be no active task set for stageId=1 and we need
// to resubmit because the output from (stageId=1, stageAttemptId=0, partitionId=1)
// was ignored due to executor failure
assert(taskSets.size === 5)
assert(taskSets(4).stageId === 1 && taskSets(4).stageAttemptId === 2
&& taskSets(4).tasks.size === 1)
// Complete task(stageId=1, stageAttempt=2, partitionId=1) successfully.
runEvent(makeCompletionEvent(
taskSets(4).tasks(0), Success, makeMapStatus("hostB", 2)))
// Now the ResultStage should be submitted, because all of the tasks of rddB have
// completed successfully on alive executors.
assert(taskSets.size === 6 && taskSets(5).tasks(0).isInstanceOf[ResultTask[_, _]])
complete(taskSets(5), Seq(
(Success, 1),
(Success, 1)))
}
test("task end event should have updated accumulators (SPARK-20342)") {
val tasks = 10
val accumId = new AtomicLong()
val foundCount = new AtomicLong()
val listener = new SparkListener() {
override def onTaskEnd(event: SparkListenerTaskEnd): Unit = {
event.taskInfo.accumulables.find(_.id == accumId.get).foreach { _ =>
foundCount.incrementAndGet()
}
}
}
sc.addSparkListener(listener)
// Try a few times in a loop to make sure. This is not guaranteed to fail when the bug exists,
// but it should at least make the test flaky. If the bug is fixed, this should always pass.
(1 to 10).foreach { i =>
foundCount.set(0L)
val accum = sc.longAccumulator(s"accum$i")
accumId.set(accum.id)
sc.parallelize(1 to tasks, tasks).foreach { _ =>
accum.add(1L)
}
sc.listenerBus.waitUntilEmpty()
assert(foundCount.get() === tasks)
}
}
test("Barrier task failures from the same stage attempt don't trigger multiple stage retries") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
val mapStageId = 0
def countSubmittedMapStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == mapStageId)
}
// The map stage should have been submitted.
assert(countSubmittedMapStageAttempts() === 1)
// The first map task fails with TaskKilled.
runEvent(makeCompletionEvent(
taskSets(0).tasks(0),
TaskKilled("test"),
null))
assert(sparkListener.failedStages === Seq(0))
// The second map task fails with TaskKilled.
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
TaskKilled("test"),
null))
// Trigger resubmission of the failed map stage.
runEvent(ResubmitFailedStages)
// Another attempt for the map stage should have been submitted, resulting in 2 total attempts.
assert(countSubmittedMapStageAttempts() === 2)
}
test("Barrier task failures from a previous stage attempt don't trigger stage retry") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
val mapStageId = 0
def countSubmittedMapStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == mapStageId)
}
// The map stage should have been submitted.
assert(countSubmittedMapStageAttempts() === 1)
// The first map task fails with TaskKilled.
runEvent(makeCompletionEvent(
taskSets(0).tasks(0),
TaskKilled("test"),
null))
assert(sparkListener.failedStages === Seq(0))
// Trigger resubmission of the failed map stage.
runEvent(ResubmitFailedStages)
// Another attempt for the map stage should have been submitted, resulting in 2 total attempts.
assert(countSubmittedMapStageAttempts() === 2)
// The second map task fails with TaskKilled.
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
TaskKilled("test"),
null))
// The second map task failure doesn't trigger stage retry.
runEvent(ResubmitFailedStages)
assert(countSubmittedMapStageAttempts() === 2)
}
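  /**
   * Runs a job with two shuffle map stages where the first map RDD is indeterminate, completes
   * both map stages, then fails the first result task with a FetchFailed against the second
   * shuffle. Returns the two shuffle ids so callers can assert on the rollback behaviour.
   */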
private def constructIndeterminateStageFetchFailed(): (Int, Int) = {
val shuffleMapRdd1 = new MyRDD(sc, 2, Nil, indeterminate = true)
val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
val shuffleId1 = shuffleDep1.shuffleId
val shuffleMapRdd2 = new MyRDD(sc, 2, List(shuffleDep1), tracker = mapOutputTracker)
val shuffleDep2 = new ShuffleDependency(shuffleMapRdd2, new HashPartitioner(2))
val shuffleId2 = shuffleDep2.shuffleId
val finalRdd = new MyRDD(sc, 2, List(shuffleDep2), tracker = mapOutputTracker)
submit(finalRdd, Array(0, 1))
// Finish the first shuffle map stage.
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId1) === Some(Seq.empty))
// Finish the second shuffle map stage.
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostC", 2)),
(Success, makeMapStatus("hostD", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId2) === Some(Seq.empty))
// The first task of the final stage failed with fetch failure
runEvent(makeCompletionEvent(
taskSets(2).tasks(0),
FetchFailed(makeBlockManagerId("hostC"), shuffleId2, 0L, 0, 0, "ignored"),
null))
(shuffleId1, shuffleId2)
}
test("SPARK-25341: abort stage while using old fetch protocol") {
    // reset the test context to use the old fetch protocol
afterEach()
val conf = new SparkConf()
conf.set(config.SHUFFLE_USE_OLD_FETCH_PROTOCOL.key, "true")
init(conf)
// Construct the scenario of indeterminate stage fetch failed.
constructIndeterminateStageFetchFailed()
// The job should fail because Spark can't rollback the shuffle map stage while
// using old protocol.
assert(failure != null && failure.getMessage.contains(
"Spark can only do this while using the new shuffle block fetching protocol"))
}
test("SPARK-25341: retry all the succeeding stages when the map stage is indeterminate") {
val (shuffleId1, shuffleId2) = constructIndeterminateStageFetchFailed()
// Check status for all failedStages
val failedStages = scheduler.failedStages.toSeq
assert(failedStages.map(_.id) == Seq(1, 2))
    // Shuffle blocks on "hostC" are lost, so the first task of `shuffleMapRdd2` needs to be retried.
assert(failedStages.collect {
case stage: ShuffleMapStage if stage.shuffleDep.shuffleId == shuffleId2 => stage
}.head.findMissingPartitions() == Seq(0))
// The result stage is still waiting for its 2 tasks to complete
assert(failedStages.collect {
case stage: ResultStage => stage
}.head.findMissingPartitions() == Seq(0, 1))
scheduler.resubmitFailedStages()
// The first task of the `shuffleMapRdd2` failed with fetch failure
runEvent(makeCompletionEvent(
taskSets(3).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId1, 0L, 0, 0, "ignored"),
null))
val newFailedStages = scheduler.failedStages.toSeq
assert(newFailedStages.map(_.id) == Seq(0, 1))
scheduler.resubmitFailedStages()
    // The first shuffle map stage is resubmitted and reruns all of its tasks.
assert(taskSets(4).stageId == 0)
assert(taskSets(4).stageAttemptId == 1)
assert(taskSets(4).tasks.length == 2)
    // Finish all stages.
complete(taskSets(4), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId1) === Some(Seq.empty))
complete(taskSets(5), Seq(
(Success, makeMapStatus("hostC", 2)),
(Success, makeMapStatus("hostD", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId2) === Some(Seq.empty))
complete(taskSets(6), Seq((Success, 11), (Success, 12)))
    // The job ended successfully.
assert(results === Map(0 -> 11, 1 -> 12))
results.clear()
assertDataStructuresEmpty()
}
test("SPARK-25341: continuous indeterminate stage roll back") {
// shuffleMapRdd1/2/3 are all indeterminate.
val shuffleMapRdd1 = new MyRDD(sc, 2, Nil, indeterminate = true)
val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
val shuffleId1 = shuffleDep1.shuffleId
val shuffleMapRdd2 = new MyRDD(
sc, 2, List(shuffleDep1), tracker = mapOutputTracker, indeterminate = true)
val shuffleDep2 = new ShuffleDependency(shuffleMapRdd2, new HashPartitioner(2))
val shuffleId2 = shuffleDep2.shuffleId
val shuffleMapRdd3 = new MyRDD(
sc, 2, List(shuffleDep2), tracker = mapOutputTracker, indeterminate = true)
val shuffleDep3 = new ShuffleDependency(shuffleMapRdd3, new HashPartitioner(2))
val shuffleId3 = shuffleDep3.shuffleId
val finalRdd = new MyRDD(sc, 2, List(shuffleDep3), tracker = mapOutputTracker)
submit(finalRdd, Array(0, 1), properties = new Properties())
// Finish the first 2 shuffle map stages.
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId1) === Some(Seq.empty))
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostB", 2)),
(Success, makeMapStatus("hostD", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId2) === Some(Seq.empty))
    // Executor lost on hostB, so both stage 0 and stage 1 should be rerun.
runEvent(makeCompletionEvent(
taskSets(2).tasks(0),
FetchFailed(makeBlockManagerId("hostB"), shuffleId2, 0L, 0, 0, "ignored"),
null))
mapOutputTracker.removeOutputsOnHost("hostB")
assert(scheduler.failedStages.toSeq.map(_.id) == Seq(1, 2))
scheduler.resubmitFailedStages()
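    // Asserts that the given task set is attempt 1 of the expected stage with both tasks
    // resubmitted, completes it on hostA/hostB, and checks the shuffle has no missing partitions.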
def checkAndCompleteRetryStage(
taskSetIndex: Int,
stageId: Int,
shuffleId: Int): Unit = {
assert(taskSets(taskSetIndex).stageId == stageId)
assert(taskSets(taskSetIndex).stageAttemptId == 1)
assert(taskSets(taskSetIndex).tasks.length == 2)
complete(taskSets(taskSetIndex), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
}
    // Check that all of the indeterminate stages roll back.
checkAndCompleteRetryStage(3, 0, shuffleId1)
checkAndCompleteRetryStage(4, 1, shuffleId2)
checkAndCompleteRetryStage(5, 2, shuffleId3)
    // The result stage succeeds and the whole job ends.
complete(taskSets(6), Seq((Success, 11), (Success, 12)))
assert(results === Map(0 -> 11, 1 -> 12))
results.clear()
assertDataStructuresEmpty()
}
test("SPARK-29042: Sampled RDD with unordered input should be indeterminate") {
val shuffleMapRdd1 = new MyRDD(sc, 2, Nil, indeterminate = false)
val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
val shuffleMapRdd2 = new MyRDD(sc, 2, List(shuffleDep1), tracker = mapOutputTracker)
assert(shuffleMapRdd2.outputDeterministicLevel == DeterministicLevel.UNORDERED)
val sampledRdd = shuffleMapRdd2.sample(true, 0.3, 1000L)
assert(sampledRdd.outputDeterministicLevel == DeterministicLevel.INDETERMINATE)
}
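  /**
   * Submits a job on top of the given map RDD, completes the map stage and the first result
   * task, then fails the second result task with a FetchFailed and expects the whole job to
   * abort because the partially-committed result stage cannot be rolled back.
   */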
private def assertResultStageFailToRollback(mapRdd: MyRDD): Unit = {
val shuffleDep = new ShuffleDependency(mapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val finalRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(finalRdd, Array(0, 1))
completeShuffleMapStageSuccessfully(taskSets.length - 1, 0, numShufflePartitions = 2)
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
// Finish the first task of the result stage
runEvent(makeCompletionEvent(
taskSets.last.tasks(0), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
// Fail the second task with FetchFailed.
runEvent(makeCompletionEvent(
taskSets.last.tasks(1),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
null))
// The job should fail because Spark can't rollback the result stage.
assert(failure != null && failure.getMessage.contains("Spark cannot rollback"))
}
test("SPARK-23207: cannot rollback a result stage") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil, indeterminate = true)
assertResultStageFailToRollback(shuffleMapRdd)
}
test("SPARK-23207: local checkpoint fail to rollback (checkpointed before)") {
val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true)
shuffleMapRdd.localCheckpoint()
shuffleMapRdd.doCheckpoint()
assertResultStageFailToRollback(shuffleMapRdd)
}
test("SPARK-23207: local checkpoint fail to rollback (checkpointing now)") {
val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true)
shuffleMapRdd.localCheckpoint()
assertResultStageFailToRollback(shuffleMapRdd)
}
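  /**
   * Same setup as assertResultStageFailToRollback, but expects the job to survive: the map
   * stage is retried for the lost output while the already-finished first result task stays
   * completed.
   */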
private def assertResultStageNotRollbacked(mapRdd: MyRDD): Unit = {
val shuffleDep = new ShuffleDependency(mapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val finalRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(finalRdd, Array(0, 1))
completeShuffleMapStageSuccessfully(taskSets.length - 1, 0, numShufflePartitions = 2)
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
// Finish the first task of the result stage
runEvent(makeCompletionEvent(
taskSets.last.tasks(0), Success, 42,
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
// Fail the second task with FetchFailed.
runEvent(makeCompletionEvent(
taskSets.last.tasks(1),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
null))
assert(failure == null, "job should not fail")
val failedStages = scheduler.failedStages.toSeq
assert(failedStages.length == 2)
    // Shuffle blocks on "hostA" are lost, so the first task of the shuffle map stage needs to be retried.
assert(failedStages.collect {
case stage: ShuffleMapStage if stage.shuffleDep.shuffleId == shuffleId => stage
}.head.findMissingPartitions() == Seq(0))
// The first task of result stage remains completed.
assert(failedStages.collect {
case stage: ResultStage => stage
}.head.findMissingPartitions() == Seq(1))
}
test("SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)") {
withTempDir { dir =>
sc.setCheckpointDir(dir.getCanonicalPath)
val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true)
shuffleMapRdd.checkpoint()
shuffleMapRdd.doCheckpoint()
assertResultStageNotRollbacked(shuffleMapRdd)
}
}
test("SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)") {
withTempDir { dir =>
sc.setCheckpointDir(dir.getCanonicalPath)
val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true)
shuffleMapRdd.checkpoint()
assertResultStageFailToRollback(shuffleMapRdd)
}
}
test("SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete") {
val latch = new CountDownLatch(1)
val jobListener = new SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
latch.countDown()
}
}
sc.addSparkListener(jobListener)
sc.emptyRDD[Int].countApprox(10000).getFinalValue()
assert(latch.await(10, TimeUnit.SECONDS))
}
test("Completions in zombie tasksets update status of non-zombie taskset") {
val parts = 4
val shuffleMapRdd = new MyRDD(sc, parts, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(parts))
val reduceRdd = new MyRDD(sc, parts, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, (0 until parts).toArray)
assert(taskSets.length == 1)
// Finish the first task of the shuffle map stage.
runEvent(makeCompletionEvent(
taskSets(0).tasks(0), Success, makeMapStatus("hostA", 4),
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
// The second task of the shuffle map stage failed with FetchFailed.
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
FetchFailed(makeBlockManagerId("hostB"), shuffleDep.shuffleId, 0L, 0, 0, "ignored"),
null))
scheduler.resubmitFailedStages()
assert(taskSets.length == 2)
    // The first partition has completed already, so the new attempt only needs to run 3 tasks.
assert(taskSets(1).tasks.length == 3)
// Finish the first task of the second attempt of the shuffle map stage.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0), Success, makeMapStatus("hostA", 4),
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
// Finish the third task of the first attempt of the shuffle map stage.
runEvent(makeCompletionEvent(
taskSets(0).tasks(2), Success, makeMapStatus("hostA", 4),
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
assert(tasksMarkedAsCompleted.length == 1)
assert(tasksMarkedAsCompleted.head.partitionId == 2)
    // Finish the fourth task of the first attempt of the shuffle map stage.
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), Success, makeMapStatus("hostA", 4),
Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
assert(tasksMarkedAsCompleted.length == 2)
assert(tasksMarkedAsCompleted.last.partitionId == 3)
// Now the shuffle map stage is completed, and the next stage is submitted.
assert(taskSets.length == 3)
// Finish
complete(taskSets(2), Seq((Success, 42), (Success, 42), (Success, 42), (Success, 42)))
assertDataStructuresEmpty()
}
/**
* Assert that the supplied TaskSet has exactly the given hosts as its preferred locations.
* Note that this checks only the host and not the executor ID.
*/
private def assertLocations(taskSet: TaskSet, hosts: Seq[Seq[String]]): Unit = {
assert(hosts.size === taskSet.tasks.size)
for ((taskLocs, expectedLocs) <- taskSet.tasks.map(_.preferredLocations).zip(hosts)) {
assert(taskLocs.map(_.host).toSet === expectedLocs.toSet)
}
}
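  /** Asserts that the DAGScheduler has released all of its per-job and per-stage bookkeeping state. */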
private def assertDataStructuresEmpty(): Unit = {
assert(scheduler.activeJobs.isEmpty)
assert(scheduler.failedStages.isEmpty)
assert(scheduler.jobIdToActiveJob.isEmpty)
assert(scheduler.jobIdToStageIds.isEmpty)
assert(scheduler.stageIdToStage.isEmpty)
assert(scheduler.runningStages.isEmpty)
assert(scheduler.shuffleIdToMapStage.isEmpty)
assert(scheduler.waitingStages.isEmpty)
assert(scheduler.outputCommitCoordinator.isEmpty)
}
// Nothing in this test should break if the task info's fields are null, but
// OutputCommitCoordinator requires the task info itself to not be null.
private def createFakeTaskInfo(): TaskInfo = {
val info = new TaskInfo(0, 0, 0, 0L, "", "", TaskLocality.ANY, false)
info.finishTime = 1
info
}
private def createFakeTaskInfoWithId(taskId: Long): TaskInfo = {
val info = new TaskInfo(taskId, 0, 0, 0L, "", "", TaskLocality.ANY, false)
info.finishTime = 1
info
}
private def makeCompletionEvent(
task: Task[_],
reason: TaskEndReason,
result: Any,
extraAccumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty,
metricPeaks: Array[Long] = Array.empty,
taskInfo: TaskInfo = createFakeTaskInfo()): CompletionEvent = {
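    // Mirror how a real task reports accumulators: successful tasks carry their metric
    // accumulators, while exception/kill reasons carry the accumulators recorded on the failure.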
val accumUpdates = reason match {
case Success => task.metrics.accumulators()
case ef: ExceptionFailure => ef.accums
case tk: TaskKilled => tk.accums
case _ => Seq.empty
}
CompletionEvent(task, reason, result, accumUpdates ++ extraAccumUpdates, metricPeaks, taskInfo)
}
}
object DAGSchedulerSuite {
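  // Builds a MapStatus for the given host in which every reduce partition reports the same block size.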
def makeMapStatus(host: String, reduces: Int, sizes: Byte = 2, mapTaskId: Long = -1): MapStatus =
MapStatus(makeBlockManagerId(host), Array.fill[Long](reduces)(sizes), mapTaskId)
def makeBlockManagerId(host: String): BlockManagerId =
BlockManagerId("exec-" + host, host, 12345)
}
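// Shared flag used by the SPARK-17644 test above to make exactly one task attempt hit a fetch failure.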
object FailThisAttempt {
val _fail = new AtomicBoolean(true)
}
| jkbradley/spark | core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala | Scala | apache-2.0 | 132,906 |
package im.mange.acceptance.driveby.scalatest.condition
import im.mange.acceptance.driveby.scalatest.WebSpecification
import im.mange.common.ConditionNotMetException
import im.mange.driveby.Id
import im.mange.driveby.conditions._
import org.scalatest.Matchers
class ElementClassesContainsSpec extends WebSpecification with Matchers {
def `pass for id` {
val id = Id("hasClassWith")
given.page(<b id={id.id} class="wobbleClass hasClass wibbleClass">hasClassWith</b>)
.assert(ElementClassesContains(id, "hasClass"))
}
def `fail for id without` {
val id = Id("hasClassWithout")
val b = given.page(<b id={id.id} class="nothasClass">hasClassWithout</b>)
val thrown = the [ConditionNotMetException] thrownBy { b.assert(ElementClassesContains(id, "hasClass")) }
thrown.getMessage should equal( """> FAILED: Assert ElementClassesContains("Id(hasClassWithout)", "hasClass") but was "nothasClass" (not met within 2000 millis)""")
}
} | alltonp/driveby | src/test/scala/im/mange/acceptance/driveby/scalatest/condition/ElementClassesContainsSpec.scala | Scala | apache-2.0 | 967 |
package com.danielasfregola.twitter4s.entities.v2
// https://developer.twitter.com/en/docs/twitter-api/data-dictionary/object-model/place
final case class Place(full_name: String,
id: String,
contained_within: Seq[String],
country: Option[String],
country_code: Option[String],
geo: Option[GeoJSON],
name: Option[String],
place_type: Option[String])
final case class GeoJSON(value: String) extends AnyVal
| DanielaSfregola/twitter4s | src/main/scala/com/danielasfregola/twitter4s/entities/v2/Place.scala | Scala | apache-2.0 | 568 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.partial
import java.util.{HashMap => JHashMap}
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.mutable.HashMap
import org.apache.spark.util.StatCounter
/**
* An ApproximateEvaluator for sums by key. Returns a map of key to confidence interval.
*/
private[spark] class GroupedSumEvaluator[T](totalOutputs: Int, confidence: Double)
extends ApproximateEvaluator[JHashMap[T, StatCounter], Map[T, BoundedDouble]] {
var outputsMerged = 0
  var sums = new JHashMap[T, StatCounter] // Running value statistics (count, mean, variance) per key, used to estimate the sum
  override def merge(outputId: Int, taskResult: JHashMap[T, StatCounter]): Unit = {
outputsMerged += 1
val iter = taskResult.entrySet.iterator()
while (iter.hasNext) {
val entry = iter.next()
val old = sums.get(entry.getKey)
if (old != null) {
old.merge(entry.getValue)
} else {
sums.put(entry.getKey, entry.getValue)
}
}
}
override def currentResult(): Map[T, BoundedDouble] = {
if (outputsMerged == totalOutputs) {
val result = new JHashMap[T, BoundedDouble](sums.size)
val iter = sums.entrySet.iterator()
while (iter.hasNext) {
val entry = iter.next()
val sum = entry.getValue.sum
result.put(entry.getKey, new BoundedDouble(sum, 1.0, sum, sum))
}
result.asScala
} else if (outputsMerged == 0) {
new HashMap[T, BoundedDouble]
} else {
val p = outputsMerged.toDouble / totalOutputs
val studentTCacher = new StudentTCacher(confidence)
val result = new JHashMap[T, BoundedDouble](sums.size)
val iter = sums.entrySet.iterator()
while (iter.hasNext) {
val entry = iter.next()
val counter = entry.getValue
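        // Extrapolate the per-key sum from the fraction p of outputs merged so far:
        // the observed count is scaled up by roughly 1/p, and the variance of the
        // product of the (approximately independent) mean and count estimates is
        // Var(mean * count) ~= mean^2 * Var(count) + count^2 * Var(mean) + Var(mean) * Var(count),
        // which is the sumVar term computed below.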
val meanEstimate = counter.mean
val meanVar = counter.sampleVariance / counter.count
val countEstimate = (counter.count + 1 - p) / p
val countVar = (counter.count + 1) * (1 - p) / (p * p)
val sumEstimate = meanEstimate * countEstimate
val sumVar = (meanEstimate * meanEstimate * countVar) +
(countEstimate * countEstimate * meanVar) +
(meanVar * countVar)
val sumStdev = math.sqrt(sumVar)
val confFactor = studentTCacher.get(counter.count)
val low = sumEstimate - confFactor * sumStdev
val high = sumEstimate + confFactor * sumStdev
result.put(entry.getKey, new BoundedDouble(sumEstimate, confidence, low, high))
}
result.asScala
}
}
}
| gioenn/xSpark | core/src/main/scala/org/apache/spark/partial/GroupedSumEvaluator.scala | Scala | apache-2.0 | 3,353 |
package org.beaucatcher.mongo
class MongoException(message: String, cause: Throwable) extends Exception(message, cause) {
def this(message: String) = this(message, null)
}
class DocumentTooLargeMongoException(message: String, cause: Throwable) extends MongoException(message, cause) {
def this(message: String) = this(message, null)
}
class DuplicateKeyMongoException(message: String, cause: Throwable) extends MongoException(message, cause) {
def this(message: String) = this(message, null)
}
/**
* Exception that indicates a bug in something (mongod itself or the library).
*/
class BugInSomethingMongoException(message: String, cause: Throwable) extends MongoException(message, cause) {
def this(message: String) = this(message, null)
}
| havocp/beaucatcher | base/src/main/scala/org/beaucatcher/mongo/MongoException.scala | Scala | apache-2.0 | 762 |
package com.sksamuel.elastic4s.requests.searches.aggs
import com.sksamuel.elastic4s.requests.script.Script
import com.sksamuel.elastic4s.ext.OptionImplicits._
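/**
 * Aggregation model for the Elasticsearch `sum` metric aggregation.
 *
 * A minimal usage sketch built from the case class and builder methods defined below
 * (the field name is illustrative only):
 * {{{
 * val agg = SumAggregation("total_price").field("price")
 * }}}
 */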
case class SumAggregation(name: String,
field: Option[String] = None,
missing: Option[AnyRef] = None,
script: Option[Script] = None,
subaggs: Seq[AbstractAggregation] = Nil,
metadata: Map[String, AnyRef] = Map.empty)
extends Aggregation {
type T = SumAggregation
def field(field: String): SumAggregation = copy(field = field.some)
def missing(missing: AnyRef): SumAggregation = copy(missing = missing.some)
def script(script: Script): SumAggregation = copy(script = script.some)
override def subAggregations(aggs: Iterable[AbstractAggregation]): T = copy(subaggs = aggs.toSeq)
override def metadata(map: Map[String, AnyRef]): T = copy(metadata = map)
}
| sksamuel/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/aggs/SumAggregation.scala | Scala | apache-2.0 | 987 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.stream.table
import java.math.BigDecimal
import org.apache.flink.api.common.time.Time
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.scala.{StreamTableEnvironment, _}
import org.apache.flink.table.api._
import org.apache.flink.table.runtime.stream.table.GroupWindowITCase._
import org.apache.flink.table.runtime.utils.{StreamITCase, StreamTestData}
import org.apache.flink.table.utils.Top3
import org.apache.flink.test.util.AbstractTestBase
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit.Test
import _root_.scala.collection.mutable
/**
* We only test some aggregations until better testing of constructed DataStream
* programs is possible.
*/
class GroupWindowTableAggregateITCase extends AbstractTestBase {
private val queryConfig = new StreamQueryConfig()
queryConfig.withIdleStateRetentionTime(Time.hours(1), Time.hours(2))
val data = List(
(1L, 1, "Hi"),
(2L, 2, "Hello"),
(4L, 2, "Hello"),
(8L, 3, "Hello world"),
(16L, 3, "Hello world"))
val data2 = List(
(1L, 1, 1d, 1f, new BigDecimal("1"), "Hi"),
(2L, 2, 2d, 2f, new BigDecimal("2"), "Hallo"),
(3L, 2, 2d, 2f, new BigDecimal("2"), "Hello"),
(4L, 5, 5d, 5f, new BigDecimal("5"), "Hello"),
(7L, 3, 3d, 3f, new BigDecimal("3"), "Hello"),
(8L, 3, 3d, 3f, new BigDecimal("3"), "Hello world"),
(16L, 4, 4d, 4f, new BigDecimal("4"), "Hello world"),
(32L, 4, 4d, 4f, new BigDecimal("4"), null.asInstanceOf[String]))
@Test
def testProcessingTimeSlidingGroupWindowOverCount(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setParallelism(1)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val top3 = new Top3
val stream = StreamTestData.get3TupleDataStream(env)
val table = stream.toTable(tEnv, 'int, 'long, 'string, 'proctime.proctime)
val windowedTable = table
.window(Slide over 4.rows every 2.rows on 'proctime as 'w)
.groupBy('w, 'long)
.flatAggregate(top3('int) as ('x, 'y))
.select('long, 'x, 'y)
val results = windowedTable.toAppendStream[Row](queryConfig)
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq("2,2,2", "2,3,3", "3,4,4", "3,5,5", "4,7,7", "4,8,8", "4,8,8", "4,9,9",
"4,10,10", "5,11,11", "5,12,12", "5,12,12", "5,13,13", "5,14,14", "6,16,16", "6,17,17",
"6,17,17", "6,18,18", "6,19,19", "6,19,19", "6,20,20", "6,21,21")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testEventTimeSessionGroupWindowOverTime(): Unit = {
    // To verify the "merge" functionality, this test has the following characteristics:
    // 1. parallelism is set to 1 and the test data arrives out of order
    // 2. the watermark has a 10ms offset to delay the window emission by 10ms
val sessionWindowTestdata = List(
(1L, 1, "Hello"),
(2L, 2, "Hello"),
(8L, 8, "Hello"),
(9L, 9, "Hello World"),
(4L, 4, "Hello"),
(16L, 16, "Hello"))
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
env.setParallelism(1)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(sessionWindowTestdata)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Int, String)](10L))
val table = stream.toTable(tEnv, 'long, 'int, 'string, 'rowtime.rowtime)
val top3 = new Top3
val windowedTable = table
.window(Session withGap 5.milli on 'rowtime as 'w)
.groupBy('w, 'string)
.flatAggregate(top3('int))
.select('string, 'f0, 'f1)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq("Hello,2,2", "Hello,4,4", "Hello,8,8", "Hello World,9,9", "Hello,16,16")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testAllProcessingTimeTumblingGroupWindowOverCount(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setParallelism(1)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = StreamTestData.get3TupleDataStream(env)
val table = stream.toTable(tEnv, 'int, 'long, 'string, 'proctime.proctime)
val top3 = new Top3
val windowedTable = table
.window(Tumble over 7.rows on 'proctime as 'w)
.groupBy('w)
.flatAggregate(top3('int))
.select('f0, 'f1)
val results = windowedTable.toAppendStream[Row](queryConfig)
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq("5,5", "6,6", "7,7", "12,12", "13,13", "14,14", "19,19", "20,20", "21,21")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testEventTimeTumblingWindow(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = StreamTestData.get3TupleDataStream(env)
.assignTimestampsAndWatermarks(new TimestampAndWatermarkWithOffset[(Int, Long, String)](0L))
val table = stream.toTable(tEnv, 'int, 'long, 'string, 'rowtime.rowtime)
val top3 = new Top3
val windowedTable = table
.window(Tumble over 10.milli on 'rowtime as 'w)
.groupBy('w, 'long)
.flatAggregate(top3('int) as ('x, 'y))
.select('w.start, 'w.end, 'long, 'x, 'y + 1)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.0,1970-01-01 00:00:00.01,1,1,2",
"1970-01-01 00:00:00.0,1970-01-01 00:00:00.01,2,2,3",
"1970-01-01 00:00:00.0,1970-01-01 00:00:00.01,2,3,4",
"1970-01-01 00:00:00.0,1970-01-01 00:00:00.01,3,4,5",
"1970-01-01 00:00:00.0,1970-01-01 00:00:00.01,3,5,6",
"1970-01-01 00:00:00.0,1970-01-01 00:00:00.01,3,6,7",
"1970-01-01 00:00:00.0,1970-01-01 00:00:00.01,4,7,8",
"1970-01-01 00:00:00.0,1970-01-01 00:00:00.01,4,8,9",
"1970-01-01 00:00:00.0,1970-01-01 00:00:00.01,4,9,10",
"1970-01-01 00:00:00.01,1970-01-01 00:00:00.02,4,10,11",
"1970-01-01 00:00:00.01,1970-01-01 00:00:00.02,5,13,14",
"1970-01-01 00:00:00.01,1970-01-01 00:00:00.02,5,14,15",
"1970-01-01 00:00:00.01,1970-01-01 00:00:00.02,5,15,16",
"1970-01-01 00:00:00.01,1970-01-01 00:00:00.02,6,17,18",
"1970-01-01 00:00:00.01,1970-01-01 00:00:00.02,6,18,19",
"1970-01-01 00:00:00.01,1970-01-01 00:00:00.02,6,19,20",
"1970-01-01 00:00:00.02,1970-01-01 00:00:00.03,6,21,22",
"1970-01-01 00:00:00.02,1970-01-01 00:00:00.03,6,20,21")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testGroupWindowWithoutKeyInProjection(): Unit = {
val data = List(
(1L, 1, "Hi", 1, 1),
(2L, 2, "Hello", 2, 2),
(4L, 2, "Hello", 2, 2),
(8L, 3, "Hello world", 3, 3),
(16L, 3, "Hello world", 3, 3))
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setParallelism(1)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env.fromCollection(data)
val table = stream.toTable(tEnv, 'long, 'int, 'string, 'int2, 'int3, 'proctime.proctime)
val top3 = new Top3
val windowedTable = table
.window(Slide over 2.rows every 1.rows on 'proctime as 'w)
.groupBy('w, 'int2, 'int3, 'string)
.flatAggregate(top3('int))
.select('f0, 'f1)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq("1,1", "2,2", "2,2", "2,2", "3,3", "3,3", "3,3")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
// ----------------------------------------------------------------------------------------------
// Sliding windows
// ----------------------------------------------------------------------------------------------
@Test
def testAllEventTimeSlidingGroupWindowOverTime(): Unit = {
// please keep this test in sync with the DataSet variant
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data2)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Int, Double, Float, BigDecimal, String)](0L))
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val top3 = new Top3
val windowedTable = table
.window(Slide over 5.milli every 2.milli on 'long as 'w)
.groupBy('w)
.flatAggregate(top3('int))
.select('f0, 'f1, 'w.start, 'w.end, 'w.rowtime)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1,1,1969-12-31 23:59:59.998,1970-01-01 00:00:00.003,1970-01-01 00:00:00.002",
"2,2,1969-12-31 23:59:59.998,1970-01-01 00:00:00.003,1970-01-01 00:00:00.002",
"2,2,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005,1970-01-01 00:00:00.004",
"5,5,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005,1970-01-01 00:00:00.004",
"2,2,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005,1970-01-01 00:00:00.004",
"2,2,1970-01-01 00:00:00.002,1970-01-01 00:00:00.007,1970-01-01 00:00:00.006",
"2,2,1970-01-01 00:00:00.002,1970-01-01 00:00:00.007,1970-01-01 00:00:00.006",
"5,5,1970-01-01 00:00:00.002,1970-01-01 00:00:00.007,1970-01-01 00:00:00.006",
"3,3,1970-01-01 00:00:00.004,1970-01-01 00:00:00.009,1970-01-01 00:00:00.008",
"3,3,1970-01-01 00:00:00.004,1970-01-01 00:00:00.009,1970-01-01 00:00:00.008",
"5,5,1970-01-01 00:00:00.004,1970-01-01 00:00:00.009,1970-01-01 00:00:00.008",
"3,3,1970-01-01 00:00:00.006,1970-01-01 00:00:00.011,1970-01-01 00:00:00.01",
"3,3,1970-01-01 00:00:00.006,1970-01-01 00:00:00.011,1970-01-01 00:00:00.01",
"3,3,1970-01-01 00:00:00.008,1970-01-01 00:00:00.013,1970-01-01 00:00:00.012",
"4,4,1970-01-01 00:00:00.012,1970-01-01 00:00:00.017,1970-01-01 00:00:00.016",
"4,4,1970-01-01 00:00:00.014,1970-01-01 00:00:00.019,1970-01-01 00:00:00.018",
"4,4,1970-01-01 00:00:00.016,1970-01-01 00:00:00.021,1970-01-01 00:00:00.02",
"4,4,1970-01-01 00:00:00.028,1970-01-01 00:00:00.033,1970-01-01 00:00:00.032",
"4,4,1970-01-01 00:00:00.03,1970-01-01 00:00:00.035,1970-01-01 00:00:00.034",
"4,4,1970-01-01 00:00:00.032,1970-01-01 00:00:00.037,1970-01-01 00:00:00.036")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testEventTimeSlidingGroupWindowOverTimeOverlappingFullPane(): Unit = {
// please keep this test in sync with the DataSet variant
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data2)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Int, Double, Float, BigDecimal, String)](0L))
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val top3 = new Top3
val windowedTable = table
.window(Slide over 10.milli every 5.milli on 'long as 'w)
.groupBy('w, 'string)
.flatAggregate(top3('int))
.select('string, 'f0, 'f1, 'w.start, 'w.end)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"Hallo,2,2,1969-12-31 23:59:59.995,1970-01-01 00:00:00.005",
"Hallo,2,2,1970-01-01 00:00:00.0,1970-01-01 00:00:00.01",
"Hello world,3,3,1970-01-01 00:00:00.0,1970-01-01 00:00:00.01",
"Hello world,3,3,1970-01-01 00:00:00.005,1970-01-01 00:00:00.015",
"Hello world,4,4,1970-01-01 00:00:00.01,1970-01-01 00:00:00.02",
"Hello world,4,4,1970-01-01 00:00:00.015,1970-01-01 00:00:00.025",
"Hello,2,2,1969-12-31 23:59:59.995,1970-01-01 00:00:00.005",
"Hello,5,5,1969-12-31 23:59:59.995,1970-01-01 00:00:00.005",
"Hello,2,2,1970-01-01 00:00:00.0,1970-01-01 00:00:00.01",
"Hello,3,3,1970-01-01 00:00:00.0,1970-01-01 00:00:00.01",
"Hello,5,5,1970-01-01 00:00:00.0,1970-01-01 00:00:00.01",
"Hello,3,3,1970-01-01 00:00:00.005,1970-01-01 00:00:00.015",
"Hi,1,1,1969-12-31 23:59:59.995,1970-01-01 00:00:00.005",
"Hi,1,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.01",
"null,4,4,1970-01-01 00:00:00.025,1970-01-01 00:00:00.035",
"null,4,4,1970-01-01 00:00:00.03,1970-01-01 00:00:00.04")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testEventTimeSlidingGroupWindowOverTimeOverlappingSplitPane(): Unit = {
// please keep this test in sync with the DataSet variant
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val top3 = new Top3
val stream = env
.fromCollection(data2)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Int, Double, Float, BigDecimal, String)](0L))
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val windowedTable = table
.window(Slide over 5.milli every 4.milli on 'long as 'w)
.groupBy('w, 'string)
.flatAggregate(top3('int))
.select('string, 'f0, 'f1, 'w.start, 'w.end)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"Hello,2,2,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005",
"Hello,5,5,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005",
"Hallo,2,2,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005",
"Hello world,3,3,1970-01-01 00:00:00.004,1970-01-01 00:00:00.009",
"Hello world,3,3,1970-01-01 00:00:00.008,1970-01-01 00:00:00.013",
"Hello,3,3,1970-01-01 00:00:00.004,1970-01-01 00:00:00.009",
"Hi,1,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005",
"Hello,5,5,1970-01-01 00:00:00.004,1970-01-01 00:00:00.009",
"Hello world,4,4,1970-01-01 00:00:00.012,1970-01-01 00:00:00.017",
"null,4,4,1970-01-01 00:00:00.028,1970-01-01 00:00:00.033",
"Hello world,4,4,1970-01-01 00:00:00.016,1970-01-01 00:00:00.021",
"null,4,4,1970-01-01 00:00:00.032,1970-01-01 00:00:00.037")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testEventTimeSlidingGroupWindowOverTimeNonOverlappingFullPane(): Unit = {
// please keep this test in sync with the DataSet variant
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data2)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Int, Double, Float, BigDecimal, String)](0L))
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val top3 = new Top3
val windowedTable = table
.window(Slide over 5.milli every 10.milli on 'long as 'w)
.groupBy('w, 'string)
.flatAggregate(top3('int))
.select('string, 'f0, 'f1, 'w.start, 'w.end)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"Hallo,2,2,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005",
"Hello,2,2,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005",
"Hello,5,5,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005",
"Hi,1,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005",
"null,4,4,1970-01-01 00:00:00.03,1970-01-01 00:00:00.035")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testEventTimeSlidingGroupWindowOverTimeNonOverlappingSplitPane(): Unit = {
// please keep this test in sync with the DataSet variant
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data2)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Int, Double, Float, BigDecimal, String)](0L))
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val top3 = new Top3
val windowedTable = table
.window(Slide over 3.milli every 10.milli on 'long as 'w)
.groupBy('w, 'string)
.flatAggregate(top3('int))
.select('string, 'f0, 'f1, 'w.start, 'w.end)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"null,4,4,1970-01-01 00:00:00.03,1970-01-01 00:00:00.033",
"Hallo,2,2,1970-01-01 00:00:00.0,1970-01-01 00:00:00.003",
"Hi,1,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.003")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testEventTimeGroupWindowWithoutExplicitTimeField(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data2)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Int, Double, Float, BigDecimal, String)](0L))
.map(t => (t._2, t._6))
val table = stream.toTable(tEnv, 'int, 'string, 'rowtime.rowtime)
val top3 = new Top3
val windowedTable = table
.window(Slide over 3.milli every 10.milli on 'rowtime as 'w)
.groupBy('w, 'string)
.flatAggregate(top3('int))
.select('string, 'f0, 'f1, 'w.start, 'w.end)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"Hallo,2,2,1970-01-01 00:00:00.0,1970-01-01 00:00:00.003",
"Hi,1,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.003",
"null,4,4,1970-01-01 00:00:00.03,1970-01-01 00:00:00.033")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
}
| fhueske/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/stream/table/GroupWindowTableAggregateITCase.scala | Scala | apache-2.0 | 20,230 |
package fr.hmil.roshttp.exceptions
import java.io.IOException
import fr.hmil.roshttp.response.{HttpResponse, SimpleHttpResponse, StreamHttpResponse}
/** Exception in the HTTP application layer.
*
* In other words, this exception occurs when a bad HTTP status code (>= 400) is received.
*/
case class HttpException[+T <: HttpResponse] private(response: T)(message: String = null)
extends IOException(message)
object HttpException {
def badStatus[T <: HttpResponse](response: T): HttpException[T] =
new HttpException[T](response)(s"Server responded with status ${response.statusCode}")
}
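// A minimal usage sketch (the `responseFuture` name is illustrative): a request that fails
// with a status >= 400 can be recovered by inspecting the wrapped response.
// {{{
// responseFuture.recover {
//   case e: HttpException[_] => println(s"Server answered ${e.response.statusCode}")
// }
// }}}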
| hmil/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/exceptions/HttpException.scala | Scala | mit | 604 |
package org.smartpony.core.http
import spray.json._
import DefaultJsonProtocol._
import org.apache.http.HttpStatus._
case class Response(statusCode: Int, reason: String, contentType: Option[String],
bodyTextOpt: Option[String], headers: Seq[(String, String)], rawBytes: Array[Byte]) {
def header(key: String): Option[String] = headers.find(key == _._1).map(_._2)
def headers(key: String): Seq[String] = headers.filter(key == _._1).map(_._2)
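  // Extracts the value of the named cookie from any "Set-Cookie" headers, e.g. a header
  // value of "sid=abc123; Path=/" yields Some("abc123") for cookieValue("sid").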
def cookieValue(name: String): Option[String] = headers("Set-Cookie").
find(_.startsWith(name)).
map{t => t.split(";")(0).replace(name + "=", "")}
  lazy val json: JsValue = bodyTextOpt.map(_.parseJson).get // parse the body text into a JSON AST
val isOk = statusCode == SC_OK
}
| Dextaa/smartpony | core/src/main/scala/org/smartpony/core/http/Response.scala | Scala | mit | 708 |
package io.scalajs.nodejs
package fs
import io.scalajs.nodejs.buffer.Buffer
import io.scalajs.nodejs.stream.Readable
import io.scalajs.util.PromiseHelper.promiseCallback1
import scala.concurrent.Future
import scala.scalajs.js
import scala.scalajs.js.|
/**
* fs.ReadStream - ReadStream is a Readable Stream.
* @see https://nodejs.org/api/stream.html#stream_class_stream_readable
*/
@js.native
trait ReadStream extends Readable {
/////////////////////////////////////////////////////////////////////////////////
// Properties
/////////////////////////////////////////////////////////////////////////////////
/**
* The number of bytes read so far.
*/
def bytesRead: js.UndefOr[Double] = js.native
/**
* The path to the file the stream is reading from as specified in the first argument to fs.createReadStream().
* If path is passed as a string, then readStream.path will be a string. If path is passed as a Buffer, then
* readStream.path will be a Buffer.
*/
def path: Buffer | String = js.native
/////////////////////////////////////////////////////////////////////////////////
// Methods
/////////////////////////////////////////////////////////////////////////////////
/**
* Undocumented method
* @see https://github.com/nodejs/node-v0.x-archive/blob/cfcb1de130867197cbc9c6012b7e84e08e53d032/lib/fs.js#L1597-L1620
*/
def close(callback: js.Function1[Unit, Any]): Unit = js.native
}
/**
* Read Stream Companion
* @author [email protected]
*/
object ReadStream {
/**
* Read Stream Events
* @author [email protected]
*/
implicit class ReadStreamEvents(val stream: ReadStream) extends AnyVal {
/**
* Emitted when the ReadStream's underlying file descriptor has been closed using the fs.close() method.
* @param listener the event handler
* @since 0.1.93
*/
@inline
def onClose(listener: () => Any): stream.type = stream.on("close", listener)
/**
* Emitted when the ReadStream's file is opened.
* @param listener the event handler
* <ul>
* <li>fd: Integer - file descriptor used by the ReadStream.</li>
* </ul>
* @since 0.1.93
*/
@inline
def onOpen(listener: FileDescriptor => Any): stream.type = stream.on("open", listener)
}
/**
* Read Stream Extensions
* @author [email protected]
*/
implicit class ReadStreamExtensions(val stream: ReadStream) extends AnyVal {
@inline
def closeFuture: Future[Unit] = promiseCallback1[Unit](stream.close)
}
}
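// A minimal usage sketch (path and callbacks are illustrative; this assumes the Fs facade
// exposes createReadStream as in the Node.js API referenced in the scaladoc above):
// {{{
// val stream = Fs.createReadStream("/tmp/data.txt")
// stream.onOpen(fd => println(s"opened file descriptor $fd"))
// stream.onClose(() => println("stream closed"))
// }}}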
| ldaniels528/MEANS.js | app/common/src/main/scala/io/scalajs/nodejs/fs/ReadStream.scala | Scala | apache-2.0 | 2,649 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.sql.{Date, Timestamp}
import java.text.SimpleDateFormat
import java.util.Locale
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.unsafe.types.CalendarInterval
class DateFunctionsSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("function current_date") {
val df1 = Seq((1, 2), (3, 1)).toDF("a", "b")
val d0 = DateTimeUtils.millisToDays(System.currentTimeMillis())
val d1 = DateTimeUtils.fromJavaDate(df1.select(current_date()).collect().head.getDate(0))
val d2 = DateTimeUtils.fromJavaDate(
sql("""SELECT CURRENT_DATE()""").collect().head.getDate(0))
val d3 = DateTimeUtils.millisToDays(System.currentTimeMillis())
assert(d0 <= d1 && d1 <= d2 && d2 <= d3 && d3 - d0 <= 1)
}
test("function current_timestamp and now") {
val df1 = Seq((1, 2), (3, 1)).toDF("a", "b")
checkAnswer(df1.select(countDistinct(current_timestamp())), Row(1))
// Execution in one query should return the same value
checkAnswer(sql("""SELECT CURRENT_TIMESTAMP() = CURRENT_TIMESTAMP()"""), Row(true))
// Current timestamp should return the current timestamp ...
val before = System.currentTimeMillis
val got = sql("SELECT CURRENT_TIMESTAMP()").collect().head.getTimestamp(0).getTime
val after = System.currentTimeMillis
assert(got >= before && got <= after)
// Now alias
checkAnswer(sql("""SELECT CURRENT_TIMESTAMP() = NOW()"""), Row(true))
}
val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US)
val sdfDate = new SimpleDateFormat("yyyy-MM-dd", Locale.US)
val d = new Date(sdf.parse("2015-04-08 13:10:15").getTime)
val ts = new Timestamp(sdf.parse("2013-04-08 13:10:15").getTime)
test("timestamp comparison with date strings") {
val df = Seq(
(1, Timestamp.valueOf("2015-01-01 00:00:00")),
(2, Timestamp.valueOf("2014-01-01 00:00:00"))).toDF("i", "t")
checkAnswer(
df.select("t").filter($"t" <= "2014-06-01"),
Row(Timestamp.valueOf("2014-01-01 00:00:00")) :: Nil)
checkAnswer(
df.select("t").filter($"t" >= "2014-06-01"),
Row(Timestamp.valueOf("2015-01-01 00:00:00")) :: Nil)
}
test("date comparison with date strings") {
val df = Seq(
(1, Date.valueOf("2015-01-01")),
(2, Date.valueOf("2014-01-01"))).toDF("i", "t")
checkAnswer(
df.select("t").filter($"t" <= "2014-06-01"),
Row(Date.valueOf("2014-01-01")) :: Nil)
checkAnswer(
df.select("t").filter($"t" >= "2015"),
Row(Date.valueOf("2015-01-01")) :: Nil)
}
test("date format") {
val df = Seq((d, sdf.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(date_format($"a", "y"), date_format($"b", "y"), date_format($"c", "y")),
Row("2015", "2015", "2013"))
checkAnswer(
df.selectExpr("date_format(a, 'y')", "date_format(b, 'y')", "date_format(c, 'y')"),
Row("2015", "2015", "2013"))
}
test("year") {
val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(year($"a"), year($"b"), year($"c")),
Row(2015, 2015, 2013))
checkAnswer(
df.selectExpr("year(a)", "year(b)", "year(c)"),
Row(2015, 2015, 2013))
}
test("quarter") {
val ts = new Timestamp(sdf.parse("2013-11-08 13:10:15").getTime)
val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(quarter($"a"), quarter($"b"), quarter($"c")),
Row(2, 2, 4))
checkAnswer(
df.selectExpr("quarter(a)", "quarter(b)", "quarter(c)"),
Row(2, 2, 4))
}
test("month") {
val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(month($"a"), month($"b"), month($"c")),
Row(4, 4, 4))
checkAnswer(
df.selectExpr("month(a)", "month(b)", "month(c)"),
Row(4, 4, 4))
}
test("dayofmonth") {
val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(dayofmonth($"a"), dayofmonth($"b"), dayofmonth($"c")),
Row(8, 8, 8))
checkAnswer(
df.selectExpr("day(a)", "day(b)", "dayofmonth(c)"),
Row(8, 8, 8))
}
test("dayofyear") {
val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(dayofyear($"a"), dayofyear($"b"), dayofyear($"c")),
Row(98, 98, 98))
checkAnswer(
df.selectExpr("dayofyear(a)", "dayofyear(b)", "dayofyear(c)"),
Row(98, 98, 98))
}
test("hour") {
val df = Seq((d, sdf.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(hour($"a"), hour($"b"), hour($"c")),
Row(0, 13, 13))
checkAnswer(
df.selectExpr("hour(a)", "hour(b)", "hour(c)"),
Row(0, 13, 13))
}
test("minute") {
val df = Seq((d, sdf.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(minute($"a"), minute($"b"), minute($"c")),
Row(0, 10, 10))
checkAnswer(
df.selectExpr("minute(a)", "minute(b)", "minute(c)"),
Row(0, 10, 10))
}
test("second") {
val df = Seq((d, sdf.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(second($"a"), second($"b"), second($"c")),
Row(0, 15, 15))
checkAnswer(
df.selectExpr("second(a)", "second(b)", "second(c)"),
Row(0, 15, 15))
}
test("weekofyear") {
val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(weekofyear($"a"), weekofyear($"b"), weekofyear($"c")),
Row(15, 15, 15))
checkAnswer(
df.selectExpr("weekofyear(a)", "weekofyear(b)", "weekofyear(c)"),
Row(15, 15, 15))
}
test("function date_add") {
val st1 = "2015-06-01 12:34:56"
val st2 = "2015-06-02 12:34:56"
val t1 = Timestamp.valueOf(st1)
val t2 = Timestamp.valueOf(st2)
val s1 = "2015-06-01"
val s2 = "2015-06-02"
val d1 = Date.valueOf(s1)
val d2 = Date.valueOf(s2)
val df = Seq((t1, d1, s1, st1), (t2, d2, s2, st2)).toDF("t", "d", "s", "ss")
checkAnswer(
df.select(date_add(col("d"), 1)),
Seq(Row(Date.valueOf("2015-06-02")), Row(Date.valueOf("2015-06-03"))))
checkAnswer(
df.select(date_add(col("t"), 3)),
Seq(Row(Date.valueOf("2015-06-04")), Row(Date.valueOf("2015-06-05"))))
checkAnswer(
df.select(date_add(col("s"), 5)),
Seq(Row(Date.valueOf("2015-06-06")), Row(Date.valueOf("2015-06-07"))))
checkAnswer(
df.select(date_add(col("ss"), 7)),
Seq(Row(Date.valueOf("2015-06-08")), Row(Date.valueOf("2015-06-09"))))
checkAnswer(df.selectExpr("DATE_ADD(null, 1)"), Seq(Row(null), Row(null)))
checkAnswer(
df.selectExpr("""DATE_ADD(d, 1)"""),
Seq(Row(Date.valueOf("2015-06-02")), Row(Date.valueOf("2015-06-03"))))
}
test("function date_sub") {
val st1 = "2015-06-01 12:34:56"
val st2 = "2015-06-02 12:34:56"
val t1 = Timestamp.valueOf(st1)
val t2 = Timestamp.valueOf(st2)
val s1 = "2015-06-01"
val s2 = "2015-06-02"
val d1 = Date.valueOf(s1)
val d2 = Date.valueOf(s2)
val df = Seq((t1, d1, s1, st1), (t2, d2, s2, st2)).toDF("t", "d", "s", "ss")
checkAnswer(
df.select(date_sub(col("d"), 1)),
Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01"))))
checkAnswer(
df.select(date_sub(col("t"), 1)),
Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01"))))
checkAnswer(
df.select(date_sub(col("s"), 1)),
Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01"))))
checkAnswer(
df.select(date_sub(col("ss"), 1)),
Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01"))))
checkAnswer(
df.select(date_sub(lit(null), 1)).limit(1), Row(null))
checkAnswer(df.selectExpr("""DATE_SUB(d, null)"""), Seq(Row(null), Row(null)))
checkAnswer(
df.selectExpr("""DATE_SUB(d, 1)"""),
Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01"))))
}
test("time_add") {
val t1 = Timestamp.valueOf("2015-07-31 23:59:59")
val t2 = Timestamp.valueOf("2015-12-31 00:00:00")
val d1 = Date.valueOf("2015-07-31")
val d2 = Date.valueOf("2015-12-31")
val i = new CalendarInterval(2, 2000000L)
val df = Seq((1, t1, d1), (3, t2, d2)).toDF("n", "t", "d")
checkAnswer(
df.selectExpr(s"d + $i"),
Seq(Row(Date.valueOf("2015-09-30")), Row(Date.valueOf("2016-02-29"))))
checkAnswer(
df.selectExpr(s"t + $i"),
Seq(Row(Timestamp.valueOf("2015-10-01 00:00:01")),
Row(Timestamp.valueOf("2016-02-29 00:00:02"))))
}
test("time_sub") {
val t1 = Timestamp.valueOf("2015-10-01 00:00:01")
val t2 = Timestamp.valueOf("2016-02-29 00:00:02")
val d1 = Date.valueOf("2015-09-30")
val d2 = Date.valueOf("2016-02-29")
val i = new CalendarInterval(2, 2000000L)
val df = Seq((1, t1, d1), (3, t2, d2)).toDF("n", "t", "d")
checkAnswer(
df.selectExpr(s"d - $i"),
Seq(Row(Date.valueOf("2015-07-30")), Row(Date.valueOf("2015-12-30"))))
checkAnswer(
df.selectExpr(s"t - $i"),
Seq(Row(Timestamp.valueOf("2015-07-31 23:59:59")),
Row(Timestamp.valueOf("2015-12-31 00:00:00"))))
}
test("function add_months") {
val d1 = Date.valueOf("2015-08-31")
val d2 = Date.valueOf("2015-02-28")
val df = Seq((1, d1), (2, d2)).toDF("n", "d")
checkAnswer(
df.select(add_months(col("d"), 1)),
Seq(Row(Date.valueOf("2015-09-30")), Row(Date.valueOf("2015-03-31"))))
checkAnswer(
df.selectExpr("add_months(d, -1)"),
Seq(Row(Date.valueOf("2015-07-31")), Row(Date.valueOf("2015-01-31"))))
}
test("function months_between") {
val d1 = Date.valueOf("2015-07-31")
val d2 = Date.valueOf("2015-02-16")
val t1 = Timestamp.valueOf("2014-09-30 23:30:00")
val t2 = Timestamp.valueOf("2015-09-16 12:00:00")
val s1 = "2014-09-15 11:30:00"
val s2 = "2015-10-01 00:00:00"
val df = Seq((t1, d1, s1), (t2, d2, s2)).toDF("t", "d", "s")
checkAnswer(df.select(months_between(col("t"), col("d"))), Seq(Row(-10.0), Row(7.0)))
checkAnswer(df.selectExpr("months_between(t, s)"), Seq(Row(0.5), Row(-0.5)))
}
test("function last_day") {
val df1 = Seq((1, "2015-07-23"), (2, "2015-07-24")).toDF("i", "d")
val df2 = Seq((1, "2015-07-23 00:11:22"), (2, "2015-07-24 11:22:33")).toDF("i", "t")
checkAnswer(
df1.select(last_day(col("d"))),
Seq(Row(Date.valueOf("2015-07-31")), Row(Date.valueOf("2015-07-31"))))
checkAnswer(
df2.select(last_day(col("t"))),
Seq(Row(Date.valueOf("2015-07-31")), Row(Date.valueOf("2015-07-31"))))
}
test("function next_day") {
val df1 = Seq(("mon", "2015-07-23"), ("tuesday", "2015-07-20")).toDF("dow", "d")
val df2 = Seq(("th", "2015-07-23 00:11:22"), ("xx", "2015-07-24 11:22:33")).toDF("dow", "t")
checkAnswer(
df1.select(next_day(col("d"), "MONDAY")),
Seq(Row(Date.valueOf("2015-07-27")), Row(Date.valueOf("2015-07-27"))))
checkAnswer(
df2.select(next_day(col("t"), "th")),
Seq(Row(Date.valueOf("2015-07-30")), Row(Date.valueOf("2015-07-30"))))
}
test("function to_date") {
val d1 = Date.valueOf("2015-07-22")
val d2 = Date.valueOf("2015-07-01")
val d3 = Date.valueOf("2014-12-31")
val t1 = Timestamp.valueOf("2015-07-22 10:00:00")
val t2 = Timestamp.valueOf("2014-12-31 23:59:59")
val t3 = Timestamp.valueOf("2014-12-31 23:59:59")
val s1 = "2015-07-22 10:00:00"
val s2 = "2014-12-31"
val s3 = "2014-31-12"
val df = Seq((d1, t1, s1), (d2, t2, s2), (d3, t3, s3)).toDF("d", "t", "s")
checkAnswer(
df.select(to_date(col("t"))),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")),
Row(Date.valueOf("2014-12-31"))))
checkAnswer(
df.select(to_date(col("d"))),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2015-07-01")),
Row(Date.valueOf("2014-12-31"))))
checkAnswer(
df.select(to_date(col("s"))),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")), Row(null)))
checkAnswer(
df.selectExpr("to_date(t)"),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")),
Row(Date.valueOf("2014-12-31"))))
checkAnswer(
df.selectExpr("to_date(d)"),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2015-07-01")),
Row(Date.valueOf("2014-12-31"))))
checkAnswer(
df.selectExpr("to_date(s)"),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")), Row(null)))
// now with format
checkAnswer(
df.select(to_date(col("t"), "yyyy-MM-dd")),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")),
Row(Date.valueOf("2014-12-31"))))
checkAnswer(
df.select(to_date(col("d"), "yyyy-MM-dd")),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2015-07-01")),
Row(Date.valueOf("2014-12-31"))))
checkAnswer(
df.select(to_date(col("s"), "yyyy-MM-dd")),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")), Row(null)))
// now switch format
checkAnswer(
df.select(to_date(col("s"), "yyyy-dd-MM")),
Seq(Row(null), Row(null), Row(Date.valueOf("2014-12-31"))))
// invalid format
checkAnswer(
df.select(to_date(col("s"), "yyyy-hh-MM")),
Seq(Row(null), Row(null), Row(null)))
checkAnswer(
df.select(to_date(col("s"), "yyyy-dd-aa")),
Seq(Row(null), Row(null), Row(null)))
// february
val x1 = "2016-02-29"
val x2 = "2017-02-29"
val df1 = Seq(x1, x2).toDF("x")
checkAnswer(
df1.select(to_date(col("x"))), Row(Date.valueOf("2016-02-29")) :: Row(null) :: Nil)
}
test("function trunc") {
val df = Seq(
(1, Timestamp.valueOf("2015-07-22 10:00:00")),
(2, Timestamp.valueOf("2014-12-31 00:00:00"))).toDF("i", "t")
checkAnswer(
df.select(trunc(col("t"), "YY")),
Seq(Row(Date.valueOf("2015-01-01")), Row(Date.valueOf("2014-01-01"))))
checkAnswer(
df.selectExpr("trunc(t, 'Month')"),
Seq(Row(Date.valueOf("2015-07-01")), Row(Date.valueOf("2014-12-01"))))
}
test("from_unixtime") {
val sdf1 = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US)
val fmt2 = "yyyy-MM-dd HH:mm:ss.SSS"
val sdf2 = new SimpleDateFormat(fmt2, Locale.US)
val fmt3 = "yy-MM-dd HH-mm-ss"
val sdf3 = new SimpleDateFormat(fmt3, Locale.US)
val df = Seq((1000, "yyyy-MM-dd HH:mm:ss.SSS"), (-1000, "yy-MM-dd HH-mm-ss")).toDF("a", "b")
checkAnswer(
df.select(from_unixtime(col("a"))),
Seq(Row(sdf1.format(new Timestamp(1000000))), Row(sdf1.format(new Timestamp(-1000000)))))
checkAnswer(
df.select(from_unixtime(col("a"), fmt2)),
Seq(Row(sdf2.format(new Timestamp(1000000))), Row(sdf2.format(new Timestamp(-1000000)))))
checkAnswer(
df.select(from_unixtime(col("a"), fmt3)),
Seq(Row(sdf3.format(new Timestamp(1000000))), Row(sdf3.format(new Timestamp(-1000000)))))
checkAnswer(
df.selectExpr("from_unixtime(a)"),
Seq(Row(sdf1.format(new Timestamp(1000000))), Row(sdf1.format(new Timestamp(-1000000)))))
checkAnswer(
df.selectExpr(s"from_unixtime(a, '$fmt2')"),
Seq(Row(sdf2.format(new Timestamp(1000000))), Row(sdf2.format(new Timestamp(-1000000)))))
checkAnswer(
df.selectExpr(s"from_unixtime(a, '$fmt3')"),
Seq(Row(sdf3.format(new Timestamp(1000000))), Row(sdf3.format(new Timestamp(-1000000)))))
}
test("unix_timestamp") {
val date1 = Date.valueOf("2015-07-24")
val date2 = Date.valueOf("2015-07-25")
val ts1 = Timestamp.valueOf("2015-07-24 10:00:00.3")
val ts2 = Timestamp.valueOf("2015-07-25 02:02:02.2")
val s1 = "2015/07/24 10:00:00.5"
val s2 = "2015/07/25 02:02:02.6"
val ss1 = "2015-07-24 10:00:00"
val ss2 = "2015-07-25 02:02:02"
val fmt = "yyyy/MM/dd HH:mm:ss.S"
val df = Seq((date1, ts1, s1, ss1), (date2, ts2, s2, ss2)).toDF("d", "ts", "s", "ss")
checkAnswer(df.select(unix_timestamp(col("ts"))), Seq(
Row(ts1.getTime / 1000L), Row(ts2.getTime / 1000L)))
checkAnswer(df.select(unix_timestamp(col("ss"))), Seq(
Row(ts1.getTime / 1000L), Row(ts2.getTime / 1000L)))
checkAnswer(df.select(unix_timestamp(col("d"), fmt)), Seq(
Row(date1.getTime / 1000L), Row(date2.getTime / 1000L)))
checkAnswer(df.select(unix_timestamp(col("s"), fmt)), Seq(
Row(ts1.getTime / 1000L), Row(ts2.getTime / 1000L)))
checkAnswer(df.selectExpr("unix_timestamp(ts)"), Seq(
Row(ts1.getTime / 1000L), Row(ts2.getTime / 1000L)))
checkAnswer(df.selectExpr("unix_timestamp(ss)"), Seq(
Row(ts1.getTime / 1000L), Row(ts2.getTime / 1000L)))
checkAnswer(df.selectExpr(s"unix_timestamp(d, '$fmt')"), Seq(
Row(date1.getTime / 1000L), Row(date2.getTime / 1000L)))
checkAnswer(df.selectExpr(s"unix_timestamp(s, '$fmt')"), Seq(
Row(ts1.getTime / 1000L), Row(ts2.getTime / 1000L)))
val x1 = "2015-07-24 10:00:00"
val x2 = "2015-25-07 02:02:02"
val x3 = "2015-07-24 25:02:02"
val x4 = "2015-24-07 26:02:02"
val ts3 = Timestamp.valueOf("2015-07-24 02:25:02")
val ts4 = Timestamp.valueOf("2015-07-24 00:10:00")
val df1 = Seq(x1, x2, x3, x4).toDF("x")
checkAnswer(df1.select(unix_timestamp(col("x"))), Seq(
Row(ts1.getTime / 1000L), Row(null), Row(null), Row(null)))
checkAnswer(df1.selectExpr("unix_timestamp(x)"), Seq(
Row(ts1.getTime / 1000L), Row(null), Row(null), Row(null)))
checkAnswer(df1.select(unix_timestamp(col("x"), "yyyy-dd-MM HH:mm:ss")), Seq(
Row(null), Row(ts2.getTime / 1000L), Row(null), Row(null)))
checkAnswer(df1.selectExpr(s"unix_timestamp(x, 'yyyy-MM-dd mm:HH:ss')"), Seq(
Row(ts4.getTime / 1000L), Row(null), Row(ts3.getTime / 1000L), Row(null)))
// invalid format
checkAnswer(df1.selectExpr(s"unix_timestamp(x, 'yyyy-MM-dd aa:HH:ss')"), Seq(
Row(null), Row(null), Row(null), Row(null)))
// february
val y1 = "2016-02-29"
val y2 = "2017-02-29"
val ts5 = Timestamp.valueOf("2016-02-29 00:00:00")
val df2 = Seq(y1, y2).toDF("y")
checkAnswer(df2.select(unix_timestamp(col("y"), "yyyy-MM-dd")), Seq(
Row(ts5.getTime / 1000L), Row(null)))
val now = sql("select unix_timestamp()").collect().head.getLong(0)
checkAnswer(sql(s"select cast ($now as timestamp)"), Row(new java.util.Date(now * 1000)))
}
test("to_unix_timestamp") {
val date1 = Date.valueOf("2015-07-24")
val date2 = Date.valueOf("2015-07-25")
val ts1 = Timestamp.valueOf("2015-07-24 10:00:00.3")
val ts2 = Timestamp.valueOf("2015-07-25 02:02:02.2")
val s1 = "2015/07/24 10:00:00.5"
val s2 = "2015/07/25 02:02:02.6"
val ss1 = "2015-07-24 10:00:00"
val ss2 = "2015-07-25 02:02:02"
val fmt = "yyyy/MM/dd HH:mm:ss.S"
val df = Seq((date1, ts1, s1, ss1), (date2, ts2, s2, ss2)).toDF("d", "ts", "s", "ss")
checkAnswer(df.selectExpr("to_unix_timestamp(ts)"), Seq(
Row(ts1.getTime / 1000L), Row(ts2.getTime / 1000L)))
checkAnswer(df.selectExpr("to_unix_timestamp(ss)"), Seq(
Row(ts1.getTime / 1000L), Row(ts2.getTime / 1000L)))
checkAnswer(df.selectExpr(s"to_unix_timestamp(d, '$fmt')"), Seq(
Row(date1.getTime / 1000L), Row(date2.getTime / 1000L)))
checkAnswer(df.selectExpr(s"to_unix_timestamp(s, '$fmt')"), Seq(
Row(ts1.getTime / 1000L), Row(ts2.getTime / 1000L)))
val x1 = "2015-07-24 10:00:00"
val x2 = "2015-25-07 02:02:02"
val x3 = "2015-07-24 25:02:02"
val x4 = "2015-24-07 26:02:02"
val ts3 = Timestamp.valueOf("2015-07-24 02:25:02")
val ts4 = Timestamp.valueOf("2015-07-24 00:10:00")
val df1 = Seq(x1, x2, x3, x4).toDF("x")
checkAnswer(df1.selectExpr("to_unix_timestamp(x)"), Seq(
Row(ts1.getTime / 1000L), Row(null), Row(null), Row(null)))
checkAnswer(df1.selectExpr(s"to_unix_timestamp(x, 'yyyy-MM-dd mm:HH:ss')"), Seq(
Row(ts4.getTime / 1000L), Row(null), Row(ts3.getTime / 1000L), Row(null)))
// february
val y1 = "2016-02-29"
val y2 = "2017-02-29"
val ts5 = Timestamp.valueOf("2016-02-29 00:00:00")
val df2 = Seq(y1, y2).toDF("y")
checkAnswer(df2.select(unix_timestamp(col("y"), "yyyy-MM-dd")), Seq(
Row(ts5.getTime / 1000L), Row(null)))
// invalid format
checkAnswer(df1.selectExpr(s"to_unix_timestamp(x, 'yyyy-MM-dd bb:HH:ss')"), Seq(
Row(null), Row(null), Row(null), Row(null)))
}
test("to_timestamp") {
val date1 = Date.valueOf("2015-07-24")
val date2 = Date.valueOf("2015-07-25")
val ts_date1 = Timestamp.valueOf("2015-07-24 00:00:00")
val ts_date2 = Timestamp.valueOf("2015-07-25 00:00:00")
val ts1 = Timestamp.valueOf("2015-07-24 10:00:00")
val ts2 = Timestamp.valueOf("2015-07-25 02:02:02")
val s1 = "2015/07/24 10:00:00.5"
val s2 = "2015/07/25 02:02:02.6"
val ss1 = "2015-07-24 10:00:00"
val ss2 = "2015-07-25 02:02:02"
val fmt = "yyyy/MM/dd HH:mm:ss.S"
val df = Seq((date1, ts1, s1, ss1), (date2, ts2, s2, ss2)).toDF("d", "ts", "s", "ss")
checkAnswer(df.select(to_timestamp(col("ss"))),
df.select(unix_timestamp(col("ss")).cast("timestamp")))
checkAnswer(df.select(to_timestamp(col("ss"))), Seq(
Row(ts1), Row(ts2)))
checkAnswer(df.select(to_timestamp(col("s"), fmt)), Seq(
Row(ts1), Row(ts2)))
checkAnswer(df.select(to_timestamp(col("ts"), fmt)), Seq(
Row(ts1), Row(ts2)))
checkAnswer(df.select(to_timestamp(col("d"), "yyyy-MM-dd")), Seq(
Row(ts_date1), Row(ts_date2)))
}
test("datediff") {
val df = Seq(
(Date.valueOf("2015-07-24"), Timestamp.valueOf("2015-07-24 01:00:00"),
"2015-07-23", "2015-07-23 03:00:00"),
(Date.valueOf("2015-07-25"), Timestamp.valueOf("2015-07-25 02:00:00"),
"2015-07-24", "2015-07-24 04:00:00")
).toDF("a", "b", "c", "d")
checkAnswer(df.select(datediff(col("a"), col("b"))), Seq(Row(0), Row(0)))
checkAnswer(df.select(datediff(col("a"), col("c"))), Seq(Row(1), Row(1)))
checkAnswer(df.select(datediff(col("d"), col("b"))), Seq(Row(-1), Row(-1)))
checkAnswer(df.selectExpr("datediff(a, d)"), Seq(Row(1), Row(1)))
}
test("from_utc_timestamp") {
val df = Seq(
(Timestamp.valueOf("2015-07-24 00:00:00"), "2015-07-24 00:00:00"),
(Timestamp.valueOf("2015-07-25 00:00:00"), "2015-07-25 00:00:00")
).toDF("a", "b")
checkAnswer(
df.select(from_utc_timestamp(col("a"), "PST")),
Seq(
Row(Timestamp.valueOf("2015-07-23 17:00:00")),
Row(Timestamp.valueOf("2015-07-24 17:00:00"))))
checkAnswer(
df.select(from_utc_timestamp(col("b"), "PST")),
Seq(
Row(Timestamp.valueOf("2015-07-23 17:00:00")),
Row(Timestamp.valueOf("2015-07-24 17:00:00"))))
}
test("to_utc_timestamp") {
val df = Seq(
(Timestamp.valueOf("2015-07-24 00:00:00"), "2015-07-24 00:00:00"),
(Timestamp.valueOf("2015-07-25 00:00:00"), "2015-07-25 00:00:00")
).toDF("a", "b")
checkAnswer(
df.select(to_utc_timestamp(col("a"), "PST")),
Seq(
Row(Timestamp.valueOf("2015-07-24 07:00:00")),
Row(Timestamp.valueOf("2015-07-25 07:00:00"))))
checkAnswer(
df.select(to_utc_timestamp(col("b"), "PST")),
Seq(
Row(Timestamp.valueOf("2015-07-24 07:00:00")),
Row(Timestamp.valueOf("2015-07-25 07:00:00"))))
}
}
| aokolnychyi/spark | sql/core/src/test/scala/org/apache/spark/sql/DateFunctionsSuite.scala | Scala | apache-2.0 | 24,346 |
package slinky.core.facade
import slinky.core._
import scala.scalajs.js
import js.|
import scala.annotation.unchecked.uncheckedVariance
import scala.scalajs.js.annotation.{JSImport, JSName}
import scala.scalajs.js.JSConverters._
@js.native
trait ReactElement extends js.Object with ReactElementMod
object ReactElement {
@inline implicit def stringToElement(s: String): ReactElement =
s.asInstanceOf[ReactElement]
@inline implicit def intToElement(i: Int): ReactElement =
i.asInstanceOf[ReactElement]
@inline implicit def doubleToElement(d: Double): ReactElement =
d.asInstanceOf[ReactElement]
@inline implicit def floatToElement(f: Float): ReactElement =
f.asInstanceOf[ReactElement]
@inline implicit def booleanToElement(b: Boolean): ReactElement =
b.asInstanceOf[ReactElement]
@inline implicit def iterableToElement[A, B <: Iterable[A]](
e: Iterable[A]
)(implicit cv: A => ReactElement): ReactElement =
e.map(cv).toJSArray.asInstanceOf[ReactElement]
@inline implicit def optionToElement[E](o: Option[E])(implicit cv: E => ReactElement): ReactElement =
o match {
case Some(e) => cv(e)
case None => null.asInstanceOf[ReactElement]
}
@inline implicit def jsUndefOrToElement[E](j: js.UndefOr[E])(implicit cv: E => ReactElement): ReactElement = {
val x = if (j.isDefined) cv(j.get) else null.asInstanceOf[ReactElement]
x
}
@inline implicit def anyToElementContainer[E, F[_]](
e: F[E]
)(implicit f: ReactElementContainer[F], cv: E => ReactElement): F[ReactElement] =
f.map(e)(cv)
}
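// A minimal illustration of the implicit conversions defined above: plain values and
// Option containers can be used wherever a ReactElement is expected.
// {{{
// val fromString: ReactElement = "hello"
// val fromOption: ReactElement = Option("maybe")
// }}}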
@js.native
trait ReactInstance extends js.Object
@js.native
trait ReactChildren extends ReactElement
@js.native
trait ReactRef[T] extends js.Object {
var current: T @uncheckedVariance = js.native
}
@js.native
@JSImport("react", JSImport.Namespace, "React")
private[slinky] object ReactRaw extends js.Object {
def createElement(
elementName: String | js.Object,
properties: js.Dictionary[js.Any],
contents: ReactElement*
): ReactElement = js.native
val createElement: js.Dynamic = js.native // used for WithAttrs
def createContext[T](defaultValue: T): ReactContext[T] = js.native
def createRef[T](): ReactRef[T] = js.native
def forwardRef[P](fn: js.Object): js.Function = js.native
def memo(fn: js.Function, compare: js.UndefOr[js.Object]): js.Function = js.native
@js.native
object Children extends js.Object {
def map(children: ReactChildren, transformer: js.Function1[ReactElement, ReactElement]): ReactChildren = js.native
def map(children: ReactChildren, transformer: js.Function2[ReactElement, Int, ReactElement]): ReactChildren =
js.native
def forEach(children: ReactChildren, transformer: js.Function1[ReactElement, Unit]): Unit = js.native
def forEach(children: ReactChildren, transformer: js.Function2[ReactElement, Int, Unit]): Unit = js.native
def only(children: ReactChildren): ReactElement = js.native
def count(children: ReactChildren): Int = js.native
def toArray(children: ReactChildren): js.Array[ReactElement] = js.native
}
val Fragment: js.Object = js.native
val StrictMode: js.Object = js.native
val Suspense: js.Object = js.native
val Profiler: js.Object = js.native
}
object React {
def createElement(
elementName: String | js.Object,
properties: js.Dictionary[js.Any],
contents: ReactElement*
): ReactElement = ReactRaw.createElement(elementName, properties, contents: _*)
def createContext[T](defaultValue: T): ReactContext[T] = ReactRaw.createContext[T](defaultValue)
def createRef[T]: ReactRef[T] = ReactRaw.createRef[T]()
def forwardRef[P, R](component: FunctionalComponentTakingRef[P, R]): FunctionalComponentForwardedRef[P, R] =
new FunctionalComponentForwardedRef(ReactRaw.forwardRef(component.component))
def memo[P, R, C](component: FunctionalComponentCore[P, R, C]): C =
component.makeAnother(ReactRaw.memo(component.component, js.undefined))
def memo[P, R, C](component: FunctionalComponentCore[P, R, C], compare: (P, P) => Boolean): C =
component.makeAnother(
ReactRaw.memo(
component.component,
((oldProps: js.Dynamic, newProps: js.Dynamic) => {
compare(oldProps.__.asInstanceOf[P], newProps.__.asInstanceOf[P])
}): js.Function2[js.Dynamic, js.Dynamic, Boolean]
)
)
@JSImport("react", "Component", "React.Component")
@js.native
class Component(jsProps: js.Object) extends js.Object {
def forceUpdate(): Unit = js.native
def forceUpdate(callback: js.Function0[Unit]): Unit = js.native
}
object Children extends js.Object {
def map(children: ReactChildren, transformer: ReactElement => ReactElement): ReactChildren =
ReactRaw.Children.map(children, transformer)
def map(children: ReactChildren, transformer: (ReactElement, Int) => ReactElement): ReactChildren =
ReactRaw.Children.map(children, transformer)
def forEach(children: ReactChildren, transformer: ReactElement => Unit): Unit =
ReactRaw.Children.forEach(children, transformer)
def forEach(children: ReactChildren, transformer: (ReactElement, Int) => Unit): Unit =
ReactRaw.Children.forEach(children, transformer)
def only(children: ReactChildren): ReactElement =
ReactRaw.Children.only(children)
def count(children: ReactChildren): Int =
ReactRaw.Children.count(children)
def toArray(children: ReactChildren): js.Array[ReactElement] =
ReactRaw.Children.toArray(children)
}
}
@js.native
@JSImport("react", JSImport.Namespace, "React")
private[slinky] object HooksRaw extends js.Object {
def useState[T](default: T | js.Function0[T]): js.Tuple2[T, js.Function1[js.Any, Unit]] = js.native
def useEffect(thunk: js.Function0[EffectCallbackReturn]): Unit = js.native
def useEffect(thunk: js.Function0[EffectCallbackReturn], watchedObjects: js.Array[js.Any]): Unit = js.native
def useContext[T](context: ReactContext[T]): T = js.native
def useReducer[T, A](reducer: js.Function2[T, A, T], initialState: T): js.Tuple2[T, js.Function1[A, Unit]] = js.native
def useReducer[T, I, A](
reducer: js.Function2[T, A, T],
initialState: I,
init: js.Function1[I, T]
): js.Tuple2[T, js.Function1[A, Unit]] = js.native
def useMemo[T](callback: js.Function0[T], watchedObjects: js.Array[js.Any]): T = js.native
def useRef[T](initialValue: T): ReactRef[T] = js.native
def useImperativeHandle[R](ref: ReactRef[R], value: js.Function0[R]): Unit = js.native
def useImperativeHandle[R](ref: ReactRef[R], value: js.Function0[R], deps: js.Array[js.Any]): Unit = js.native
def useLayoutEffect(thunk: js.Function0[EffectCallbackReturn]): Unit = js.native
def useLayoutEffect(thunk: js.Function0[EffectCallbackReturn], watchedObjects: js.Array[js.Any]): Unit = js.native
def useDebugValue(value: String): Unit = js.native
// No useCallback, since its usage from Hooks won't be able to implement the reference equality guarantee while converting js.Function to scala.FunctionN, anyway
}
@js.native
trait EffectCallbackReturn extends js.Object
object EffectCallbackReturn {
@inline implicit def fromFunction[T](fn: () => T): EffectCallbackReturn =
(fn: js.Function0[T]).asInstanceOf[EffectCallbackReturn]
@inline implicit def fromJSFunction[T](fn: js.Function0[T]): EffectCallbackReturn =
fn.asInstanceOf[EffectCallbackReturn]
@inline implicit def fromAny[T](value: T): EffectCallbackReturn =
js.undefined.asInstanceOf[EffectCallbackReturn]
}
final class SetStateHookCallback[T](private val origFunction: js.Function1[js.Any, Unit]) extends AnyVal {
@inline def apply(newState: T): Unit =
origFunction.apply(newState.asInstanceOf[js.Any])
@inline def apply(transformState: T => T): Unit =
origFunction.apply(transformState: js.Function1[T, T])
}
object SetStateHookCallback {
@inline implicit def toFunction[T](callback: SetStateHookCallback[T]): T => Unit = callback(_)
@inline implicit def toTransformFunction[T](callback: SetStateHookCallback[T]): (T => T) => Unit = callback(_)
}
object Hooks {
@inline def useState[T](default: T): (T, SetStateHookCallback[T]) = {
val call = HooksRaw.useState[T](default)
(call._1, new SetStateHookCallback[T](call._2))
}
@inline def useState[T](lazyDefault: () => T): (T, SetStateHookCallback[T]) = {
val call = HooksRaw.useState[T](lazyDefault: js.Function0[T])
(call._1, new SetStateHookCallback[T](call._2))
}
@inline def useEffect[T](thunk: () => T)(implicit conv: T => EffectCallbackReturn): Unit =
HooksRaw.useEffect(() => conv(thunk()))
@inline def useEffect[T](thunk: () => T, watchedObjects: Iterable[Any])(
implicit conv: T => EffectCallbackReturn
): Unit =
HooksRaw.useEffect(
() => conv(thunk()),
watchedObjects.toJSArray.asInstanceOf[js.Array[js.Any]]
)
@inline def useContext[T](context: ReactContext[T]): T = HooksRaw.useContext[T](context)
@inline def useReducer[T, A](reducer: (T, A) => T, initialState: T): (T, A => Unit) = {
val ret = HooksRaw.useReducer[T, A](reducer, initialState)
(ret._1, ret._2)
}
@inline def useReducer[T, I, A](reducer: (T, A) => T, initialState: I, init: I => T): (T, A => Unit) = {
val ret = HooksRaw.useReducer[T, I, A](reducer, initialState, init)
(ret._1, ret._2)
}
@inline def useCallback[F](callback: F, watchedObjects: Iterable[Any])(implicit ev: F => js.Function): F =
    // Do not implement this with React's useCallback: converting the js.Function it returns to a scala.FunctionN
    // would produce a new object on every call and thus violate the reference equality guarantee of useCallback
useMemo(() => callback, watchedObjects)
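    // Illustrative usage (variable names are hypothetical): Hooks.useCallback((x: Int) => x + offset, Seq(offset))
    // returns the same Scala function instance across renders until `offset` changes, because the function value
    // itself is memoized via useMemo rather than being re-wrapped from a js.Function.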
@inline def useMemo[T](memoValue: () => T, watchedObjects: Iterable[Any]): T =
HooksRaw.useMemo[T](memoValue, watchedObjects.toJSArray.asInstanceOf[js.Array[js.Any]])
@inline def useRef[T](initialValue: T): ReactRef[T] =
HooksRaw.useRef[T](initialValue)
@inline def useImperativeHandle[R](ref: ReactRef[R], value: () => R): Unit =
HooksRaw.useImperativeHandle[R](ref, value)
@inline def useImperativeHandle[R](ref: ReactRef[R], value: () => R, deps: Iterable[Any]): Unit =
HooksRaw.useImperativeHandle[R](ref, value, deps.toJSArray.asInstanceOf[js.Array[js.Any]])
@inline def useLayoutEffect[T](thunk: () => T)(implicit conv: T => EffectCallbackReturn): Unit =
HooksRaw.useLayoutEffect(() => conv(thunk()))
@inline def useLayoutEffect[T](thunk: () => T, watchedObjects: Iterable[Any])(
implicit conv: T => EffectCallbackReturn
): Unit =
HooksRaw.useLayoutEffect(
() => conv(thunk()),
watchedObjects.toJSArray.asInstanceOf[js.Array[js.Any]]
)
@inline def useDebugValue(value: String): Unit = HooksRaw.useDebugValue(value)
}
@js.native
trait ErrorBoundaryInfo extends js.Object {
val componentStack: String = js.native
}
@js.native
trait PrivateComponentClass extends js.Object {
@JSName("props")
var propsR: js.Object = js.native
@JSName("state")
var stateR: js.Object = js.native
@JSName("refs")
val refsR: js.Dynamic = js.native
@JSName("context")
val contextR: js.Dynamic = js.native
@JSName("setState")
def setStateR(newState: js.Object): Unit = js.native
@JSName("setState")
def setStateR(fn: js.Function2[js.Object, js.Object, js.Object]): Unit = js.native
@JSName("setState")
def setStateR(newState: js.Object, callback: js.Function0[Unit]): Unit = js.native
@JSName("setState")
def setStateR(fn: js.Function2[js.Object, js.Object, js.Object], callback: js.Function0[Unit]): Unit = js.native
}
| shadaj/slinky | core/src/main/scala/slinky/core/facade/React.scala | Scala | mit | 11,775 |
/*
* This file is part of Apparat.
*
* Copyright (C) 2010 Joa Ebert
* http://www.joa-ebert.com/
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package apparat.taas.optimization
import apparat.taas.ast._
import apparat.taas.runtime.{Convert, Eval}
import apparat.taas.graph.{TaasGraphLinearizer, TaasGraph}
/**
* @author Joa Ebert
*/
object ConstantFolding extends TaasOptimization {
var i = 0
def name = "Constant Folding"
def optimize(context: TaasOptimizationContext) = apply(context.code.graph) match {
case true => context.copy(modified = true)
case false => context
}
def apply(graph: TaasGraph): Boolean = {
var modified = false
for(vertex <- graph.verticesIterator) {
val (m, r) = foldConstantExpressions(vertex.block)
modified |= m
vertex.block = r
}
modified
}
def foldConstantExpressions(block: List[TExpr]): (Boolean, List[TExpr]) = {
var modified = false
var r = List.empty[TExpr]
for(op <- block) op match {
//x = y op z if y && z are const -> x = evaluate(y op z)
case t3 @ T3(op, lhs: TConst, rhs: TConst, result) => Eval(op, lhs, rhs) match {
case Some(evaluated) => {
modified = true
r = T2(TOp_Nothing, evaluated, result) :: r
}
case None => r = t3 :: r
}
//x = (y)z:y -> x = z
case T2(TConvert(t), rhs: TaasTyped, result) if TaasType.isEqual(rhs.`type`, t) => {
modified = true
r = T2(TOp_Nothing, rhs, result) :: r
}
//x = (y)z:w -> x = z:y
case t2 @ T2(TConvert(t), rhs: TConst, result) => Convert(rhs, t) match {
case Some(converted) => {
modified = true
r = T2(TOp_Nothing, converted, result) :: r
}
case None => r = t2 :: r
}
case other => r = other :: r
}
if(modified) {
(true, r.reverse)
} else {
(false, block)
}
}
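	// For example, a statement with constant operands such as `x = 2 + 3` folds to `x = 5`, and a conversion
	// whose target type already matches the operand, such as `x = (Int)y` with `y: Int`, collapses to `x = y`;
	// all other expressions are passed through unchanged.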
}
| joa/apparat | apparat-taas/src/main/scala/apparat/taas/optimization/ConstantFolding.scala | Scala | lgpl-2.1 | 2,495 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.deregister
import org.joda.time.LocalDate
import play.api.libs.json.{Json, Writes}
import play.api.libs.json.JodaReads._
case class DeRegisterSubscriptionRequest(acknowledgementReference: String,
deregistrationDate: LocalDate,
deregistrationReason: DeregistrationReason,
deregReasonOther: Option[String] = None)
object DeRegisterSubscriptionRequest {
val DefaultAckReference = "A" * 32
implicit val reads = Json.reads[DeRegisterSubscriptionRequest]
implicit val writes: Writes[DeRegisterSubscriptionRequest] = {
import play.api.libs.functional.syntax._
import play.api.libs.json._
import play.api.libs.json.JodaWrites._
Writes[DeRegisterSubscriptionRequest] { ep =>
(
(__ \\ "acknowledgementReference").write[String] and
(__ \\ "deregistrationDate").write[LocalDate] and
__.write[DeregistrationReason] and
(__ \\ "deregReasonOther").writeNullable[String]
) (unlift(DeRegisterSubscriptionRequest.unapply)).writes(ep)
}
}
}
| hmrc/amls-frontend | app/models/deregister/DeRegisterSubscriptionRequest.scala | Scala | apache-2.0 | 1,747 |
/* sxr -- Scala X-Ray
* Copyright 2009, 2010 Mark Harrah, Olivier Michallat
*/
package sxr
import scala.tools.nsc.{ast, plugins, symtab, util, Global}
import ast.parser.Tokens
import plugins.Plugin
import symtab.Flags
import reflect.internal.util.SourceFile
import annotation.tailrec
import java.io.{File, Reader, Writer}
import java.net.URL
import OutputFormat.{OutputFormat, getWriter}
import Browse._
class StableID(val id: String) extends AnyVal
object Browse
{
/** The path to a link index, which allows linking between runs. This applies for both incremental and remote linking.*/
val LinkIndexRelativePath = "topLevel.index"
/** The path to a compressed link index, which allows linking between runs. This applies for both incremental and remote linking.*/
val CompressedLinkIndexRelativePath = LinkIndexRelativePath + ".gz"
/** The name of the directory containing cached remote `link.index`es*/
val CacheRelativePath = "cache.sxr"
}
/** The actual work extracting symbols and types is done here. */
abstract class Browse extends Plugin
{
def classDirectory: File
/** The output formats to write */
def outputFormats: List[OutputFormat]
/** The URLs of external sxr locations. */
def externalLinkURLs: List[URL]
/** Relativizes the path to the given Scala source file against the base directories. */
def getRelativeSourcePath(source: File): String
/** For a relativized source path, gets the full path. */
def getFullSourcePath(relative: String): Option[File]
/** The compiler.*/
val global: Global
import global._
lazy val outputDirectory = new File(classDirectory.getParentFile, classDirectory.getName + ".sxr")
outputDirectory.mkdirs
private[this] val linkIndexFile = new File(outputDirectory, LinkIndexRelativePath)
/** The entry method for invoking the configured writers to generate the output.*/
def generateOutput(externalIndexes: List[TopLevelIndex])
{
val sourceFiles = currentRun.units.toList.flatMap(getSourceFile(_))
val localIndex = getLocalIndex(sourceFiles)
val combinedIndex = TopLevelIndex.compound(localIndex :: externalIndexes)
if (sourceFiles.size > 0) {
val context = new OutputWriterContext(sourceFiles, outputDirectory, settings.encoding.value, localIndex)
val writers = outputFormats.map(getWriter(_, context))
writers.foreach(_.writeStart())
for(unit <- currentRun.units ; sourceFile <- getSourceFile(unit))
{
// generate the tokens
val tokens = scan(unit)
val traverser = new Traverse(tokens, unit.source, combinedIndex)
traverser(unit.body)
val tokenList = tokens.toList
Collapse(tokenList.toSet)
writers.foreach(_.writeUnit(sourceFile, getRelativeSourcePath(sourceFile), tokenList))
}
writers.foreach(_.writeEnd())
}
}
private[this] def getLocalIndex(sourceFiles: List[File]): MapIndex =
{
val relativeSources = sourceFiles.map(getRelativeSourcePath(_)).toSet
		// approximate check that a top-level name is still present, used to avoid stale entries in the index
def sourceExists(relativePath: String): Boolean = getFullSourcePath(relativePath).isDefined
val localIndex = TopLevelIndex.read(linkIndexFile).filterSources(src => relativeSources(src) && sourceExists(src)).add(topLevelMappings)
TopLevelIndex.write(linkIndexFile, localIndex)
localIndex
}
private[this] def topLevelMappings: Seq[(String,String)] =
{
val newMappings = new collection.mutable.HashMap[String, String]
for {
unit <- currentRun.units
sourceFile <- getSourceFile(unit)
relativeSource = getRelativeSourcePath(sourceFile)
name <- topLevelNames(unit.body)
}
newMappings += (name -> relativeSource)
newMappings.toSeq
}
private def getSourceFile(unit: CompilationUnit): Option[File] = unit.source.file.file match {
case null => None // code compiled from the repl has no source file
case f: File => Some(f.getAbsoluteFile)
}
/** Tokenizes the given source. The tokens are put into an ordered set by the start position of the token.
* Symbols will be mapped back to these tokens by the offset of the symbol.*/
private def scan(unit: CompilationUnit) =
{
val tokens = wrap.Wrappers.treeSet[Token]
def addComment(start: Int, end: Int) { tokens += new Token(start, end - start + 1, Tokens.COMMENT) }
class Scan extends syntaxAnalyzer.UnitScanner(unit)
{
override def deprecationWarning(off: Int, msg: String) {}
override def error(off: Int, msg: String) {}
override def incompleteInputError(off: Int, msg: String) {}
override def foundComment(value: String, start: Int, end: Int) {
addComment(start, end)
super.foundComment(value, start, end)
}
override def foundDocComment(value: String, start: Int, end: Int) {
addComment(start, end)
super.foundDocComment(value, start, end)
}
override def nextToken() {
val offset0 = offset
val code = token
super.nextToken()
if(includeToken(code)) {
val length = (lastOffset - offset0) max 1
tokens += new Token(offset0, length, code)
}
}
}
if(unit.isJava)
new syntaxAnalyzer.JavaUnitParser(unit).parse() // TODO: Java source support
else {
val parser = new syntaxAnalyzer.UnitParser(unit) { override def newScanner = new Scan }
parser.parse()
}
tokens
}
/** Filters out unwanted tokens such as whitespace and commas. Braces are currently
* included because () is annotated as Unit, and a partial function created by
* { case ... } is associated with the opening brace. */
private def includeToken(code: Int) =
{
import Tokens.{COMMENT, USCORE, isBrace, isKeyword, isIdentifier, isLiteral}
code match
{
case COMMENT | USCORE => true
case _ => isKeyword(code) || isIdentifier(code) || isLiteral(code) || isBrace(code)
}
}
/** Gets the token for the given offset.*/
private def tokenAt(tokens: wrap.SortedSetWrapper[Token], offset: Int): Option[Token] =
tokensAt(tokens, offset).headOption
/** Gets the token for the given offset.*/
private def tokensAt(tokens: wrap.SortedSetWrapper[Token], offset: Int): List[Token] =
{
// create artificial tokens to get a subset of the tokens starting at the given offset
// then, take the first token in the range
tokens.range(new Token(offset, 1, 0), new Token(offset+1, 1, 0)).toList
}
/** Filters unwanted symbols, such as packages.*/
private def ignore(s: Symbol): Boolean =
ignoreBase(s) ||
s.isModuleClass || // ignore the generated class for modules
s.isPrimaryConstructor // the primary constructor overlaps with the class type, so just use the class type
private def ignoreBase(s: Symbol): Boolean =
!s.exists ||
s.isPackage || // nothing done with packages
s.isImplClass
private class Traverse(tokens: wrap.SortedSetWrapper[Token], source: SourceFile, index: TopLevelIndex) extends Traverser
{
// magic method #1
override def traverse(tree: Tree)
{
def handleDefault()
{
process(tree, index)
super.traverse(tree)
}
tree match
{
case ValDef(_, _, _, rhs) =>
// tests for synthetic val created for the x in x :: Nil, which would associate the wrong type with ::
// because the synthetic val is associated with the :: token
if(tree.symbol != null && tree.symbol.hasFlag(Flags.SYNTHETIC))
traverse(rhs)
else
handleDefault()
case Template(parents, self, body) =>
// If the first parent in the source is a trait, the first parent in parents will be AnyRef and it will
// use the trait's token, bumping the trait. So, this hack processes traits first
val (traits, notTraits) = parents.partition(_.symbol.isTrait)
traverseTrees(traits)
traverseTrees(notTraits)
if (!self.isEmpty) traverse(self)
traverseStats(body, tree.symbol)
/*case DefDef(_, _, tparams, vparamss, _, rhs) =>
atOwner(tree.symbol) {
traverseTrees(tparams); traverseTreess(vparamss); traverse(rhs)
}*/
case _ =>
handleDefault()
}
}
// magic method #2
private def process(t: Tree, index: TopLevelIndex)
{
def catchToNone[T](f: => T): Option[T] = try Some(f) catch { case e: UnsupportedOperationException => None }
for(tSource <- catchToNone(t.pos.source) if tSource == source; token <- tokenAt(tokens, t.pos.point))
{
def processDefaultSymbol() =
{
if(t.hasSymbol && !ignore(t.symbol))
processSymbol(t, token, source.file.file, index)
}
def processSimple() { token.tpe = TypeAttribute(typeString(t.tpe), None) }
def processTypeTree(tt: TypeTree) { if(!ignore(tt.symbol)) processSymbol(tt, token, source.file.file, index) }
t match
{
case _: ClassDef => processDefaultSymbol()
case _: ModuleDef => processDefaultSymbol()
case _: ValOrDefDef => processDefaultSymbol()
case _: TypeDef => processDefaultSymbol()
//case _: Super => processDefaultSymbol()
case _: This => processDefaultSymbol()
case s: Select => processDefaultSymbol()
case _: New => processSimple()
case _: Alternative => processDefaultSymbol()
case _: Star => processDefaultSymbol()
case _: Bind => processDefaultSymbol()
case Apply(fun, args) =>
/*val funInfo = fun.symbol.info
println("Function: " + fun + " " + funInfo + " " + funInfo.getClass)
funInfo match
{
case PolyType(tparams, MethodType(params, resultType) =>
println("PolyType method type: " params)
case MethodType(params, resultType) =>
println("MethodType method type: " params)
}
println("Args: " + args.getClass + " " + args.map(_.getClass))
*/
//processDefaultSymbol()
/*fun match
{
case tt: TypeTree => if(!ignoreBase(tt.symbol)) processTypeTree(tt)
case _ => traverse(fun)
}
traverseTrees(args)*/
//case _: Import => processSimple()
case _: Return => processSimple()
case _: If => processSimple()
case _: Match => processSimple() // this will annotate the 'match' keyword with the type returned by the associated pattern match
// The associated token is no longer the case keyword, but the pattern, so this would overwrite the pattern's type.
// case _: CaseDef => processSimple() // this will annotate the 'case' keyword with the type returned by that particular case statement
case _: Throw => processSimple()
case ta: TypeApply => processSimple() // this fills in type parameters for methods
case Ident(_) => processDefaultSymbol()
case Literal(value) => processSimple() // annotate literals
case tt: TypeTree =>
if(token.isPlain)
processSymbol(tt, token, source.file.file, index)
case _ => ()
}
}
}
}
// magic method #3
private def processSymbol(t: Tree, token: Token, sourceFile: File, index: TopLevelIndex)
{
val sym = t.symbol
def addDefinition()
{
for(id <- stableID(sym)) {
token.source = getRelativeSourcePath(sourceFile)
token += id
}
}
sym match
{
case ts: TermSymbol =>
val sType =
t match
{
case ad: ApplyDynamic => ad.qual.tpe.memberType(ad.symbol)
case s: Select => s.qualifier.tpe.memberType(s.symbol)
case _ => ts.owner.thisType.memberType(ts)
}
if(sType != null)
{
val asString =
sType match
{
case mt: MethodType if ts.hasFlag(Flags.IMPLICIT)=> "implicit " + fullName(sym) + " : " + typeString(sType)
case _ => typeString(sType)
}
//println("Term symbol " + sym.id + ": " + asString)
token.tpe = TypeAttribute(asString, linkTo(sourceFile, sType.typeSymbol, index))
}
case ts: TypeSymbol =>
val treeType = t.tpe
val sType =
if(treeType == NoType) ts.info
else treeType
//println("Type symbol " + sym.id + ": " + typeString(sType))
if(sType != null)
token.tpe = TypeAttribute(typeString(sType), linkTo(sourceFile, sType.typeSymbol, index))
case _ => ()
}
if(sym != null && sym != NoSymbol)
{
if(t.isDef)
addDefinition()
else
{
linkTo(sourceFile, sym, index) match
{
case Some(x) => token.reference = x
case None => ()//addDefinition()
}
}
}
}
/** Constructs a decoded fully qualified name for the given symbol. */
private def fullName(s: Symbol): String =
{
require(s != NoSymbol)
val owner = s.owner
require(owner != NoSymbol)
if(owner.isRoot || owner.isEmptyPackageClass)
s.nameString
else
fullName(owner.enclClass) + "." + s.nameString
}
/** Produces a string for the given type that should be informative, but brief.*/
private def typeString(t: Type): String =
{
t match
{
case ct: CompoundType => compoundTypeString(ct, "")// tries to reduce size of some type strings
case pt: PolyType =>
import pt._
if(typeParams.isEmpty)
"=> " + typeString(resultType)
else
{
val typeParameters = typeParams.map(_.defString).mkString("[", ", ", "]")
resultType match
{
case ct: CompoundType => compoundTypeString(ct, typeParameters)
case _ => typeParameters + typeString(resultType)
}
}
case _ =>
if(t == null)
""
else
t.toString
}
}
/** Converts the given compound type to a string. 'mainPostfix' is copied after the main type symbol
* but before any parents or refinements*/
private def compoundTypeString(ct: CompoundType, mainPostfix: String) =
{
import ct._
typeSymbol.toString + mainPostfix +
{
if(ct.typeSymbol.isPackageClass)
""
else if(parents.isEmpty)
{
if(decls.isEmpty)
""
else
decls.mkString("{", "; ", "}")
}
else
parents.mkString(" extends ", " with ", "")
}
}
/** Generates a link usable in the file 'from' to the symbol 'sym', which might be in some other file. */
private def linkTo(from: File, sym: Symbol, index: TopLevelIndex): Option[Link] =
{
def externalURL: Option[String] = for(name <- topLevelName(sym); result <- index.source(name)) yield
result.base match {
case None => makeLink(from, result.relativeSource)
case Some(b) => result.resolve
}
if(sym == null || sym == NoSymbol || sym.owner == NoSymbol)
None
else
stableID(sym) flatMap { id =>
val source = sym.sourceFile
val url: Option[String] =
if(source != null)
Some(makeLink(from, source.file))
else
externalURL
url map { u => new Link(u, id) }
}
}
@tailrec
private[this] def topLevelName(s: Symbol): Option[String] =
if(s == null || s == NoSymbol || s.isEmptyPackage) None
else
{
val encl = s.owner
if(encl.isPackageClass || encl.isEmptyPackage) Some(fullNameString(s)) else topLevelName(encl)
}
private def makeLink(from: File, to: File): String =
if(to == from) "" else FileUtil.relativePath(relativeSource(from), relativeSource(to))
private def makeLink(from: File, toRelative: String): String =
FileUtil.relativePath(relativeSource(from), new File(toRelative))
private[this] def lookup(sym: Symbol): Option[File] =
Option(sym.associatedFile).flatMap(_.underlyingSource).flatMap(f => classpathEntry(f.file))
private[this] def classpathEntry(f: File): Option[File] =
classpathFiles find { entry => FileUtil.relativize(entry, f).isDefined }
// couldn't find a direct method to get the Seq[File]
private[this] lazy val classpathFiles: Seq[File] =
util.ClassPath.split(new tools.util.PathResolver(settings).result.asClasspathString).map(s => new File(s).getAbsoluteFile)
/** Generates a String identifying the provided Symbol that is stable across runs.
* The Symbol must be a publicly accessible symbol, such as a method, class, or type member.*/
private def stableID(sym: Symbol): Option[StableID] =
Some(new StableID(fullNameString(sym)))
private[this] def normalize(sym: Symbol): Symbol =
if(sym.isModuleClass) {
val mod = sym.companionModule
if(mod == NoSymbol) sym else mod
} else if(sym.isCaseApplyOrUnapply) {
val cClass = sym.owner.companionClass
if(cClass != NoSymbol)
cClass
else
sym.owner
} else if(sym.isPrimaryConstructor)
sym.owner
else if(sym.isStable && sym.isMethod) {
val get = sym.getter(sym.enclClass)
if(get == NoSymbol) sym else get
}
else
sym
// hack: probably not sufficient to distinguish all possible overloaded methods
// the hash can be truncated quite a bit: aren't distinguishing between many options
private def methodHash(sym: Symbol) = FileUtil.quarterHash(sym.tpe.toString)
private def relativeSource(file: File) = new File(getRelativeSourcePath(file.getAbsoluteFile))
/** Constructs a decoded fully qualified name for the given symbol. */
private def fullNameString(sym: Symbol): String =
{
require(sym != NoSymbol)
val s = normalize(sym)
val owner = s.owner
require(owner != NoSymbol)
val root = owner.isRoot || owner.isEmptyPackageClass
val sep = if(s.isTerm) { if(root) "" else "." } else ";"
val name = sep + nameString(s)
if(root)
name
else
fullNameString(owner) + name
}
private[this] def nameString(sym: Symbol): String =
{
val params = if(isOverloadedMethod(sym)) "(" + methodHash(sym) + ")" else ""
sym.nameString + params
}
private[this] def isOverloadedMethod(sym: Symbol): Boolean =
sym.isMethod && sym.owner.info.member(sym.name).isOverloaded
private[this] def topLevelNames(tree: Tree): Seq[String] =
{
val names = new collection.mutable.ListBuffer[String]
val t = new TopLevelTraverser {
def name(n: String) { names += n }
}
t(tree)
names.toList
}
private abstract class TopLevelTraverser extends Traverser
{
def name(n: String): Unit
override final def traverse(tree: Tree)
{
tree match
{
case (_: ClassDef | _ : ModuleDef) if isTopLevel(tree.symbol) => name(tree.symbol.fullName)
case p: PackageDef =>
if(!p.symbol.isEmptyPackage)
name(p.symbol.fullName)
super.traverse(tree)
case _ =>
}
}
def isTopLevel(sym: Symbol): Boolean =
(sym ne null) && (sym != NoSymbol) && !sym.isImplClass && !sym.isNestedClass && sym.isStatic &&
!sym.hasFlag(Flags.SYNTHETIC) && !sym.hasFlag(Flags.JAVA)
}
}
| harrah/browse | src/main/scala/Browse.scala | Scala | bsd-3-clause | 18,063 |
package net.technowizardry
import java.io.{InputStream,OutputStream}
class JavaXMLStreamFactory extends XMLStreamFactory {
def CreateReader(stream : InputStream) = new JavaXMLReader(stream)
def CreateWriter(stream : OutputStream) = new JavaXMLWriter(stream)
} | ajacques/XmppClient | SeniorProject/src/net/technowizardry/JavaXMLStreamFactory.scala | Scala | mit | 270 |
package com.geishatokyo.diffsql
import com.geishatokyo.diffsql.diff.{Aggregator, Normalizer, Differencer}
import com.geishatokyo.diffsql.parser.SQLParser
/**
* Created by takeshita on 14/02/17.
*/
class DiffSQL(sqlParser : SQLParser,
aggregator : Aggregator,
normalizer : Normalizer,
differencer : Differencer,
sqlnizer : SQLnizer) {
def diff(after : String, before : String) : List[String] = {
val afterDefs = normalizer.normalize(aggregator.aggregate(sqlParser.parseSql(after)))
val beforeDefs = normalizer.normalize(aggregator.aggregate(sqlParser.parseSql(before)))
val createTables = afterDefs.filter(t => {
!beforeDefs.exists(_.name == t.name)
})
val dropTables = beforeDefs.filter(t => {
!afterDefs.exists(_.name == t.name)
})
val diffs = afterDefs.flatMap(a => {
beforeDefs.find(_.name == a.name) match{
case Some(b) => {
List(differencer.diff(a,b))
}
case None => Nil
}
})
(if(sqlnizer.createTable_?){
createTables.map(t => sqlnizer.toCreateTable(t))
}else Nil ) :::
(if(sqlnizer.dropTable_?){
dropTables.map(t => sqlnizer.toDropTable(t))
}else Nil) :::
diffs.flatMap(d => sqlnizer.toAlterSQL(d))
}
}
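// A minimal usage sketch (the parser/aggregator/normalizer/differencer/sqlnizer values are assumed to be
// concrete implementations supplied elsewhere; the value names here are illustrative):
//
//   val diffSql = new DiffSQL(parser, aggregator, normalizer, differencer, sqlnizer)
//   val alterStatements = diffSql.diff(afterSchemaSql, beforeSchemaSql)
//   alterStatements.foreach(println)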
| geishatokyo/diff-sql-table | parser/src/main/scala/com/geishatokyo/diffsql/DiffSQL.scala | Scala | mit | 1,303 |
package com.twitter.finagle.ssl
import com.twitter.io.{StreamIO, TempDirectory}
import java.io._
import java.security.{KeyStore, SecureRandom}
import javax.net.ssl._
/**
* Take a PEM-encoded cert and key and turn it into a PKCS#12 keystore
* via openssl, which is then loaded into a resulting KeyManager
*
* @param certificatePath the path to the PEM-encoded certificate
* @param keyPath the path to the PEM-encoded private key
* @param caCertPath the path to the PEM-encoded intermediate/root certs;
* multiple certs should be concatenated into a single file. If caCertPath
* is set, use it in setting up the connection instead of certificatePath.
* The cert chain should contain the certificate.
* @return Array[KeyManager]
*/
object PEMEncodedKeyManager {
class ExternalExecutableFailed(message: String) extends Exception(message)
def apply(
certificatePath: String,
keyPath: String,
caCertPath: Option[String]
): Array[KeyManager] =
asStream(keyPath) { keyStream =>
// if the chain is present, use it instead of the cert (chain contains cert)
asStream(caCertPath.getOrElse(certificatePath)) { certificateStream =>
makeKeystore(certificateStream, keyStream)
}
}
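  // Illustrative usage (hypothetical file paths; requires the `openssl` binary on the PATH):
  //
  //   val keyManagers = PEMEncodedKeyManager("/path/to/cert.pem", "/path/to/key.pem", None)
  //   val sslContext = javax.net.ssl.SSLContext.getInstance("TLS")
  //   sslContext.init(keyManagers, null, null)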
private[this] def secret(length: Int): Array[Char] = {
val rng = new SecureRandom()
val b = new Array[Char](length)
for (i <- 0 until length)
b(i) = (65 + rng.nextInt(90 - 65)).toChar
b
}
private[this] def asStream[T](filename: String)(f: (InputStream) => T): T = {
val stream = new FileInputStream(filename)
try {
f(stream)
} finally {
stream.close()
}
}
private[this] def makeKeystore(
certificateStream: InputStream,
keyStream: InputStream
): Array[KeyManager] = {
// Create a secure directory for the conversion
val path = TempDirectory.create(false) //explicitly handle clean-up here, not at shutdown
try {
Shell.run(Array("chmod", "0700", path.getAbsolutePath()))
// Guard the keystore with a randomly-generated password
val password = secret(24)
val passwordStr = new String(password)
// Use non-deterministic file names
val fn = new String(secret(12))
val pemPath = path + File.separator + "%s.pem".format(fn)
val p12Path = path + File.separator + "%s.p12".format(fn)
// Write out the certificate and key
val f = new FileOutputStream(new File(pemPath))
try {
StreamIO.copy(certificateStream, f)
StreamIO.copy(keyStream, f)
} finally {
f.close()
}
// Import the PEM-encoded certificate and key to a PKCS12 file
Shell.run(
Array(
"openssl",
"pkcs12",
"-export",
"-password",
"pass:%s".format(passwordStr),
"-in",
pemPath,
"-out",
p12Path
)
)
// Read the resulting keystore
val keystore = asStream(p12Path) { stream =>
val ks = KeyStore.getInstance("pkcs12")
ks.load(stream, password)
ks
}
// Clean up by deleting the files and directory
Seq(pemPath, p12Path).foreach(new File(_).delete())
val kmf = KeyManagerFactory.getInstance("SunX509")
kmf.init(keystore, password)
kmf.getKeyManagers
} finally {
path.delete()
}
}
}
| mkhq/finagle | finagle-core/src/main/scala/com/twitter/finagle/ssl/PEMEncodedKeyManager.scala | Scala | apache-2.0 | 3,373 |
import doodle.core._
import doodle.turtle.Instruction._
import doodle.turtle._
import doodle.backend.StandardInterpreter._
import doodle.core.Image._
import doodle.core.Point._
import doodle.core._
import doodle.jvm.Java2DCanvas._
import doodle.syntax._
import scala.collection.immutable.Iterable
object Week5 extends App {
// def polygonWithTurtles(sides: Int, sideLength: Double): Image = {
// val rotation = Angle.one / sides
// def iter(n: Int): List[Instruction] =
// n match {
// case 0 => Nil
// case n => turn(rotation) :: forward(sideLength) :: iter(n-1)
// }
// Turtle.draw(iter(sides))
// }
// polygonWithTurtles(5,10)
//
// def squareSpiral(steps: Int, distance: Double, angle: Angle, increment: Double): Image = {
// def iter(n: Int, distance: Double): List[Instruction] = {
// n match {
// case 0 => Nil
// case n => forward(distance) :: turn(angle) :: iter(steps-1, distance + increment)
// }
// }
// Turtle.draw(iter(steps, distance))
// }
// def polygonWithTurtle(sides: Int, size: Int): Image = {
// val rotation = Angle.one / sides
// val elts: List[Instruction] =
// (1 to sides).toList.map { i =>
// turn(rotation) :: forward(size)
// }
// Turtle.draw(elts)
// }
def polygon(sides: Int, size: Int): Image = {
val rotation = Angle.one / sides
val elts =
(1 to sides).toList.map { i =>
LineTo(polar(size, rotation * i))
}
closedPath(MoveTo(polar(size, Angle.zero)) :: elts)
}
val result: Image = polygon(5,50)
result.draw
def style(image: Image): Image =
image.
lineWidth(6.0).
lineColor(Color.royalBlue).
fillColor(Color.skyBlue)
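  // DoubleLimits duplicates every element, e.g. DoubleLimits(List(1, 2, 3)) == List(1, 1, 2, 2, 3, 3),
  // while Empty below always flatMaps each element to an empty list, so it returns Nil.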
def DoubleLimits(introList : List[Any]) = {
introList flatMap(x => List(x,x))
}
def Empty(introList : List[Any]) = {
introList flatMap(x => List())
}
println(Empty(List(1,2,3)))
}
//
//final case class Distribution[A] (events: List[(A, Double)]) {
// def flatMap[B](f: A => Distribution[B]) = {
// val x = events.flatMap {
// case (a: A, p: Double) =>
// f(a).events.map {
// case (b: B, pb: Double) => (b, pb * p)
// }
// }
// }
//// }.groupBy(_._1).mapValues(_.unzip._2.sum).toList
//} | mbarshay/ScalaClassHelloWorld | src/main/scala/Week5.scala | Scala | apache-2.0 | 2,273 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.kudu
import java.util
import com.datamountaineer.streamreactor.connect.kudu.config.{KuduConfig, KuduSettings}
import com.datamountaineer.streamreactor.connect.kudu.sink.KuduWriter
import com.datamountaineer.streamreactor.connect.schemas.ConverterUtil
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.errors.RetriableException
import org.apache.kafka.connect.sink.SinkRecord
import org.apache.kudu.client.SessionConfiguration.FlushMode
import org.apache.kudu.client._
import org.mockito.Matchers.{any, eq => mockEq}
import org.mockito.Mockito._
import org.scalatest.mockito.MockitoSugar
import scala.annotation.tailrec
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
/**
* Created by [email protected] on 04/03/16.
* stream-reactor
*/
class TestKuduWriter extends TestBase with KuduConverter with MockitoSugar with ConverterUtil {
"A Kudu Writer should write" in {
val record = getTestRecords.head
val kuduSchema = convertToKuduSchema(record)
val kuduRow = kuduSchema.newPartialRow()
//mock out kudu client
val insert = mock[Upsert]
val table = mock[KuduTable]
val client = mock[KuduClient]
val kuduSession = mock[KuduSession]
val resp = mock[ListTablesResponse]
val config = new KuduConfig(getConfigAutoCreate(""))
val settings = KuduSettings(config)
when(client.newSession()).thenReturn(kuduSession)
when(client.tableExists(TABLE)).thenReturn(true)
when(client.openTable(TABLE)).thenReturn(table)
when(table.newUpsert()).thenReturn(insert)
when(insert.getRow).thenReturn(kuduRow)
when(table.getSchema).thenReturn(kuduSchema)
when(kuduSession.getFlushMode).thenReturn(FlushMode.AUTO_FLUSH_SYNC)
when(client.getTablesList).thenReturn(resp)
when(resp.getTablesList).thenReturn(List.empty[String].asJava)
val writer = new KuduWriter(client, settings)
writer.write(getTestRecords)
writer.close()
}
"A Kudu writer should write JSON" in {
val jsonPayload =
"""
| {
| "_id": "580151bca6f3a2f0577baaac",
| "index": 0,
| "guid": "6f4dbd32-d325-4eb7-87f9-2e7fa6701cba",
| "isActive": false,
| "balance": 3589.15,
| "age": 27,
| "eyeColor": "brown",
| "name": "Clements Crane",
| "company": "TERRAGEN",
| "email": "[email protected]",
| "phone": "+1 (905) 514-3719",
| "address": "316 Hoyt Street, Welda, Puerto Rico, 1474",
| "latitude": "-49.817964",
| "longitude": "-141.645812"
| }
""".stripMargin
val topic = "sink_test"
val record = new SinkRecord(topic, 0, null, null, Schema.STRING_SCHEMA, jsonPayload, 0)
val payload = convertStringSchemaAndJson(record, Map.empty, Set.empty)
val kuduSchema = convertToKuduSchemaFromJson(payload, topic)
val kuduRow = kuduSchema.newPartialRow()
//mock out kudu client
val insert = mock[Upsert]
val table = mock[KuduTable]
val client = mock[KuduClient]
val kuduSession = mock[KuduSession]
val resp = mock[ListTablesResponse]
val config = new KuduConfig(getConfigAutoCreate(""))
val settings = KuduSettings(config)
when(client.newSession()).thenReturn(kuduSession)
when(client.tableExists(TABLE)).thenReturn(true)
when(client.openTable(TABLE)).thenReturn(table)
when(table.newUpsert()).thenReturn(insert)
when(insert.getRow).thenReturn(kuduRow)
when(table.getSchema).thenReturn(kuduSchema)
when(kuduSession.getFlushMode).thenReturn(FlushMode.AUTO_FLUSH_SYNC)
when(client.getTablesList).thenReturn(resp)
when(resp.getTablesList).thenReturn(List.empty[String].asJava)
val writer = new KuduWriter(client, settings)
writer.write(Set(record))
writer.close()
}
"A Kudu Writer should create table on arrival of first record" in {
val record = getTestRecords.head
val kuduSchema = convertToKuduSchema(record)
val kuduRow = kuduSchema.newPartialRow()
//mock out kudu client
val insert = mock[Upsert]
val table = mock[KuduTable]
val client = mock[KuduClient]
val kuduSession = mock[KuduSession]
val resp = mock[ListTablesResponse]
val config = new KuduConfig(getConfigAutoCreate(""))
val settings = KuduSettings(config)
when(client.newSession()).thenReturn(kuduSession)
when(client.tableExists(TABLE)).thenReturn(false)
when(client.openTable(TABLE)).thenReturn(table)
when(table.newUpsert()).thenReturn(insert)
when(insert.getRow).thenReturn(kuduRow)
when(table.getSchema).thenReturn(kuduSchema)
when(kuduSession.getFlushMode).thenReturn(FlushMode.AUTO_FLUSH_SYNC)
when(client.getTablesList).thenReturn(resp)
when(resp.getTablesList).thenReturn(List.empty[String].asJava)
val writer = new KuduWriter(client, settings)
writer.write(getTestRecords)
writer.close()
}
"A Kudu Writer should create table on arrival of first JSON record" in {
val jsonPayload =
"""
| {
| "_id": "580151bca6f3a2f0577baaac",
| "index": 0,
| "guid": "6f4dbd32-d325-4eb7-87f9-2e7fa6701cba",
| "isActive": false,
| "balance": 3589.15,
| "age": 27,
| "eyeColor": "brown",
| "name": "Clements Crane",
| "company": "TERRAGEN",
| "email": "[email protected]",
| "phone": "+1 (905) 514-3719",
| "address": "316 Hoyt Street, Welda, Puerto Rico, 1474",
| "latitude": "-49.817964",
| "longitude": "-141.645812"
| }
""".stripMargin
val topic = "sink_test"
val record = new SinkRecord(topic, 0, null, null, Schema.STRING_SCHEMA, jsonPayload, 0)
val payload = convertStringSchemaAndJson(record, Map.empty, Set.empty)
val kuduSchema = convertToKuduSchemaFromJson(payload, topic)
val kuduRow = kuduSchema.newPartialRow()
//mock out kudu client
val insert = mock[Upsert]
val table = mock[KuduTable]
val client = mock[KuduClient]
val kuduSession = mock[KuduSession]
val resp = mock[ListTablesResponse]
val config = new KuduConfig(getConfigAutoCreate(""))
val settings = KuduSettings(config)
when(client.newSession()).thenReturn(kuduSession)
when(client.tableExists(TABLE)).thenReturn(false)
when(client.openTable(TABLE)).thenReturn(table)
when(table.newUpsert()).thenReturn(insert)
when(insert.getRow).thenReturn(kuduRow)
when(table.getSchema).thenReturn(kuduSchema)
when(kuduSession.getFlushMode).thenReturn(FlushMode.AUTO_FLUSH_SYNC)
when(client.getTablesList).thenReturn(resp)
when(resp.getTablesList).thenReturn(List.empty[String].asJava)
val writer = new KuduWriter(client, settings)
writer.write(getTestRecords)
writer.close()
}
"should identify schema change from source records" in {
val schema1 = createSchema
val schema2 = createSchema5
val rec1 = createSinkRecord(createRecord(schema1, "1"), TOPIC, 1)
val rec2 = createSinkRecord(createRecord5(schema2, "2"), TOPIC, 2)
val kuduSchema = convertToKuduSchema(rec1)
val kuduSchema2 = convertToKuduSchema(rec2.valueSchema())
val kuduRow2 = kuduSchema2.newPartialRow()
//mock out kudu client
val client = mock[KuduClient]
val kuduSession = mock[KuduSession]
val table = mock[KuduTable]
val insert = mock[Upsert]
val atrm = mock[AlterTableResponse]
val resp = mock[ListTablesResponse]
val config = new KuduConfig(getConfigAutoCreateAndEvolve(""))
val settings = KuduSettings(config)
when(client.newSession()).thenReturn(kuduSession)
when(client.tableExists(TABLE)).thenReturn(false)
when(client.openTable(TABLE)).thenReturn(table)
when(table.newUpsert()).thenReturn(insert)
when(table.getSchema).thenReturn(kuduSchema)
when(insert.getRow).thenReturn(kuduRow2)
when(client.alterTable(mockEq(TABLE), any[AlterTableOptions])).thenReturn(atrm)
when(client.isAlterTableDone(TABLE)).thenReturn(true)
when(kuduSession.getFlushMode).thenReturn(FlushMode.AUTO_FLUSH_SYNC)
when(client.getTablesList).thenReturn(resp)
when(resp.getTablesList).thenReturn(List.empty[String].asJava)
val writer = new KuduWriter(client, settings)
writer.write(Set(rec1))
writer.write(Set(rec2))
}
"A Kudu Writer should throw retry on flush errors" in {
val record = getTestRecords.head
val kuduSchema = convertToKuduSchema(record)
val kuduRow = kuduSchema.newPartialRow()
//mock out kudu client
val insert = mock[Upsert]
val table = mock[KuduTable]
val client = mock[KuduClient]
val kuduSession = mock[KuduSession]
val resp = mock[OperationResponse]
val errorRow = mock[RowError]
val tableresp = mock[ListTablesResponse]
val config = new KuduConfig(getConfigAutoCreateRetry(""))
val settings = KuduSettings(config)
when(client.newSession()).thenReturn(kuduSession)
when(client.tableExists(TABLE)).thenReturn(true)
when(client.openTable(TABLE)).thenReturn(table)
when(table.newUpsert()).thenReturn(insert)
when(table.getSchema).thenReturn(kuduSchema)
when(insert.getRow).thenReturn(kuduRow)
when(resp.hasRowError).thenReturn(true)
when(errorRow.toString).thenReturn("Test error string")
when(resp.getRowError).thenReturn(errorRow)
when(kuduSession.flush()).thenReturn(List(resp))
when(kuduSession.getFlushMode).thenReturn(FlushMode.AUTO_FLUSH_SYNC)
when(client.getTablesList).thenReturn(tableresp)
when(tableresp.getTablesList).thenReturn(List.empty[String].asJava)
val writer = new KuduWriter(client, settings)
intercept[RetriableException] {
writer.write(getTestRecords)
}
}
"A Kudu Writer should check pending errors and throw exception" in {
val record = getTestRecords.head
val kuduSchema = convertToKuduSchema(record)
val kuduRow = kuduSchema.newPartialRow()
//mock out kudu client
val insert = mock[Upsert]
val table = mock[KuduTable]
val client = mock[KuduClient]
val kuduSession = mock[KuduSession]
val errorRow = mock[RowError]
val rowErrorsAndOverflowStatus = mock[RowErrorsAndOverflowStatus]
val resp = mock[ListTablesResponse]
when(rowErrorsAndOverflowStatus.getRowErrors()).thenReturn(Array[RowError](errorRow))
val config = new KuduConfig(getConfigAutoCreateRetryWithBackgroundFlush(""))
val settings = KuduSettings(config)
when(client.newSession()).thenReturn(kuduSession)
when(client.tableExists(TABLE)).thenReturn(true)
when(client.openTable(TABLE)).thenReturn(table)
when(table.newUpsert()).thenReturn(insert)
when(table.getSchema).thenReturn(kuduSchema)
when(insert.getRow).thenReturn(kuduRow)
when(errorRow.toString).thenReturn("Test error string")
when(kuduSession.getFlushMode).thenReturn(FlushMode.AUTO_FLUSH_BACKGROUND)
when(kuduSession.getPendingErrors()).thenReturn(rowErrorsAndOverflowStatus)
when(client.getTablesList).thenReturn(resp)
when(resp.getTablesList).thenReturn(List.empty[String].asJava)
val writer = new KuduWriter(client, settings)
verify(kuduSession, times(1)).setFlushMode(FlushMode.AUTO_FLUSH_BACKGROUND)
intercept[RetriableException] {
writer.write(getTestRecords)
}
verify(kuduSession, times(1)).getPendingErrors
}
}
| CodeSmell/stream-reactor | kafka-connect-kudu/src/test/scala/com/datamountaineer/streamreactor/connect/kudu/TestKuduWriter.scala | Scala | apache-2.0 | 12,110 |
package org.mozartoz.bootcompiler
package symtab
import scala.collection.mutable.{ ArrayBuffer, Map }
import scala.util.parsing.input.{ Position, NoPosition }
import ast._
import bytecode._
/** Companion object of [[org.mozartoz.bootcompiler.symtab.Abstraction]] */
object Abstraction {
private val nextID = (new util.Counter).next _
}
/** Abstraction */
class Abstraction(val owner: Abstraction, val name: String, val pos: Position) {
/** Numeric ID of the abstraction */
val id = Abstraction.nextID()
/** Formal parameters */
val formals = new ArrayBuffer[Symbol]
/** Local variables */
val locals = new ArrayBuffer[Symbol]
/** Global variables, aka contextual environment */
val globals = new ArrayBuffer[Symbol]
/** Flags */
val flags = new ArrayBuffer[String]
/** AST of the body */
var body: Statement = SkipStatement()
/** Map from free variables to the corresponding global variable */
private val _freeVarToGlobal = Map[Symbol, Symbol]()
/** Code area */
val codeArea = new CodeArea(this)
/** Arity, i.e., number of formal parameters */
def arity = formals.size
/** Acquires a symbol as being declared in this abstraction */
def acquire(symbol: Symbol) {
symbol.setOwner(this)
if (symbol.isFormal) formals += symbol
else if (symbol.isGlobal) globals += symbol
else locals += symbol
}
/** Full name of the abstraction, for display purposes */
def fullName: String =
if (owner == NoAbstraction) name
else owner.fullName + "::" + name
/** Creates a new abstraction that is inner to this abstraction */
def newAbstraction(name: String, pos: Position) =
new Abstraction(this, name, pos)
/** Maps a free variable to the corresponding global variable
*
* If no such global variable exists yet, it is created.
*/
def freeVarToGlobal(symbol: Symbol) = {
require(symbol.owner ne this)
_freeVarToGlobal.getOrElseUpdate(symbol, {
val global = symbol.copyAsGlobal()
acquire(global)
global
})
}
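  // e.g. a variable declared in an enclosing abstraction but referenced here is rewritten to the cached
  // global copy returned by freeVarToGlobal; repeated calls for the same symbol reuse the same global.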
/** Dumps the abstraction on standard error
*
* @param includeByteCode include the bytecode in the dump
*/
def dump(includeByteCode: Boolean = true) {
println(fullName + ": P/" + arity.toString())
println(" formals: " + (formals mkString " "))
println(" locals: " + (locals mkString " "))
println(" globals: " + (globals mkString " "))
println()
println(body)
if (codeArea.isDefined) {
println()
codeArea.dump(includeByteCode)
}
}
}
/** No abstraction marker */
object NoAbstraction extends Abstraction(null, "<NoAbstraction>", NoPosition) {
override val owner = this
}
| layus/mozart2 | bootcompiler/src/main/scala/org/mozartoz/bootcompiler/symtab/Abstraction.scala | Scala | bsd-2-clause | 2,671 |
package templemore.onx.version3
import Token._
/**
* @author Chris Turner
*/
object OandX {
def main(args: Array[String]) = {
val grid = new Grid
val winScanner = new WinScanner
val moveFinder = new MoveFinder(true)
var token = Token.Nought
while ( !grid.full_? && !winScanner.win_?(grid) ) {
println("Player: " + Token.symbol(token))
grid.token(moveFinder.findBestPosition(grid, token), token)
println(grid.toString)
token = Token.flip(token)
}
if ( winScanner.win_?(grid) ) {
println("Game won by: " + Token.symbol(winScanner.winningToken(grid)))
}
else {
println("Game drawn");
}
}
} | skipoleschris/OandX | src/test/scala/templemore/onx/version3/OandX.scala | Scala | apache-2.0 | 677 |
package com.dvgodoy.spark.benford.distributions
import breeze.linalg.DenseVector
import breeze.stats.distributions.RandBasis
import com.dvgodoy.spark.benford.constants._
import com.dvgodoy.spark.benford.util._
import org.apache.commons.math3.random.MersenneTwister
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.scalactic._
import play.api.libs.json._
import scala.collection.mutable
import play.api.libs.functional.syntax._
import play.api.libs.json.Reads._
import Accumulation._
import play.json.extras.scalactic.ScalacticFormats._
class Bootstrap extends Serializable {
private def rollToss(nOutcomes: Int, rand: RandBasis): (Int, Double) = {
(rand.randInt(nOutcomes).get(),rand.uniform.get())
}
private def findOutcome(aliasTable: AliasTable, rollToss: (Int, Double)): Int = if (rollToss._2 < aliasTable.modProb(rollToss._1)) rollToss._1 + 10 else aliasTable.aliases(rollToss._1) + 10
private def calcFrequencies(digitsCounts: List[(Int, Int)]): Frequencies = {
val digitsTotal = digitsCounts.map { case (d1d2, count) => count }.sum
val countsD1D2 = digitsCounts ::: (10 to 99).toSet.diff(digitsCounts.map(_._1).toSet).toList.map(n => (n, 0))
val frequenciesD1D2 = countsD1D2.map { case (d1d2, count) => (d1d2, count/digitsTotal.toDouble) }
.toArray.sorted.map(_._2)
val frequenciesD1 = countsD1D2.map { case (d1d2, count) => (d1d2/10, count) }
.groupBy { case (d1, count) => d1 }
.map { case (d1, arrayCounts) => (d1, arrayCounts.map { case (d1, count) => count }.sum/digitsTotal.toDouble) }
.toArray.sorted.map(_._2)
val frequenciesD2 = countsD1D2.map { case (d1d2, count) => (d1d2%10, count) }
.groupBy { case (d2, count) => d2 }
.map { case (d2, arrayCounts) => (d2, arrayCounts.map { case (d2, count) => count }.sum/digitsTotal.toDouble) }
.toArray.sorted.map(_._2)
Frequencies(digitsTotal, frequenciesD1D2, frequenciesD1, frequenciesD2)
}
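  // Indexing convention of the returned arrays: the D1D2 array holds the relative frequency of the
  // first-two-digits value i + 10 at index i (i in 0..89), the D1 array that of leading digit i + 1
  // at index i (i in 0..8), and the D2 array that of second digit i at index i (i in 0..9).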
private def calcOverlaps(bootStatsCIRDD: RDD[StatsCIByLevel], benfordStatsCIRDD: RDD[StatsCIByLevel]): RDD[OverlapsByLevel] = {
val overlapRDD = bootStatsCIRDD.map{ case StatsCIByLevel(idxLevel, depth, stats) => ((idxLevel, depth), stats) }
.join(benfordStatsCIRDD.map{ case StatsCIByLevel(idxLevel, depth, stats) => ((idxLevel, depth), stats) })
.map{ case ((idxLevel, depth), (boot, benford)) => OverlapsByLevel(idxLevel, depth, boot.overlaps(benford), boot.contains(BenfordStatsDigits)) }
overlapRDD
}
private def calcResultsByLevel(overlapRDD: RDD[OverlapsByLevel]): RDD[ResultsByLevel] = {
overlapRDD.map { case obl => obl.calcResults }
}
protected def buildAliasTable(prob: Array[Double]): AliasTable = {
val nOutcomes = prob.length
assert(nOutcomes == 90)
val aliases = DenseVector.zeros[Int](nOutcomes)
val sum = breeze.linalg.sum(prob)
val modProb = DenseVector(prob.map { param => param / sum * nOutcomes })
val (iSmaller, iLarger) = (0 until nOutcomes).partition(modProb(_) < 1d)
val smaller = mutable.Stack(iSmaller:_*)
val larger = mutable.Stack(iLarger:_*)
while (smaller.nonEmpty && larger.nonEmpty) {
val small = smaller.pop()
val large = larger.pop()
aliases(small) = large
modProb(large) -= (1d - modProb(small))
if (modProb(large) < 1)
smaller.push(large)
else
larger.push(large)
}
AliasTable(modProb, aliases, nOutcomes)
}
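  // The table above implements the alias (Walker/Vose) method: each of the 90 cells either keeps its own
  // outcome (when the uniform toss falls below modProb) or defers to a single alias cell, so findOutcome
  // can sample a first-two-digits value (10..99) in constant time from one roll and one toss.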
protected def generateBootstrapTable(sc: SparkContext, sampleSize: Int, numSamples: Int): RDD[(Long, (Int, (Int, Double)))] = {
val nOutcomes = 90
sc.parallelize(1 to numSamples).mapPartitionsWithIndex { (idx, iter) =>
val rand = new RandBasis(new MersenneTwister(idx + 42))
iter.flatMap(sample => Array.fill(sampleSize)(rollToss(nOutcomes, rand)).zipWithIndex
.map { case ((roll, toss), idx) => (idx.toLong, (sample, (roll, toss))) })
}
}
protected case class OutcomeByLevel(idx: Long, idxLevel: Long, depth: Int, sample: Int, n: Int)
protected def generateBootstrapOutcomes(bootstrapTableRDD: RDD[(Long, (Int, (Int, Double)))], data: DataByLevel, aliasMap: Map[Long,AliasTable], groupId: Int): RDD[OutcomeByLevel] = {
val depth = data.levels(groupId)._2
val idxGroup = data.dataByLevelsRDD
.filter{ case Level(idxLevel, depth, idx, value, d1d2) => idxLevel == groupId }
.map{ case Level(idxLevel, depth, idx, value, d1d2) => idx }.collect().toSet
bootstrapTableRDD.filter{case (idx, (sample, (roll, toss))) => idxGroup.contains(idx)}
.map { case (idx, (sample, (roll, toss))) => OutcomeByLevel(idx, groupId, depth, sample, findOutcome(aliasMap(groupId), (roll, toss))) }
}
protected case class MomentsByLevel(idxLevel: Long, depth: Int, sample: Int, moments: MomentsDigits)
protected def calcMomentsSamples(bootRDD: RDD[OutcomeByLevel], groupId: Int): RDD[MomentsByLevel] = {
bootRDD.filter { case OutcomeByLevel(idx, idxLevel, depth, sample, n) => idxLevel == groupId}
.map { case OutcomeByLevel(idx, idxLevel, depth, sample, n) => ((idxLevel, depth, sample, n), 1) }
.reduceByKey(_ + _)
.map { case ((idxLevel, depth, sample, n), count) => ((idxLevel, depth, sample), calcMoments(n, count))}
.reduceByKey(_ + _)
.map { case ((idxLevel, depth, sample), moments) => MomentsByLevel(idxLevel, depth, sample, moments) }
}
protected case class StatsByLevel(idxLevel: Long, depth: Int, sample: Int, stats: StatsDigits)
protected def calcStatsSamples(momentsRDD: RDD[MomentsByLevel]): RDD[StatsByLevel] = {
momentsRDD.map { case MomentsByLevel(idxLevel, depth, sample, moments) => StatsByLevel(idxLevel, depth, sample, calcStatsDigits(moments)) }
}
protected def groupStats(statsRDD: RDD[StatsByLevel]): RDD[((Long, Int), StatsDigits)] = {
statsRDD.map { case StatsByLevel(idxLevel, depth, sample, stats) => ((idxLevel, depth), stats) }.reduceByKey(_+_)
}
protected def calcStatsCIs(dataStatsRDD: RDD[((Long, Int), StatsDigits)], groupStatsRDD: RDD[((Long, Int), StatsDigits)], conf: Array[Double]): RDD[StatsCIByLevel] = {
groupStatsRDD.join(dataStatsRDD)
.map { case ((idxLevel, depth), (groupStats, dataStats)) => StatsCIByLevel(idxLevel, depth, groupStats.calcBcaCI(conf, dataStats)) }
}
def calcDataStats(data: DataByLevelMsg, groupId: Int): DataStatsMsg = {
try {
withGood(data) { (data) =>
val originalRDD = data.dataByLevelsRDD.map { case Level(idxLevel, depth, idx, value, d1d2) => OutcomeByLevel(idx, idxLevel, depth, 1, d1d2) }
val momentsOriginalRDD = calcMomentsSamples(originalRDD, groupId)
val statsOriginalRDD = calcStatsSamples(momentsOriginalRDD)
groupStats(statsOriginalRDD)
}
} catch {
case ex: Exception => Bad(One(s"Error: ${ex.getMessage}"))
}
}
protected def findLevels(dataLevelRDD: RDD[((Long, Double, Int), Array[String])])(implicit jobId: JobId): DataByLevelMsg = {
val sc = dataLevelRDD.context
try {
sc.setJobDescription(jobId.id + ".findLevels")
val concatRDD = dataLevelRDD
.map { case (value, levels) => (value, levels
.zipWithIndex
.map { case (nextLevels, idx) => (levels.slice(0, idx + 1).foldLeft("L")(_ + "." + _), idx) } ) }
val levelsRDD = concatRDD.flatMap { case (value, levels) => levels.map { case (name, depth) => ((name, depth), value) } }
val uniqLevelsRDD = levelsRDD.map { case (classif, value) => classif }.distinct().sortBy(identity).zipWithIndex()
val uniqLevels = uniqLevelsRDD.collect()
val levels = if (uniqLevels.length > 0) {
Good(uniqLevels.map { case ((classif, depth), idx) => (idx, (classif, depth)) }.toMap)
} else {
Bad(One(s"Error: There are no valid values in the dataset."))
}
val hierarchies = concatRDD.map { case (value, levels) => levels.map(_._1).toList }.distinct().collect()
val idxHierarchies = hierarchies
.map(levels => levels.flatMap(hierarchy => uniqLevels
.filter{case ((level, depth), idxLevel) => level == hierarchy}.map{case ((level, depth), idxLevel) => idxLevel }))
val pointers = idxHierarchies.flatMap(levels => levels.zipWithIndex.map{case (top, idx) => (top, if (idx < (levels.length - 1)) levels(idx + 1) else -1)})
.distinct.groupBy(_._1).map{case (top, below) => (top, below.map(_._2))}
val dataByLevelsRDD = uniqLevelsRDD.join(levelsRDD).map { case ((name, depth), (idxLevel, (idx, value, d1d2))) => Level(idxLevel, depth, idx, value, d1d2) }
val freqByLevel = calcFrequenciesLevels(dataByLevelsRDD)
withGood(levels, Good(pointers), Good(freqByLevel), Good(dataByLevelsRDD)) {
DataByLevel(_, _, _, _)
}
} catch {
case ex: Exception => Bad(One(s"Error: ${ex.getMessage}"))
} finally {
sc.setJobDescription("")
}
}
protected def calcFrequenciesLevels(levelsRDD: RDD[Level])(implicit jobId: JobId): Array[FreqByLevel] = {
val sc = levelsRDD.sparkContext
try {
sc.setJobDescription(jobId.id + ".calcFrequenciesLevels")
val levelsCountRDD = levelsRDD
.map { case Level(idxLevel, depth, idx, value, d1d2) => ((idxLevel, d1d2), 1) }
.reduceByKey(_ + _)
.map { case ((idxLevel, d1d2), count) => (idxLevel, (d1d2, count)) }
val freqLevels = levelsCountRDD.groupByKey().map { case (idxLevel, counts) => FreqByLevel(idxLevel, calcFrequencies(counts.toList)) }.collect()
freqLevels
} finally {
sc.setJobDescription("")
}
}
def loadData(sc: SparkContext, filePath: String)(implicit jobId: JobId): DataByLevelMsg = {
try {
sc.setJobDescription(jobId.id + ".loadData")
val dataLevelRDD = sc.textFile(filePath)
.map(line => line.split(",")
.map(_.trim.replace("\"","")))
.map(line => (parseDouble(line(0)), line.slice(1,line.length)))
.map{ case (value, levels) => (value, if (levels.length == 0) Array("") else levels) }
.filter { case (value, levels) => value match { case Some(v) if v != 0.0 => true; case Some(v) if v == 0.0 => false; case None => false } }
.map { case (value, levels) => (value.getOrElse(0.0), levels) }
.zipWithIndex()
.map { case ((value, levels), idx) => ((idx, value, findD1D2(value)), levels) }
val dataByLevel = findLevels(dataLevelRDD)
dataByLevel
} catch {
case ex: Exception => Bad(One(s"Error: ${ex.getMessage}"))
} finally {
sc.setJobDescription("")
}
}
def calcBasicBoot(sc: SparkContext, data: DataByLevelMsg, numSamples: Int): BasicBootMsg = {
try {
withGood(data) { (dbl) =>
val sampleSize = dbl.freqByLevel.filter { case FreqByLevel(idxLevel, freq) => idxLevel == 0 }(0).freq.count
val aliasMap = dbl.freqByLevel.map { case FreqByLevel(idxLevel, freq) => (idxLevel, buildAliasTable(freq.freqD1D2)) }.toMap
val aliasMapBenf = dbl.freqByLevel.map { case FreqByLevel(idxLevel, freq) => (idxLevel, buildAliasTable(BenfordProbabilitiesD1D2)) }.toMap
val bootTableRDD = generateBootstrapTable(sc, sampleSize, numSamples)
BasicBoot(aliasMap, aliasMapBenf, bootTableRDD)
}
} catch {
case ex: Exception => Bad(One(s"Error: ${ex.getMessage}"))
}
}
def calcSampleCIs(basicBoot: BasicBootMsg, dataStatsRDD: DataStatsMsg, data: DataByLevelMsg, groupId: Int): StatsCIByLevelMsg = {
try {
withGood(basicBoot, dataStatsRDD, data) { (basicBoot, dataStatsRDD, data) =>
val bootRDD = generateBootstrapOutcomes(basicBoot.bootTableRDD, data, basicBoot.aliasMap, groupId)
val momentsRDD = calcMomentsSamples(bootRDD, groupId)
val statsRDD = calcStatsSamples(momentsRDD)
val groupStatsRDD = groupStats(statsRDD)
val statsCIRDD = calcStatsCIs(dataStatsRDD, groupStatsRDD, Array(0.975, 0.99))
statsCIRDD
}
} catch {
case ex: Exception => Bad(One(s"Error: ${ex.getMessage}"))
}
}
def calcResults(bootSampleRDD: StatsCIByLevelMsg, bootBenfordRDD: StatsCIByLevelMsg): ResultsByLevelMsg = {
try {
withGood(bootSampleRDD, bootBenfordRDD) { (bootSampleRDD, bootBenfordRDD) =>
val overlapRDD = calcOverlaps(bootSampleRDD, bootBenfordRDD)
calcResultsByLevel(overlapRDD)
}
} catch {
case ex: Exception => Bad(One(s"Error: ${ex.getMessage}"))
}
}
def getCIs(statsCIRDD: StatsCIByLevelMsg)(implicit jobId: JobId): JsValue = {
try {
statsCIRDD match {
case Good(rdd) => {
val sc = rdd.sparkContext
sc.setJobDescription(jobId.id + ".getCIs")
val CIs = rdd.collect()
sc.setJobDescription("")
val json = Json.toJson(CIs)
pruneCIs(json)
}
case Bad(e) => Json.obj("error" -> Json.toJson(e.head))
}
} catch {
case ex: Exception => Json.obj("error" -> Json.toJson(s"Error: ${ex.getMessage}"))
}
}
def getResults(resultsRDD: ResultsByLevelMsg)(implicit jobId: JobId): JsValue = {
try {
resultsRDD match {
case Good(rdd) => {
val sc = rdd.sparkContext
sc.setJobDescription(jobId.id + ".getResultsByGroupId")
val res = rdd.collect()
sc.setJobDescription("")
val json = Json.toJson(res)
pruneResults(json)
}
case Bad(e) => Json.obj("error" -> Json.toJson(e.head))
}
} catch {
case ex: Exception => Json.obj("error" -> Json.toJson(s"Error: ${ex.getMessage}"))
}
}
def getFrequenciesByGroupId(data: DataByLevelMsg, groupId: Int): JsValue = {
try {
data match {
case Good(dbl) => {
val frequencies = dbl.freqByLevel.filter { case FreqByLevel(idxLevel, freq) => idxLevel == groupId }
.map { case FreqByLevel(idxLevel, freq) => freq }
Json.toJson(frequencies)
}
case Bad(e) => Json.obj("error" -> Json.toJson(e.head))
}
} catch {
case ex: Exception => Json.obj("error" -> Json.toJson(s"Error: ${ex.getMessage}"))
}
}
def getFrequenciesByLevel(data: DataByLevelMsg, level: Int): JsValue = {
try {
data match {
case Good(dbl) => {
val groupIds = dbl.levels.filter { case (idxLevel, (name, depth)) => depth == level }.keySet
val frequencies = dbl.freqByLevel.filter { case FreqByLevel(idxLevel, freq) => groupIds.contains(idxLevel) }
.map { case FreqByLevel(idxLevel, freq) => freq }
Json.toJson(frequencies)
}
case Bad(e) => Json.obj("error" -> Json.toJson(e.head))
}
} catch {
case ex: Exception => Json.obj("error" -> Json.toJson(s"Error: ${ex.getMessage}"))
}
}
def getGroups(data: DataByLevelMsg): JsValue = {
try {
data match {
case Good(dbl) => {
val groups = (dbl.levels.toList.sortBy(_._1) zip dbl.hierarchy.toList.sortBy(_._1))
.map { case ((idxLevel, (name, depth)), (idx, children)) => Group(idxLevel, depth, name.substring(2), children.sorted) }
Json.toJson(groups)
}
case Bad(e) => Json.obj("error" -> Json.toJson(e.head))
}
} catch {
case ex: Exception => Json.obj("error" -> Json.toJson(s"Error: ${ex.getMessage}"))
}
}
def getTestsByGroupId(data: DataByLevelMsg, groupId: Int): JsValue = {
try {
data match {
case Good(dbl) => {
val frequencies = dbl.freqByLevel.filter { case FreqByLevel(idxLevel, freq) => idxLevel == groupId }
.map { case FreqByLevel(idxLevel, freq) => freq }
Json.obj(
"z" -> Json.toJson(frequencies.map(_.zTest)),
"chisquared" -> Json.toJson(frequencies.map(_.chiTest))
)
}
case Bad(e) => Json.obj("error" -> Json.toJson(e.head))
}
} catch {
case ex: Exception => Json.obj("error" -> Json.toJson(s"Error: ${ex.getMessage}"))
}
}
def getTestsByLevel(data: DataByLevelMsg, level: Int): JsValue = {
try {
data match {
case Good(dbl) => {
val groupIds = dbl.levels.filter { case (idxLevel, (name, depth)) => depth == level }.keySet
val frequencies = dbl.freqByLevel.filter { case FreqByLevel(idxLevel, freq) => groupIds.contains(idxLevel) }
.map { case FreqByLevel(idxLevel, freq) => freq }
Json.obj(
"z" -> Json.toJson(frequencies.map(_.zTest)),
"chisquared" -> Json.toJson(frequencies.map(_.chiTest))
)
}
case Bad(e) => Json.obj("error" -> Json.toJson(e.head))
}
} catch {
case ex: Exception => Json.obj("error" -> Json.toJson(s"Error: ${ex.getMessage}"))
}
}
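  // Added note (not part of the original source): getSuspiciousGroups expects the JSON produced by
  // getResults, i.e. an array of objects shaped roughly like
  //   { "id": 3, "results": { "n": 1200, "statsDiag": -1, "regsDiag": -1, ... } }
  // and returns the group ids whose diagnostics equal -1 (non-compliant); the regression-based list is
  // restricted to groups with at least 1000 records.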
def getSuspiciousGroups(jsonResults: JsValue): JsValue = {
val statTransf = ((__ \ 'id).json.pick and (__ \ 'results \ 'statsDiag).json.pick) reduce
val statsSusp = jsonResults.as[List[JsValue]]
.map(_.transform(statTransf).get)
.map(r => (r(0).as[Int], r(1).as[Int]))
.filter{case (idxLevel, compliant) => compliant == -1}
.sorted
.map{case (idxLevel, compliant) => JsNumber(idxLevel)}.toSeq
val regsFilter = (__ \ 'results \ 'n).json.pick
val regsTransf = ((__ \ 'id).json.pick and (__ \ 'results \ 'regsDiag).json.pick) reduce
val regsSusp = jsonResults.as[List[JsValue]]
.filter(_.transform(regsFilter).get.as[Int] >= 1000)
.map(_.transform(regsTransf).get)
.map(r => (r(0).as[Int], r(1).as[Int]))
.filter{case (idxLevel, compliant) => compliant == -1}
.sorted
.map{case (idxLevel, compliant) => JsNumber(idxLevel)}.toSeq
Json.obj(
"stats" -> JsArray(statsSusp),
"regs" -> JsArray(regsSusp)
)
}
}
object Bootstrap {
def apply() = new Bootstrap
} | dvgodoy/spark-benford-analysis | src/main/scala/com/dvgodoy/spark/benford/distributions/Bootstrap.scala | Scala | apache-2.0 | 17,829 |
package com.wavesplatform.transaction
import com.google.common.primitives.Bytes
import com.wavesplatform.account.{AddressScheme, Alias, KeyPair, PrivateKey, PublicKey}
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.crypto
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.lang.ValidationError
import com.wavesplatform.transaction.serialization.impl.CreateAliasTxSerializer
import com.wavesplatform.transaction.validation.impl.CreateAliasTxValidator
import monix.eval.Coeval
import play.api.libs.json.JsObject
import scala.util.Try
final case class CreateAliasTransaction(
version: TxVersion,
sender: PublicKey,
aliasName: String,
fee: TxAmount,
timestamp: TxTimestamp,
proofs: Proofs,
chainId: Byte
) extends SigProofsSwitch
with VersionedTransaction
with TxWithFee.InWaves
with LegacyPBSwitch.V3 {
lazy val alias: Alias = Alias.createWithChainId(aliasName, chainId).explicitGet()
override def builder: TransactionParser = CreateAliasTransaction
override val bodyBytes: Coeval[Array[TxVersion]] = Coeval.evalOnce(CreateAliasTransaction.serializer.bodyBytes(this))
override val bytes: Coeval[Array[TxVersion]] = Coeval.evalOnce(CreateAliasTransaction.serializer.toBytes(this))
override val json: Coeval[JsObject] = Coeval.evalOnce(CreateAliasTransaction.serializer.toJson(this))
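  // Added note (not part of the original source): for V1/V2 the id is derived from the type id plus the
  // alias bytes only, while later versions hash the full transaction body, as computed below.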
override val id: Coeval[ByteStr] = Coeval.evalOnce {
val payload = version match {
case TxVersion.V1 | TxVersion.V2 => Bytes.concat(Array(builder.typeId), alias.bytes)
case _ => bodyBytes()
}
ByteStr(crypto.fastHash(payload))
}
}
object CreateAliasTransaction extends TransactionParser {
type TransactionT = CreateAliasTransaction
val supportedVersions: Set[TxVersion] = Set(1, 2, 3)
val typeId: TxType = 10: Byte
implicit val validator = CreateAliasTxValidator
val serializer = CreateAliasTxSerializer
implicit def sign(tx: CreateAliasTransaction, privateKey: PrivateKey): CreateAliasTransaction =
tx.copy(proofs = Proofs(crypto.sign(privateKey, tx.bodyBytes())))
override def parseBytes(bytes: Array[TxVersion]): Try[CreateAliasTransaction] =
serializer.parseBytes(bytes)
def create(
version: TxVersion,
sender: PublicKey,
aliasName: String,
fee: TxAmount,
timestamp: TxTimestamp,
proofs: Proofs,
chainId: Byte = AddressScheme.current.chainId
): Either[ValidationError, TransactionT] =
CreateAliasTransaction(version, sender, aliasName, fee, timestamp, proofs, chainId).validatedEither
def signed(
version: TxVersion,
sender: PublicKey,
alias: String,
fee: TxAmount,
timestamp: TxTimestamp,
signer: PrivateKey,
chainId: Byte = AddressScheme.current.chainId
): Either[ValidationError, TransactionT] =
create(version, sender, alias, fee, timestamp, Nil, chainId).map(_.signWith(signer))
def selfSigned(version: TxVersion, sender: KeyPair, aliasName: String, fee: TxAmount, timestamp: TxTimestamp, chainId: Byte = AddressScheme.current.chainId): Either[ValidationError, TransactionT] =
signed(version, sender.publicKey, aliasName, fee, timestamp, sender.privateKey, chainId)
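  // Hypothetical usage sketch (added note, not part of the original source; fee, timestamp and key pair
  // values are made up):
  //   CreateAliasTransaction.selfSigned(TxVersion.V2, senderKeyPair, "my-alias", 100000L, System.currentTimeMillis())
  // yields either a ValidationError or a transaction already signed with the sender's private key.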
}
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/transaction/CreateAliasTransaction.scala | Scala | mit | 3,301 |
package swiss.sib.analytics.server.logs.utils
object EndPointUtils {
  val FIRST_LEVEL_PATH_PATTERN = """(\/\w*)(\/)?(.*)?""".r
def getFirstLevelPath(endpoint: String): String = {
endpoint match {
      case FIRST_LEVEL_PATH_PATTERN(firstLevelPath, _, _) => firstLevelPath
      case _ => "not-defined"
}
}
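  // Illustrative examples (added note, not part of the original source):
  //   getFirstLevelPath("/proteins/P12345") == "/proteins"
  //   getFirstLevelPath("/proteins")        == "/proteins"
  //   getFirstLevelPath("no-leading-slash") == "not-defined"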
} | sib-swiss/server-log-analytics | src/main/scala/swiss/sib/analytics/server/logs/utils/EndPointUtils.scala | Scala | gpl-2.0 | 339 |
package org.antipathy.mvn_scalafmt.model
// $COVERAGE-OFF$
/** Class representing the result of a format on a file
* @param name The name of the file
* @param details a summary of the file
*/
case class FileSummary(name: String, details: String) {
override def toString: String = s"$details: $name"
}
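// Illustrative example (added note, not part of the original source):
//   FileSummary("Foo.scala", "Requires formatting").toString == "Requires formatting: Foo.scala"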
// $COVERAGE-ON$
| SimonJPegg/mvn_scalafmt | src/main/scala/org/antipathy/mvn_scalafmt/model/FileSummary.scala | Scala | apache-2.0 | 328 |
package org.jetbrains.plugins.scala.debugger
import com.intellij.debugger.engine.DebugProcess
import com.intellij.debugger.{PositionManager, PositionManagerFactory}
import org.jetbrains.plugins.scala.extensions.invokeLater
import org.jetbrains.plugins.scala.project.ProjectExt
import org.jetbrains.plugins.scala.statistics.{FeatureKey, Stats}
/**
* User: Alefas
* Date: 14.10.11
*/
class ScalaPositionManagerFactory extends PositionManagerFactory {
override def createPositionManager(process: DebugProcess): PositionManager = {
invokeLater {
Stats.trigger(process.getProject.hasScala, FeatureKey.debuggerTotal)
}
new ScalaPositionManager(process)
}
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/debugger/ScalaPositionManagerFactory.scala | Scala | apache-2.0 | 677 |
package com.eevolution.context.dictionary.domain.model
import ai.x.play.json.Jsonx
import com.eevolution.context.dictionary.api.{ActiveEnabled, DomainModel, Identifiable, Traceable}
import org.joda.time.DateTime
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/e-Evolution
* Created by [email protected] , www.e-evolution.com on 18/02/17.
*/
/**
* User Entity
* @param userId User ID
* @param tenantId Tenant ID
* @param organizationId Organization ID
* @param isActive Is Active
* @param created Created
* @param createdBy Created By
* @param updated Updated
* @param updatedBy Updated By
* @param name Name
* @param description Description
* @param password Password
* @param email Email
* @param supervisorId Supervisor ID
* @param partnerId Partner ID
* @param processing Processing
* @param emailUser Email User
* @param emailUserPW Email User PW
* @param partnerLocationId Partner Location ID
* @param greetingId Greeting ID
* @param title Title
* @param comments Comments
* @param phone Phone
* @param phone2 Phone 2
* @param fax Fax
* @param lastContact Last Contact
* @param lastResult Last Result
* @param birthday Birthday
* @param orgTrxId Org TRX ID
* @param emailVerify Email Verify
* @param emailVerifyDate Email Verify Date
* @param notificationType Notification Type
* @param isFullBPAccess Is Full BP Access
* @param jobId Job ID
* @param ldapUser LDAP User
* @param connectionProfile Connection Profile
* @param value Value
* @param userPin User Pin
* @param isInPayroll Is In Payroll
* @param salt Salt
* @param isSalesLead Is Sales Lead
* @param locationId Location ID
* @param leadSource Lead Source
* @param leadStatus Lead Status
* @param leadSourceDescription Lead Source Description
* @param leadStatusDescription Lead Status Description
* @param campaignId Campaign ID
* @param salesRepId Sales Rep ID
* @param bpName BP Name
* @param bpLocationId BP Location ID
* @param emailConfigId Email Config
* @param isLogInUser Is Log In User
* @param isInternalUser Is Internal User
* @param isWebStoreUser Is Web Store User
* @param recentItemsMaxSaved Recent Items Max Saved
* @param recentItemsMaxShown Recent Items Max Shown
* @param uuid UUID
*/
case class User(userId : Int ,
tenantId: Int,
organizationId : Int = 0 ,
isActive : Boolean = true,
created : DateTime = DateTime.now,
createdBy : Int ,
updated : DateTime = DateTime.now ,
updatedBy : Int,
name: String,
description: Option[String],
password : Option[String] ,
email : Option[String],
supervisorId : Option[Int],
partnerId : Option[Int] ,
processing : Boolean = false ,
emailUser : Option[String] ,
emailUserPW : Option[String] ,
partnerLocationId : Option[Int] ,
greetingId : Option [Int] ,
title : Option[String],
comments : Option[String] ,
phone : Option[String],
phone2 : Option[String] ,
fax : Option[String],
lastContact : Option[DateTime] ,
lastResult : Option[String],
birthday : Option[DateTime],
orgTrxId : Option[Int],
emailVerify : Option[String],
emailVerifyDate : Option[DateTime],
notificationType : String = "E",
isFullBPAccess : Boolean = true ,
jobId : Option[Int],
ldapUser : Option[String],
connectionProfile : Option[String],
value : Option[String],
userPin : Option[String],
isInPayroll : Boolean = true,
salt : Option[String],
isSalesLead : Boolean = false ,
locationId : Option[Int],
leadSource : Option[String] ,
leadStatus : Option[String] ,
leadSourceDescription : Option[String],
leadStatusDescription : Option[String],
campaignId : Option[Int],
salesRepId : Option[Int],
bpName : Option[String],
bpLocationId : Option[Int],
emailConfigId : Option[Int],
isLogInUser: Boolean = false,
isInternalUser: Boolean = false,
isWebStoreUser: Boolean = false,
recentItemsMaxSaved: Int = 50,
recentItemsMaxShown: Int = 20,
uuid: String
) extends DomainModel
with ActiveEnabled
with Identifiable
with Traceable {
override type ActiveEnabled = this.type
override type Identifiable = this.type
override type Traceable = this.type
override def Id: Int = userId
override val entityName: String = "AD_User"
override val identifier: String = "AD_User_ID"
}
object User {
implicit lazy val jsonFormat = Jsonx.formatCaseClass[User]
def create(userId : Int ,
tenantId: Int,
organizationId : Int,
isActive : Boolean,
created : DateTime,
createdBy : Int ,
updated :DateTime ,
updatedBy : Int,
name: String,
description: String,
password : String ,
email : String,
supervisorId : Int,
partnerId : Int ,
processing : Boolean ,
emailUser : String ,
emailUserPW : String ,
partnerLocationId : Int ,
greetingId : Int ,
title : String,
comments : String ,
phone : String,
phone2 : String ,
fax : String,
lastContact : DateTime,
lastResult : String,
birthday : DateTime,
orgTrxId : Int,
emailVerify : String,
emailVerifyDate : DateTime,
notificationType : String,
isFullBPAccess : Boolean ,
jobId : Int,
ldapUser : String,
connectionProfile : String,
value : String,
userPin : String,
isInPayroll : Boolean,
salt : String,
isSalesLead : Boolean ,
locationId : Int,
leadSource : String ,
leadStatus : String ,
leadSourceDescription : String,
leadStatusDescription : String,
campaignId : Int,
salesRepId : Int,
bpName : String,
bpLocationId : Int,
emailConfigId : Int,
isLogInUser: Boolean,
isInternalUser: Boolean,
isWebStoreUser: Boolean,
recentItemsMaxSaved: Int,
recentItemsMaxShown: Int,
uuid: String) = User(userId, tenantId, organizationId, isActive, created, createdBy, updated,
updatedBy,name, None, None, None, None, None, processing, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, notificationType, isFullBPAccess, None, None, None,
None, None, isInPayroll, None, isSalesLead, None, None, None, None, None, None, None, None, None,
None, isLogInUser, isInternalUser, isWebStoreUser, recentItemsMaxSaved, recentItemsMaxShown, uuid)
} | adempiere/ADReactiveSystem | dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/model/User.scala | Scala | gpl-3.0 | 8,338 |
package impl
import java.nio.file.{Files, StandardOpenOption}
import java.util.Date
import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.server._
import com.lightbend.lagom.scaladsl.devmode.LagomDevModeComponents
import play.api.libs.ws.ahc.AhcWSComponents
import api.BazService
import com.softwaremill.macwire._
class BazLoader extends LagomApplicationLoader {
override def load(context: LagomApplicationContext): LagomApplication =
new BazApplication(context) {
override def serviceLocator = NoServiceLocator
}
override def loadDevMode(context: LagomApplicationContext): LagomApplication =
new BazApplication(context) with LagomDevModeComponents
}
abstract class BazApplication(context: LagomApplicationContext)
extends LagomApplication(context)
with AhcWSComponents {
  override lazy val lagomServer = serverFor[BazService](wire[BazServiceImpl])
  Files.write(environment.getFile("target/reload.log").toPath, s"${new Date()} - reloaded\n".getBytes("utf-8"),
StandardOpenOption.CREATE, StandardOpenOption.APPEND)
}
| rstento/lagom | dev/sbt-plugin/src/sbt-test/sbt-plugin/run-all-scaladsl/c/src-c/main/scala/impl/BazLoader.scala | Scala | apache-2.0 | 1,113 |
package blended.testsupport.pojosr
import java.io.File
import java.util.Properties
import blended.container.context.impl.internal.AbstractContainerContextImpl
import com.typesafe.config.impl.Parseable
import com.typesafe.config.{Config, ConfigFactory, ConfigObject, ConfigParseOptions}
import scala.beans.BeanProperty
import scala.jdk.CollectionConverters._
class MockContainerContext(baseDir : String, ctid : String) extends AbstractContainerContextImpl {
initialize()
@BeanProperty
override lazy val containerDirectory : String = baseDir
@BeanProperty
override lazy val containerConfigDirectory : String = containerDirectory + "/etc"
@BeanProperty
override lazy val containerLogDirectory : String = baseDir
@BeanProperty
override lazy val profileDirectory : String = containerDirectory
@BeanProperty
override lazy val profileConfigDirectory : String = containerConfigDirectory
@BeanProperty
override lazy val containerHostname : String = "localhost"
@BeanProperty
override lazy val uuid : String = ctid
private def getSystemProperties() : Properties = {
// Avoid ConcurrentModificationException due to parallel setting of system properties by copying properties
val systemProperties = System.getProperties()
val systemPropertiesCopy = new Properties()
systemProperties.entrySet().asScala.foreach { kv =>
systemPropertiesCopy.put(kv.getKey(), kv.getValue())
}
systemPropertiesCopy
}
private def loadSystemProperties() : ConfigObject = {
Parseable
.newProperties(
getSystemProperties(),
ConfigParseOptions.defaults().setOriginDescription("system properties")
)
.parse()
}
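  // Added note (not part of the original source): the configuration below layers
  // <profileConfigDirectory>/application.conf on top of JVM system properties, which in turn fall back
  // to environment variables, so keys in application.conf take precedence over -D flags and env vars.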
override lazy val containerConfig : Config = {
val sysProps = loadSystemProperties()
val envProps = ConfigFactory.systemEnvironment()
ConfigFactory
.parseFile(
new File(profileConfigDirectory, "application.conf"),
ConfigParseOptions.defaults().setAllowMissing(false)
)
.withFallback(sysProps)
.withFallback(envProps)
.resolve()
}
}
| woq-blended/blended | blended.testsupport.pojosr/src/main/scala/blended/testsupport/pojosr/MockContainerContext.scala | Scala | apache-2.0 | 2,088 |
/*
* The MIT License (MIT)
* <p>
* Copyright (c) 2017-2020
* <p>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* <p>
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* <p>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package io.techcode.streamy.util.parser
import akka.util.ByteString
import scala.language.implicitConversions
/**
 * Represents a [[ByteString]] parser that provides an efficient way to parse a [[ByteString]].
*/
trait ByteStringParser[Out] extends Parser[ByteString, Out] {
override def parse(raw: ByteString): Either[ParseException, Out] = {
_length = raw.length
super.parse(raw)
}
@inline def current(): Char = (data(_cursor) & 0xFF).toChar
final def slice(): ByteString = data.slice(_mark, _cursor)
/**
* Returns byte at current cursor position without bounds check.
* Advances cursor by 1.
*
* @return current byte.
*/
def readByte(): Byte = {
val b = data(_cursor)
skip()
b
}
/**
* Returns byte at current cursor position.
   * If the input has less than 1 byte left, a padding is applied.
* Advances cursor by 1.
*
* @return current byte.
*/
def readBytePadded(): Byte = remainingSize match {
case 0 => -1
case _ => readByte()
}
/**
* Returns the next two bytes as an unsigned 16-bit value,
* with the first becoming the more-significant byte (i.e. big endian/network byte order),
* if possible without any range checks.
* Advances cursor by 2.
*/
def readDoubleByte(): Char = {
val c = _cursor
skip(2)
((data(c) << 8) | data(c + 1) & 0xFF).toChar
}
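  // Worked example (added note, not part of the original source): if the next two bytes are 0x12 and 0x34,
  // readDoubleByte() returns (0x12 << 8) | (0x34 & 0xFF) = 0x1234 as a Char, i.e. big-endian byte order.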
/**
* Returns the next two bytes as an unsigned 16-bit value,
* with the first becoming the more-significant byte (i.e. big endian/network byte order).
* If the input has less than 2 bytes left, a padding is applied.
*/
def readDoubleBytePadded(): Char = remainingSize match {
    case 0 => '\uFFFF'
case 1 => ((readByte() << 8) | 0xFF).toChar
case _ => readDoubleByte()
}
/**
* Returns the next four bytes as an [[Int]],
* with the first becoming the most-significant byte (i.e. big endian/network byte order),
* if possible without any range checks.
* Advances the cursor by 4.
*/
def readQuadByte(): Int = {
val c = _cursor
skip(4)
data(c) << 24 |
(data(c + 1) & 0xFF) << 16 |
(data(c + 2) & 0xFF) << 8 |
data(c + 3) & 0xFF
}
/**
* Returns the next four bytes as an [[Int]],
* with the first becoming the most-significant byte (i.e. big endian/network byte order).
* If the input has less than 4 bytes left, a padding is applied and its result returned.
*/
def readQuadBytePadded(): Int = remainingSize match {
case 0 => 0xFFFFFFFF
case 1 => (readByte() << 24) | 0xFFFFFF
case 2 => (readDoubleByte() << 16) | 0xFFFF
case 3 => (readDoubleByte() << 16) | ((readByte() & 0xFF) << 8) | 0xFF
case _ => readQuadByte()
}
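  // Worked example (added note, not part of the original source): with only two bytes 0xAB 0xCD remaining,
  // readQuadBytePadded() takes the "case 2" branch and yields (0xABCD << 16) | 0xFFFF = 0xABCDFFFF,
  // i.e. the missing low-order bytes are padded with 0xFF.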
/**
   * Returns the next eight bytes as a [[Long]],
* with the first becoming the most-significant byte (i.e. big endian/network byte order),
* if possible without any range checks.
* Advances the cursor by 8.
*/
def readOctaByte(): Long = {
val c = _cursor
skip(8)
data(c).toLong << 56 |
(data(c + 1) & 0xFFL) << 48 |
(data(c + 2) & 0xFFL) << 40 |
(data(c + 3) & 0xFFL) << 32 |
(data(c + 4) & 0xFFL) << 24 |
(data(c + 5) & 0xFFL) << 16 |
(data(c + 6) & 0xFFL) << 8 |
data(c + 7) & 0xFFL
}
/**
* Returns the next eight bytes as a [[Long]],
* with the first becoming the most-significant byte (i.e. big endian/network byte order).
* If the input has less than 8 bytes left, a padding is applied and its result returned.
*/
def readOctaBytePadded(): Long = remainingSize match {
case 0 => 0xFFFFFFFFFFFFFFFFL
case 1 => (readByte().toLong << 56) | 0xFFFFFFFFFFFFFFL
case 2 => (readDoubleByte().toLong << 48) | 0xFFFFFFFFFFFFL
case 3 => (readDoubleByte().toLong << 48) | ((readByte() & 0XFFL) << 40) | 0xFFFFFFFFFFL
case 4 => (readQuadByte().toLong << 32) | 0xFFFFFFFFL
case 5 => (readQuadByte().toLong << 32) | ((readByte() & 0xFFL) << 24) | 0xFFFFFFL
case 6 => (readQuadByte().toLong << 32) | ((readDoubleByte() & 0xFFFFL) << 16) | 0xFFFFL
case 7 => (readQuadByte().toLong << 32) | ((readDoubleByte() & 0xFFFFL) << 16) | ((readByte() & 0xFFL) << 8) | 0xFFL
case _ => readOctaByte()
}
}
| amannocci/streamy | core/src/main/scala/io/techcode/streamy/util/parser/ByteStringParser.scala | Scala | mit | 5,416 |
package filodb.akkabootstrapper
import scala.collection.immutable.Seq
import scala.util.{Failure, Success}
import scala.util.control.NonFatal
import akka.actor.{Address, AddressFromURIString}
import akka.cluster.Cluster
import com.typesafe.scalalogging.StrictLogging
import scalaj.http.{Http, HttpResponse}
/** Seed node strategy. Some implementations discover, some simply read from immutable config. */
abstract class ClusterSeedDiscovery(val cluster: Cluster,
val settings: AkkaBootstrapperSettings) extends StrictLogging {
import io.circe.parser.decode
import io.circe.generic.auto._
@throws(classOf[DiscoveryTimeoutException])
def discoverClusterSeeds: Seq[Address] = {
discoverExistingCluster match {
case Seq() => discoverPeersForNewCluster
case nonEmpty: Seq[Address] => nonEmpty
}
}
@throws(classOf[DiscoveryTimeoutException])
//scalastyle:off null
protected def discoverPeersForNewCluster: Seq[Address]
protected def discoverExistingCluster: Seq[Address] = {
val seedsEndpoint = settings.seedsBaseUrl + settings.seedsPath
var response: HttpResponse[String] = null
var retriesRemaining = settings.seedsHttpRetries
do {
try {
logger.info(s"Trying to fetch seeds from $seedsEndpoint ... $retriesRemaining retries remaining.")
response = Http(seedsEndpoint).timeout(2000, 2000).asString
logger.info(s"Seeds endpoint returned a ${response.code}. Response body was ${response.body}")
} catch {
case NonFatal(e) => {
if (e.isInstanceOf[java.net.ConnectException]) {
// Don't bother logging the full the trace for something which is expected.
e.setStackTrace(new Array[StackTraceElement](0))
}
logger.info(s"Seeds endpoint $seedsEndpoint failed. This is expected on cluster bootstrap", e)
}
}
retriesRemaining -= 1
if (retriesRemaining > 0) Thread.sleep(settings.seedsHttpSleepBetweenRetries.toMillis)
} while ((response == null || !response.is2xx) && retriesRemaining > 0)
if (response == null || !response.is2xx) {
logger.info(s"Giving up on discovering seeds after ${settings.seedsHttpRetries} retries. " +
s"Assuming cluster does not exist. ")
Seq.empty[Address]
} else {
decode[ClusterMembershipHttpResponse](response.body) match {
case Right(membersResponse) =>
logger.info("Cluster exists. Response: {}", membersResponse)
membersResponse.members.sorted.map(a => AddressFromURIString.parse(a))
case Left(ex) =>
logger.error(s"Exception parsing JSON response ${response.body}, returning empty seeds", ex)
Seq.empty[Address]
}
}
}
}
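/*
 * Minimal sketch of a concrete strategy (added illustration, not part of the original source; the
 * seed address shown is made up). A strategy that performs no discovery and simply returns a static
 * seed list only needs to implement discoverPeersForNewCluster:
 *
 *   class StaticSeedDiscovery(cluster: Cluster, settings: AkkaBootstrapperSettings)
 *     extends ClusterSeedDiscovery(cluster, settings) {
 *     override protected def discoverPeersForNewCluster: Seq[Address] =
 *       Seq(AddressFromURIString.parse("akka.tcp://MyCluster@127.0.0.1:2552"))
 *   }
 */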
object ClusterSeedDiscovery {
/** Seed node strategy. Some implementations discover, some simply read them. */
def apply(cluster: Cluster, settings: AkkaBootstrapperSettings): ClusterSeedDiscovery = {
import settings.{seedDiscoveryClass => fqcn}
cluster.system.dynamicAccess.createInstanceFor[ClusterSeedDiscovery](
fqcn, Seq((cluster.getClass, cluster), (settings.getClass, settings))) match {
case Failure(e) =>
throw new IllegalArgumentException(
s"Could not instantiate seed discovery class $fqcn. Please check your configuration", e)
case Success(clazz) => clazz
}
}
}
| velvia/FiloDB | akka-bootstrapper/src/main/scala/filodb/akkabootstrapper/ClusterSeedDiscovery.scala | Scala | apache-2.0 | 3,407 |
package org.jetbrains.plugins.scala
package annotator
package element
import com.intellij.codeInsight.intention.IntentionAction
import com.intellij.codeInspection.ProblemHighlightType
import com.intellij.psi._
import com.intellij.psi.util.PsiTreeUtil.{findCommonContext, findFirstContext}
import org.jetbrains.plugins.scala.annotator.AnnotatorUtils.highlightImplicitView
import org.jetbrains.plugins.scala.annotator.createFromUsage._
import org.jetbrains.plugins.scala.annotator.quickfix.ReportHighlightingErrorQuickFix
import org.jetbrains.plugins.scala.autoImport.quickFix.ScalaImportTypeFix
import org.jetbrains.plugins.scala.codeInspection.varCouldBeValInspection.ValToVarQuickFix
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScConstructorPattern, ScInfixPattern, ScPattern}
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScSimpleTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScMethodLike, ScReference, ScStableCodeReference}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameter, ScParameters}
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScFunction, ScValue, ScVariable}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.{ScImportExpr, ScImportSelector}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.{ScalaFile, ScalaPsiElement}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.impl.expr.{ScInterpolatedExpressionPrefix, ScInterpolatedPatternPrefix}
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticFunction
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.Any
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.Parameter
import org.jetbrains.plugins.scala.lang.resolve.{ReferenceExpressionResolver, ScalaResolveResult}
import org.jetbrains.plugins.scala.lang.scaladoc.parser.parsing.MyScaladocParsing
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.{ScDocResolvableCodeReference, ScDocTag}
// TODO unify with ScMethodInvocationAnnotator and ScConstructorInvocationAnnotator
object ScReferenceAnnotator extends ElementAnnotator[ScReference] {
override def annotate(element: ScReference, typeAware: Boolean)
(implicit holder: ScalaAnnotationHolder): Unit = {
if (typeAware) {
annotateReference(element)
}
qualifierPart(element, typeAware)
}
def qualifierPart(element: ScReference, typeAware: Boolean)
(implicit holder: ScalaAnnotationHolder): Unit =
if (!element.getUserData(ReferenceExpressionResolver.ConstructorProxyHolderKey))
element.qualifier match {
case None => checkNotQualifiedReferenceElement(element, typeAware)
case Some(_) => checkQualifiedReferenceElement(element, typeAware)
}
def annotateReference(reference: ScReference, inDesugaring: Boolean = false)
(implicit holder: ScalaAnnotationHolder): Unit = {
val results = reference.multiResolveScala(false)
if (results.length > 1) {
return
}
for (r <- results) {
if (r.isAssignment) {
annotateAssignmentReference(reference)
}
val refContext = reference.getContext
val targetElement = r.element
if (!r.isApplicable()) {
targetElement match {
case f @ (_: ScFunction | _: PsiMethod | _: ScSyntheticFunction) =>
refContext match {
case _: ScGenericCall =>
case _: MethodInvocation =>
case _ if !reference.is[ScInterpolatedPatternPrefix] =>
r.problems.foreach {
case MissedParametersClause(_) =>
holder.createErrorAnnotation(
reference,
ScalaBundle.message("annotator.error.missing.arguments.for.method", nameWithSignature(f)),
createFixesByUsages(reference)
)
case _ =>
}
case _ =>
}
case _ =>
}
}
}
}
/**
* Annotates: val a = 1; a += 1;
*/
private def annotateAssignmentReference(reference: ScReference)
(implicit holder: ScalaAnnotationHolder): Unit = {
val qualifier = reference.getContext match {
case x: ScMethodCall => x.getEffectiveInvokedExpr match {
case x: ScReferenceExpression => x.qualifier
case _ => None
}
case x: ScInfixExpr => Some(x.left)
case _ => None
}
val refElementOpt = qualifier.flatMap(_.asOptionOfUnsafe[ScReference])
val ref: Option[PsiElement] = refElementOpt.flatMap(_.resolve().toOption)
val reassignment = ref.exists(ScalaPsiUtil.isReadonly)
if (reassignment) {
val maybeFix = ref.get match {
case ScalaPsiUtil.inNameContext(v: ScValue) =>
Some(new ValToVarQuickFix(v))
case _ => None
}
holder.createErrorAnnotation(reference, ScalaBundle.message("annotator.error.reassignment.to.val"), maybeFix)
}
}
private def checkNotQualifiedReferenceElement(refElement: ScReference, typeAware: Boolean)
(implicit holder: ScalaAnnotationHolder): Unit = {
refElement match {
case _: ScInterpolatedExpressionPrefix => return // do not inspect interpolated literal, it will be highlighted in other place
case _ if refElement.isSoft => return
case _ =>
}
val resolve = refElement.multiResolveScala(false)
refElement match {
case _: ScDocResolvableCodeReference =>
if (resolve.isEmpty) {
holder.createWarningAnnotation(
refElement,
ScalaBundle.message("cannot.resolve", refElement.refName),
ScalaImportTypeFix(refElement)
)
}
return
case _ =>
}
if (resolve.length != 1) {
def addUnknownSymbolProblem(): Unit = {
if (resolve.isEmpty) {
createUnknownSymbolProblem(refElement)
}
}
val parent = refElement.getParent
refElement match {
case refElement: ScReferenceExpression =>
// Let's try to hide dynamic named parameter usage
refElement.getContext match {
case assignment@ScAssignment(`refElement`, _) if resolve.isEmpty && assignment.isDynamicNamedAssignment => return
case _ => addUnknownSymbolProblem()
}
case _ =>
parent match {
            case ScInfixPattern(_, `refElement`, _) if refElement.isInstanceOf[ScStableCodeReference] => // todo: this is to hide A op B in patterns
case _: ScImportSelector if resolve.length > 0 =>
case _: ScDocTag =>
holder.createWeakWarningAnnotation(refElement, ScalaBundle.message("cannot.resolve", refElement.refName))
case _ => addUnknownSymbolProblem()
}
}
} else {
def showError(): Unit = {
val error = ScalaBundle.message("forward.reference.detected")
holder.createErrorAnnotation(refElement.nameId, error)
}
refElement.getContainingFile match {
case file: ScalaFile if !file.allowsForwardReferences =>
resolve(0) match {
case r if r.isForwardReference =>
ScalaPsiUtil.nameContext(r.getActualElement) match {
case v: ScValue if !v.hasModifierProperty("lazy") => showError()
case _: ScVariable => showError()
case nameContext if nameContext.isValid =>
                    //if there is no lazy val or var between the reference and the statement then it's a forward reference
val context = findCommonContext(refElement, nameContext)
if (context != null) {
val neighbour = (findFirstContext(nameContext, false, elem => elem.getContext.eq(context)) match {
case s: ScalaPsiElement => s.getDeepSameElementInContext
case elem => elem
}).getPrevSibling
@scala.annotation.tailrec
def check(neighbour: PsiElement): Boolean = {
if (neighbour == null ||
neighbour.getTextRange.getStartOffset <= refElement.getTextRange.getStartOffset) return false
neighbour match {
case v: ScValue if !v.hasModifierProperty("lazy") => true
case _: ScVariable => true
case _ => check(neighbour.getPrevSibling)
}
}
if (check(neighbour)) showError()
}
}
case _ =>
}
case _ =>
}
}
checkAccessForReference(resolve, refElement)
if (resolve.length == 1) {
val resolveResult = resolve(0)
refElement match {
case e: ScReferenceExpression if e.getParent.isInstanceOf[ScPrefixExpr] &&
e.getParent.asInstanceOf[ScPrefixExpr].operation == e =>
resolveResult.implicitFunction match {
case Some(fun) =>
val pref = e.getParent.asInstanceOf[ScPrefixExpr]
val expr = pref.operand
highlightImplicitMethod(expr, resolveResult, refElement, fun)
case _ =>
}
case e: ScReferenceExpression if e.getParent.isInstanceOf[ScInfixExpr] &&
e.getParent.asInstanceOf[ScInfixExpr].operation == e =>
resolveResult.implicitFunction match {
case Some(fun) =>
val inf = e.getParent.asInstanceOf[ScInfixExpr]
val expr = inf.getBaseExpr
highlightImplicitMethod(expr, resolveResult, refElement, fun)
case _ =>
}
case _ =>
}
}
def isOnTopLevel(element: PsiElement) = element match {
case scalaPsi: ScalaPsiElement => !scalaPsi.parents.exists(_.isInstanceOf[ScTypeDefinition])
case _ => false
}
//don't highlight ambiguous definitions, if they are resolved to multiple top-level declarations
//needed for worksheet and scala notebook files (e.g. zeppelin)
def isTopLevelResolve =
resolve.length > 1 && resolve.headOption.map(_.element).filter(isOnTopLevel).exists {
firstElement =>
val fFile = firstElement.getContainingFile
resolve.tail.map(_.element).forall(nextEl => nextEl.getContainingFile == fFile && isOnTopLevel(nextEl))
}
if (typeAware && resolve.length != 1 && !(refElement.containingScalaFile.exists(_.isMultipleDeclarationsAllowed) && isTopLevelResolve)) {
val parent = refElement.getParent
def addCreateApplyOrUnapplyFix(errorWithRefName: String => String, fix: ScTypeDefinition => IntentionAction): Boolean = {
val refWithoutArgs = ScalaPsiElementFactory.createReferenceFromText(refElement.getText, parent.getContext, parent)
if (refWithoutArgs != null && refWithoutArgs.multiResolveScala(false).exists(!_.getElement.isInstanceOf[PsiPackage])) {
// We can't resolve the method call A(arg1, arg2), but we can resolve A. Highlight this differently.
val message = errorWithRefName(refElement.refName)
val typeDefFix = refWithoutArgs match {
case ResolvesTo(obj: ScObject) => fix(obj) :: Nil
case InstanceOfClass(td: ScTypeDefinition) => fix(td) :: Nil
case _ => Nil
}
holder.createErrorAnnotation(
refElement.nameId,
message,
ReportHighlightingErrorQuickFix :: typeDefFix
)
true
} else false
}
parent match {
case _: ScImportSelector if resolve.length > 0 => return
case _: ScMethodCall if resolve.length > 1 =>
val error = ScalaBundle.message("cannot.resolve.overloaded", refElement.refName)
holder.createErrorAnnotation(refElement.nameId, error)
case mc: ScMethodCall if addCreateApplyOrUnapplyFix(
ScalaBundle.message("cannot.resolve.apply.method", _),
td => new CreateApplyQuickFix(td, mc)
) =>
return
case (p: ScPattern) && (_: ScConstructorPattern | _: ScInfixPattern) =>
val errorWithRefName: String => String = ScalaBundle.message("cannot.resolve.unapply.method", _)
if (addCreateApplyOrUnapplyFix(errorWithRefName, td => new CreateUnapplyQuickFix(td, p))) return
case scalaDocTag: ScDocTag if scalaDocTag.getName == MyScaladocParsing.THROWS_TAG => return //see SCL-9490
case _ =>
}
}
}
private def createUnknownSymbolProblem(reference: ScReference)
(implicit holder: ScalaAnnotationHolder) = {
val identifier = reference.nameId
val fixes =
UnresolvedReferenceFixProvider.fixesFor(reference) :+
ReportHighlightingErrorQuickFix :++
        createFixesByUsages(reference) // TODO We can now use UnresolvedReferenceFixProvider to decouple custom fixes from the annotator
holder.createErrorAnnotation(
identifier,
ScalaBundle.message("cannot.resolve", identifier.getText),
ProblemHighlightType.LIKE_UNKNOWN_SYMBOL,
fixes
)
}
private def checkQualifiedReferenceElement(refElement: ScReference, typeAware: Boolean)
(implicit holder: ScalaAnnotationHolder): Unit = {
val resolve = refElement.multiResolveScala(false)
checkAccessForReference(resolve, refElement)
val resolveCount = resolve.length
if (refElement.isInstanceOf[ScExpression] && resolveCount == 1) {
val resolveResult = resolve(0)
resolveResult.implicitFunction match {
case Some(fun) =>
val qualifier = refElement.qualifier.get
val expr = qualifier.asInstanceOf[ScExpression]
highlightImplicitMethod(expr, resolveResult, refElement, fun)
case _ =>
}
}
if (refElement.isInstanceOf[ScDocResolvableCodeReference] && resolveCount > 0 || refElement.isSoft) return
if (typeAware && resolveCount != 1) {
refElement.getParent match {
case _: ScImportSelector | _: ScImportExpr if resolveCount > 0 => return
case _: ScMethodCall if resolveCount > 1 =>
val error = ScalaBundle.message("cannot.resolve.overloaded", refElement.refName)
holder.createErrorAnnotation(refElement.nameId, error)
case _ => createUnknownSymbolProblem(refElement)
}
}
}
def nameWithSignature(f: PsiNamedElement) = nameOf(f) + signatureOf(f)
private def nameOf(f: PsiNamedElement) = f match {
case m: ScMethodLike if m.isConstructor => m.containingClass.name
case _ => f.name
}
private def signatureOf(f: PsiNamedElement): String = {
implicit val tpc: TypePresentationContext = TypePresentationContext(f)
f match {
case f: ScMethodLike =>
if (f.parameters.isEmpty) "" else formatParamClauses(f.parameterList)
case m: PsiMethod =>
val params = m.parameters
if (params.isEmpty) "" else formatJavaParams(params)
case syn: ScSyntheticFunction =>
if (syn.paramClauses.isEmpty) "" else syn.paramClauses.map(formatSyntheticParams).mkString
}
}
private def formatParamClauses(paramClauses: ScParameters)(implicit tpc: TypePresentationContext) = {
def formatParams(parameters: Seq[ScParameter], types: Seq[ScType]) = {
val parts = parameters.zip(types).map {
case (p, t) => t.presentableText + (if(p.isRepeatedParameter) "*" else "")
}
parenthesise(parts)
}
paramClauses.clauses.map(clause => formatParams(clause.parameters, clause.paramTypes)).mkString
}
private def formatJavaParams(parameters: Seq[PsiParameter])(implicit tpc: TypePresentationContext): String = {
val types = parameters.map(_.paramType())
val parts = parameters.zip(types).map {
case (p, t) => t.presentableText + (if(p.isVarArgs) "*" else "")
}
parenthesise(parts)
}
private def formatSyntheticParams(parameters: Seq[Parameter])(implicit tpc: TypePresentationContext): String = {
val parts = parameters.map {
p => p.paramType.presentableText + (if (p.isRepeated) "*" else "")
}
parenthesise(parts)
}
private def parenthesise(items: Seq[_]) = items.mkString("(", ", ", ")")
private def highlightImplicitMethod(expr: ScExpression, resolveResult: ScalaResolveResult, refElement: ScReference,
fun: PsiNamedElement)
(implicit holder: ScalaAnnotationHolder): Unit = {
val typeTo = resolveResult.implicitType match {
case Some(tp) => tp
case _ => Any(expr.projectContext)
}
highlightImplicitView(expr, fun, typeTo, refElement.nameId)
}
private def checkAccessForReference(resolve: Array[ScalaResolveResult], refElement: ScReference)
(implicit holder: ScalaAnnotationHolder): Unit = {
if (resolve.length != 1 || refElement.isSoft || refElement.isInstanceOf[ScDocResolvableCodeReference]) return
resolve(0) match {
case r if !r.isAccessible =>
val error = ScalaBundle.message("symbol.is.inaccessible.from.this.place", r.element.name)
holder.createErrorAnnotation(refElement.nameId, error)
//todo: add fixes
case _ =>
}
}
def createFixesByUsages(reference: ScReference): List[CreateFromUsageQuickFixBase] =
reference match {
case reference: ScReferenceExpression => createFixesByUsages(reference)
case reference: ScStableCodeReference => createFixesByUsages(reference)
case _ => Nil
}
private[this] def createFixesByUsages(reference: ScReferenceExpression): List[CreateFromUsageQuickFixBase] =
reference.getParent match {
case _: ScMethodCall =>
val isUpperCased = reference.refName.headOption.exists(_.isUpper)
new CreateMethodQuickFix(reference) ::
(if (isUpperCased)
new CreateCaseClassQuickFix(reference) :: Nil
else
Nil)
case ScInfixExpr(_, `reference`, _) =>
new CreateMethodQuickFix(reference) :: Nil
case (_: ScGenericCall) childOf (_: ScMethodCall) =>
new CreateMethodQuickFix(reference) :: Nil
case _: ScGenericCall =>
new CreateParameterlessMethodQuickFix(reference) :: Nil
case _ =>
new CreateParameterlessMethodQuickFix(reference) ::
new CreateValueQuickFix(reference) ::
new CreateVariableQuickFix(reference) ::
new CreateObjectQuickFix(reference) ::
Nil
}
private[this] def createFixesByUsages(reference: ScStableCodeReference): List[CreateTypeDefinitionQuickFix] =
reference.getParent match {
case st: ScSimpleTypeElement if st.singleton => Nil
case pattern@(_: ScConstructorPattern | _: ScInfixPattern) =>
new CreateCaseClassQuickFix(reference) ::
new CreateExtractorObjectQuickFix(reference, pattern.asInstanceOf[ScPattern]) ::
Nil
case _ =>
new CreateTraitQuickFix(reference) ::
new CreateClassQuickFix(reference) ::
new CreateCaseClassQuickFix(reference) ::
Nil
}
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/annotator/element/ScReferenceAnnotator.scala | Scala | apache-2.0 | 19,830 |
package com.ibm.watson.developer_cloud.personality_insights.v2.model
import java.util.Date
import com.ibm.watson.developer_cloud.service.GenericModel
/**
* Created by Martin Harvan ([email protected]) on 20/03/16.
*/
case class ContentItem(charset: String, content: String, contentType: String, created: Date, forward: Boolean,
id: String, language: String, parentId: String, reply: Boolean, sourceId: String,
                       update: Date, userId: String) extends GenericModel
| kane77/watson-scala-wrapper | src/main/scala/com/ibm/watson/developer_cloud/personality_insights/v2/model/ContentItem.scala | Scala | apache-2.0 | 520 |
package test.scala.co.skaphe.add.domain
import main.scala.co.skaphe.add.domain._
import org.scalacheck.Gen
import org.scalacheck.Prop._
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
object AddNumbersTest extends Properties("AddNumbersTest") {
// Generates numbers from 0 to 100 (both inclusive)
val generator = Gen.choose(0, 100)
property("addNumbersInList") = {
val emptyList = List()
val accumulator = 0
0 == AddNumbers.addNumbersInList(emptyList, accumulator)
val oneItemList = List(5)
5 == AddNumbers.addNumbersInList(oneItemList, accumulator)
val anyList = List(1,2,3,4,5,6,7,8,9)
45 == AddNumbers.addNumbersInList(anyList, accumulator)
}
property("addNumberNTimes") = forAll(generator, generator) { (number: Int, times: Int) =>
val accumulator = 0
times match {
case 0 => number == AddNumbers.addNumberNTimes(number, times, accumulator)
case _ => (number * times) == AddNumbers.addNumberNTimes(number, times, accumulator)
}
}
}
| skaphe/math-micro-services | add/src/test/scala/co/skaphe/add/domain/AddNumbersTest.scala | Scala | mit | 1,031 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
import com.intellij.lang.ASTNode
import com.intellij.psi._
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil.SafeCheckException
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.base._
import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScParameterizedTypeElement, ScSimpleTypeElement, ScTypeElement}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScNewTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAliasDefinition
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScTypeParam
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypeParametersOwner
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.{ScClassParents, ScExtendsBlock}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.impl.base.types.ScSimpleTypeElementImpl
import org.jetbrains.plugins.scala.lang.psi.types.Compatibility.Expression
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.{Parameter, ScMethodType, ScTypePolymorphicType, TypeParameter}
import org.jetbrains.plugins.scala.lang.psi.types.result.{Failure, Success, TypeResult, TypingContext}
import org.jetbrains.plugins.scala.lang.resolve.{ResolveUtils, ScalaResolveResult}
import scala.collection.Seq
import scala.collection.immutable.HashMap
import scala.collection.mutable.ArrayBuffer
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
class ScConstructorImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScConstructor {
def typeElement: ScTypeElement = findNotNullChildByClass(classOf[ScTypeElement])
override def toString: String = "Constructor"
def expectedType: Option[ScType] = {
getContext match {
case parents: ScClassParents =>
if (parents.typeElements.length != 1) None
else {
parents.getContext match {
case e: ScExtendsBlock =>
e.getContext match {
case n: ScNewTemplateDefinition =>
n.expectedType()
case _ => None
}
case _ => None
}
}
case _ => None
}
}
def newTemplate = {
getContext match {
case parents: ScClassParents =>
parents.getContext match {
case e: ScExtendsBlock =>
e.getContext match {
case n: ScNewTemplateDefinition =>
Some(n)
case _ => None
}
}
case _ => None
}
}
//todo: duplicate ScSimpleTypeElementImpl
def parameterize(tp: ScType, clazz: PsiClass, subst: ScSubstitutor): ScType = {
if (clazz.getTypeParameters.length == 0) {
tp
} else {
ScParameterizedType(tp, clazz.getTypeParameters.map {
case tp: ScTypeParam => new ScTypeParameterType(tp, subst)
case ptp => new ScTypeParameterType(ptp, subst)
})
}
}
def shapeType(i: Int): TypeResult[ScType] = {
val seq = shapeMultiType(i)
if (seq.length == 1) seq(0)
else Failure("Can't resolve type", Some(this))
}
def shapeMultiType(i: Int): Seq[TypeResult[ScType]] = innerMultiType(i, isShape = true)
def multiType(i: Int): Seq[TypeResult[ScType]] = innerMultiType(i, isShape = false)
private def innerMultiType(i: Int, isShape: Boolean): Seq[TypeResult[ScType]] = {
def FAILURE = Failure("Can't resolve type", Some(this))
def workWithResolveResult(constr: PsiMethod, r: ScalaResolveResult,
subst: ScSubstitutor, s: ScSimpleTypeElement,
ref: ScStableCodeReferenceElement): TypeResult[ScType] = {
val clazz = constr.containingClass
val tp = r.getActualElement match {
case ta: ScTypeAliasDefinition => subst.subst(ta.aliasedType.getOrElse(return FAILURE))
case _ =>
parameterize(ScSimpleTypeElementImpl.calculateReferenceType(ref, shapesOnly = true).
getOrElse(return FAILURE), clazz, subst)
}
val res = constr match {
case fun: ScMethodLike =>
val methodType = ScType.nested(fun.methodType(Some(tp)), i).getOrElse(return FAILURE)
subst.subst(methodType)
case method: PsiMethod =>
if (i > 0) return Failure("Java constructors only have one parameter section", Some(this))
ResolveUtils.javaMethodType(method, subst, getResolveScope, Some(subst.subst(tp)))
}
val typeParameters: Seq[TypeParameter] = r.getActualElement match {
case tp: ScTypeParametersOwner if tp.typeParameters.length > 0 =>
tp.typeParameters.map(new TypeParameter(_))
case ptp: PsiTypeParameterListOwner if ptp.getTypeParameters.length > 0 =>
ptp.getTypeParameters.toSeq.map(new TypeParameter(_))
case _ => return Success(res, Some(this))
}
s.getParent match {
case p: ScParameterizedTypeElement =>
val zipped = p.typeArgList.typeArgs.zip(typeParameters)
val appSubst = new ScSubstitutor(new HashMap[(String, String), ScType] ++ zipped.map {
case (arg, typeParam) =>
((typeParam.name, ScalaPsiUtil.getPsiElementId(typeParam.ptp)), arg.getType(TypingContext.empty).getOrAny)
}, Map.empty, None)
Success(appSubst.subst(res), Some(this))
case _ =>
var nonValueType = ScTypePolymorphicType(res, typeParameters)
expectedType match {
case Some(expected) =>
try {
nonValueType = ScalaPsiUtil.localTypeInference(nonValueType.internalType,
Seq(new Parameter("", None, expected, false, false, false, 0)),
Seq(new Expression(ScalaPsiUtil.undefineSubstitutor(nonValueType.typeParameters).
subst(subst.subst(tp).inferValueType))),
nonValueType.typeParameters, shouldUndefineParameters = false, filterTypeParams = false)
} catch {
case s: SafeCheckException => //ignore
}
case _ =>
}
Success(nonValueType, Some(this))
}
}
def processSimple(s: ScSimpleTypeElement): Seq[TypeResult[ScType]] = {
s.reference match {
case Some(ref) =>
val buffer = new ArrayBuffer[TypeResult[ScType]]
val resolve = if (isShape) ref.shapeResolveConstr else ref.resolveAllConstructors
resolve.foreach {
case r@ScalaResolveResult(constr: PsiMethod, subst) =>
buffer += workWithResolveResult(constr, r, subst, s, ref)
case ScalaResolveResult(clazz: PsiClass, subst) if !clazz.isInstanceOf[ScTemplateDefinition] && clazz.isAnnotationType =>
val params = clazz.getMethods.flatMap {
case p: PsiAnnotationMethod =>
val paramType = subst.subst(ScType.create(p.getReturnType, getProject, getResolveScope))
Seq(Parameter(p.getName, None, paramType, paramType, p.getDefaultValue != null, isRepeated = false, isByName = false))
case _ => Seq.empty
}
buffer += Success(ScMethodType(ScDesignatorType(clazz), params, isImplicit = false)(getProject, getResolveScope), Some(this))
case _ =>
}
buffer.toSeq
case _ => Seq(Failure("Hasn't reference", Some(this)))
}
}
simpleTypeElement.toSeq.flatMap(processSimple)
}
def reference: Option[ScStableCodeReferenceElement] = {
simpleTypeElement.flatMap(_.reference)
}
def simpleTypeElement: Option[ScSimpleTypeElement] = typeElement match {
case s: ScSimpleTypeElement => Some(s)
case p: ScParameterizedTypeElement =>
p.typeElement match {
case s: ScSimpleTypeElement => Some(s)
case _ => None
}
case _ => None
}
override def accept(visitor: ScalaElementVisitor) {
visitor.visitConstructor(this)
}
override def accept(visitor: PsiElementVisitor) {
visitor match {
case s: ScalaElementVisitor => s.visitConstructor(this)
case _ => super.accept(visitor)
}
}
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/base/ScConstructorImpl.scala | Scala | apache-2.0 | 8,352 |
package org.jetbrains.plugins.scala.codeInspection.collections
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
/**
* @author Nikolay.Tropin
*/
class RedundantConversionTest extends OperationsOnCollectionInspectionTest{
override val inspectionClass: Class[_ <: OperationOnCollectionInspection] = classOf[RedundantCollectionConversionInspection]
override def hint: String = InspectionBundle.message("redundant.collection.conversion")
def test_1() {
doTest(s"List(1, 2).${START}toList$END",
"List(1, 2).toList",
"List(1, 2)")
}
def test_2() {
doTest(s"Seq(1, 2).${START}to[Seq]$END",
"Seq(1, 2).to[Seq]",
"Seq(1, 2)")
}
def test_3(): Unit = {
doTest(s"Map(1 -> true).${START}toMap[Int, Boolean]$END",
"Map(1 -> true).toMap[Int, Boolean]",
"Map(1 -> true)")
}
def test_4(): Unit = {
doTest(
s"""
|def list() = List(1, 2)
|list().${START}toList$END
""".stripMargin,
"""
|def list() = List(1, 2)
|list().toList
""".stripMargin,
"""
|def list() = List(1, 2)
|list()
""".stripMargin
)
}
def test_5(): Unit = {
doTest(
s"""Seq(1) match {
| case seq => seq.${START}toSeq$END
|}""".stripMargin,
s"""Seq(1) match {
| case seq => seq.toSeq
|}""".stripMargin,
s"""Seq(1) match {
| case seq => seq
|}""".stripMargin
)
}
}
| whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/codeInspection/collections/RedundantConversionTest.scala | Scala | apache-2.0 | 1,492 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datastax.killrweather
import java.util.concurrent.atomic.AtomicInteger
import scala.concurrent.duration._
import akka.actor.Props
import org.joda.time.{DateTimeZone, DateTime}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import com.datastax.spark.connector.streaming._
import com.datastax.spark.connector._
import com.datastax.spark.connector.embedded.{KafkaConsumer, EmbeddedKafka}
class NodeGuardianSpec extends ActorSparkSpec {
import WeatherEvent._
import Weather._
import settings._
// === Tweak as needed ===
val year = 2008
// The first test waits for at least n-messages published to kafka from file data
val publishedToKafka = 8000
val duration = 60.seconds
// === Don't modify ===
system.eventStream.subscribe(self, classOf[NodeInitialized])
val atomic = new AtomicInteger(0)
val kafka = new EmbeddedKafka
kafka.createTopic(KafkaTopicRaw)
val ssc = new StreamingContext(sc, Seconds(SparkStreamingBatchInterval))
val consumer = new KafkaConsumer(kafka.kafkaConfig.zkConnect, KafkaTopicRaw, KafkaGroupId, 1, 10, atomic)
val guardian = system.actorOf(Props(
new NodeGuardian(ssc, kafka, settings)), "node-guardian")
"NodeGuardian" must {
"publish a NodeInitialized to the event stream on initialization" in {
expectMsgPF(10.seconds) {
case NodeInitialized =>
}
}
"return a weather station" in {
guardian ! GetWeatherStation(sample.wsid)
expectMsgPF() {
case e: WeatherStation =>
e.id should be(sample.wsid)
}
}
"get the current weather for a given weather station, based on UTC" in {
      val timestamp = new DateTime(DateTimeZone.UTC).withYear(sample.year).withMonthOfYear(sample.month).withDayOfMonth(sample.day)
      guardian ! GetCurrentWeather(sample.wsid, Some(timestamp))
expectMsgPF() {
case Some(e) =>
e.asInstanceOf[RawWeatherData].wsid should be(sample.wsid)
}
}
"transforms annual weather .gz files to line data and publish to a Kafka topic" in {
awaitCond(atomic.get >= publishedToKafka, duration) // assert process of publishing has started, continues to consume
}
s"streams in data from kafka, transforms it, and saves it to $CassandraTableRaw" in {
val tableData = ssc.cassandraTable(CassandraKeyspace, CassandraTableRaw)
awaitCond(tableData.count >= publishedToKafka, duration)
}
s"streams in data from kafka, transforms it, and saves it to $CassandraTableDailyPrecip" in {
val tableData = ssc.cassandraTable(CassandraKeyspace, CassandraTableDailyPrecip)
awaitCond(tableData.toLocalIterator.size > 500, duration)
}
"consecutive reads on a stream after different computations writing to different tables should still have the raw data" in {
val tableData = ssc.cassandraTable(CassandraKeyspace, CassandraTableDailyPrecip)
awaitCond(tableData.toLocalIterator.size > 500, duration)
}
"computes and return a monthly aggregation to the requester." in {
guardian ! GetPrecipitation(sample.wsid, sample.year)
expectMsgPF(timeout.duration) {
case a: AnnualPrecipitation =>
a.wsid should be (sample.wsid)
a.year should be (sample.year)
}
}
"Return the top k temps for any station in a given year" in {
guardian ! GetTopKPrecipitation(sample.wsid, sample.year, k = 10)
expectMsgPF(timeout.duration) {
case a: TopKPrecipitation =>
a.wsid should be (sample.wsid)
a.year should be (sample.year)
a.top.size should be (10)
}
}
"aggregate hourly wsid temperatures for a given day and year" in {
guardian ! GetDailyTemperature(sample)
expectMsgPF(timeout.duration) {
case aggregate: DailyTemperature =>
validate(Day(aggregate.wsid, aggregate.year, aggregate.month, aggregate.day))
}
}
s"asynchronously store DailyTemperature data in $CassandraTableDailyTemp" in {
val tableData = sc.cassandraTable[DailyTemperature](CassandraKeyspace, CassandraTableDailyTemp)
.where("wsid = ? AND year = ? AND month = ? AND day = ?",
sample.wsid, sample.year, sample.month, sample.day)
awaitCond(tableData.toLocalIterator.toSeq.headOption.nonEmpty, 10.seconds)
val aggregate = tableData.toLocalIterator.toSeq.head
validate(Day(aggregate.wsid, aggregate.year, aggregate.month, aggregate.day))
}
"compute daily temperature rollups per weather station to monthly statistics." in {
guardian ! GetMonthlyHiLowTemperature(sample.wsid, sample.year, sample.month)
expectMsgPF(timeout.duration) {
case aggregate: MonthlyTemperature =>
validate(Day(aggregate.wsid, aggregate.year, aggregate.month, sample.day))
}
}
}
override def afterAll() {
super.afterAll()
ssc.stop(true, false)
consumer.shutdown()
kafka.shutdown()
Thread.sleep(2000) // hrm, no clean shutdown found yet that doesn't throw
}
def validate(aggregate: Day): Unit = {
aggregate.wsid should be(sample.wsid)
aggregate.year should be(sample.year)
aggregate.month should be(sample.month)
aggregate.day should be(sample.day)
}
} | chbatey/killrweather | killrweather-app/src/it/scala/com/datastax/killrweather/NodeGuardianSpec.scala | Scala | apache-2.0 | 6,037 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.javadsl.client
import javax.inject.Inject
import javax.inject.Provider
import javax.inject.Singleton
import akka.actor.ActorSystem
import com.lightbend.lagom.internal.client.CircuitBreakerConfig
import com.lightbend.lagom.internal.client.CircuitBreakerMetricsProviderImpl
import com.lightbend.lagom.internal.client.CircuitBreakerMetricsProviderProvider
import com.lightbend.lagom.internal.spi.CircuitBreakerMetricsProvider
import com.lightbend.lagom.javadsl.client.CircuitBreakersPanel
import play.api.inject.Binding
import play.api.inject.Module
import play.api.Configuration
import play.api.Environment
class CircuitBreakerModule extends Module {
override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = {
Seq(
bind[CircuitBreakersPanel].to[CircuitBreakersPanelImpl],
bind[CircuitBreakerMetricsProvider].toProvider[CircuitBreakerMetricsProviderProvider],
bind[CircuitBreakerConfig].toSelf,
bind[CircuitBreakerMetricsProviderImpl].toSelf
)
}
}
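// Usage note (an assumption, not taken from this file): when this module is not picked up
// automatically via reference.conf, a Play/Lagom application could enable it explicitly in
// application.conf with
//   play.modules.enabled += "com.lightbend.lagom.internal.javadsl.client.CircuitBreakerModule"
// after which a CircuitBreakersPanel can be injected wherever circuit breaking is needed.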
| ignasi35/lagom | service/javadsl/client/src/main/scala/com/lightbend/lagom/internal/javadsl/client/CircuitBreakerModule.scala | Scala | apache-2.0 | 1,138 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.synsys.org.apache.spark.streaming.camel
import java.io.Serializable
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.{ JavaReceiverInputDStream, JavaStreamingContext, JavaDStream }
import scala.reflect.ClassTag
import org.apache.spark.streaming.dstream.{ ReceiverInputDStream, DStream }
object CamelUtils {
/**
* Create an input stream that receives messages from an Apache Camel component.
* @param ssc StreamingContext object
* @param componentUri Uri of the Apache Camel component
* @param messagePart The part(s) of the message to store. Defaults to MessagePart.BODY
* @param storageLevel RDD storage level. Defaults to StorageLevel.MEMORY_AND_DISK_SER_2
* @tparam V The type of the message value
*/
def createStream[V: ClassTag](
ssc: StreamingContext,
componentUri: String,
messagePart: MessagePart = MessagePart.BODY,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2): ReceiverInputDStream[V] = {
new CamelInputDStream[V](ssc, componentUri, messagePart, storageLevel)
}
/**
* Create an input stream that receives messages from an Apache Camel component.
* Storage level of the data will be the default StorageLevel.MEMORY_AND_DISK_SER_2.
* @param valueClass The type of the message value
* @param jssc JavaStreamingContext object
* @param componentUri Uri of the Apache Camel component
* @param messagePart The part(s) of the message to receive
*/
def createStream[V](
valueClass: Class[V],
jssc: JavaStreamingContext,
componentUri: String,
messagePart: MessagePart): JavaReceiverInputDStream[V] = {
implicit val valueClassTag: ClassTag[V] = ClassTag(valueClass)
createStream[V](jssc.ssc, componentUri, messagePart)
}
/**
* Create an input stream that receives messages from an Apache Camel component.
* The stored Message part is MessagePart.BODY
* The storage level of the data will be the default StorageLevel.MEMORY_AND_DISK_SER_2.
* @param valueClass The type of the message value
* @param jssc JavaStreamingContext object
* @param componentUri Uri of the Apache Camel component
*/
def createStream[V](
valueClass: Class[V],
jssc: JavaStreamingContext,
componentUri: String): JavaReceiverInputDStream[V] = {
implicit val valueClassTag: ClassTag[V] = ClassTag(valueClass)
createStream[V](jssc.ssc, componentUri)
}
/**
* Create an input stream that receives messages from an Apache Camel component.
* The stored Message part is MessagePart.BODY
* @param valueClass The type of the message value
* @param jssc JavaStreamingContext object
* @param componentUri Uri of the Apache Camel component
* @param storageLevel RDD storage level.
*/
def createStream[V](
valueClass: Class[V],
jssc: JavaStreamingContext,
componentUri: String,
storageLevel: StorageLevel): JavaReceiverInputDStream[V] = {
implicit val valueClassTag: ClassTag[V] = ClassTag(valueClass)
createStream[V](jssc.ssc, componentUri, MessagePart.BODY, storageLevel)
}
/**
* Create an input stream that receives messages from an Apache Camel component.
* @param valueClass The type of the message value
* @param jssc JavaStreamingContext object
* @param componentUri Uri of the Apache Camel component
* @param messagePart The part(s) of the message to receive
* @param storageLevel RDD storage level
*/
def createStream[V](
valueClass: Class[V],
jssc: JavaStreamingContext,
componentUri: String,
messagePart: MessagePart,
storageLevel: StorageLevel): JavaReceiverInputDStream[V] = {
implicit val valueClassTag: ClassTag[V] = ClassTag(valueClass)
createStream[V](jssc.ssc, componentUri, messagePart, storageLevel)
}
/**
* Create an input stream that receives messages from an Apache Camel component.
* Storage level of the data will be the default StorageLevel.MEMORY_AND_DISK_SER_2.
* @param jssc JavaStreamingContext object
* @param componentUri Uri of the Apache Camel component
* @param messagePart The part(s) of the message to receive
*
* @deprecated Use the equivalent generic method above
*/
def createStream(
jssc: JavaStreamingContext,
componentUri: String,
messagePart: MessagePart): JavaReceiverInputDStream[Serializable] = {
    implicit val valueClassTag: ClassTag[Serializable] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[Serializable]]
createStream[Serializable](jssc.ssc, componentUri, messagePart)
}
/**
* Create an input stream that receives messages from an Apache Camel component.
* The stored Message part is MessagePart.BODY
* The storage level of the data will be the default StorageLevel.MEMORY_AND_DISK_SER_2.
* @param jssc JavaStreamingContext object
* @param componentUri Uri of the Apache Camel component
*
* @deprecated Use the equivalent generic method above
*/
def createStream(
jssc: JavaStreamingContext,
componentUri: String): JavaReceiverInputDStream[Serializable] = {
    implicit val valueClassTag: ClassTag[Serializable] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[Serializable]]
createStream[Serializable](jssc.ssc, componentUri)
}
/**
* Create an input stream that receives messages from an Apache Camel component.
* The stored Message part is MessagePart.BODY
* @param jssc JavaStreamingContext object
* @param componentUri Uri of the Apache Camel component
* @param storageLevel RDD storage level
*
* @deprecated Use the equivalent generic method above
*/
def createStream(
jssc: JavaStreamingContext,
componentUri: String,
storageLevel: StorageLevel): JavaReceiverInputDStream[Serializable] = {
    implicit val valueClassTag: ClassTag[Serializable] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[Serializable]]
createStream[Serializable](jssc.ssc, componentUri, MessagePart.BODY, storageLevel)
}
/**
* Create an input stream that receives messages from an Apache Camel component.
* @param jssc JavaStreamingContext object
* @param componentUri Uri of the Apache Camel component
* @param messagePart The part(s) of the message to receive
* @param storageLevel RDD storage level
*
* @deprecated Use the equivalent generic method above
*/
def createStream(
jssc: JavaStreamingContext,
componentUri: String,
messagePart: MessagePart,
storageLevel: StorageLevel): JavaReceiverInputDStream[Serializable] = {
    implicit val valueClassTag: ClassTag[Serializable] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[Serializable]]
createStream[Serializable](jssc.ssc, componentUri, messagePart, storageLevel)
}
}
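// Usage sketch (illustrative only: the object name and the "jetty:..." endpoint URI are
// assumptions, and the matching Camel component must be on the runtime classpath):
private[camel] object CamelUtilsExample {
  // Wires a receiver-based stream of String message bodies from a Camel endpoint.
  def rawLines(ssc: StreamingContext): ReceiverInputDStream[String] =
    CamelUtils.createStream[String](ssc, "jetty:http://0.0.0.0:8080/ingest")
}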
| synsys/spark | spark-streaming-camel_2.10/src/main/scala/com/synsys/org/apache/spark/streaming/camel/CamelUtils.scala | Scala | apache-2.0 | 7,528 |
package reductions
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import reductions.ParallelCountChange._
@RunWith(classOf[JUnitRunner])
class ParallelCountChangeSuite extends FunSuite {
test("countChange should return 0 for money < 0") {
def check(money: Int, coins: List[Int]) =
assert(countChange(money, coins) == 0,
s"countChang($money, _) should be 0")
check(-1, List())
check(-1, List(1, 2, 3))
check(-Int.MinValue, List())
check(-Int.MinValue, List(1, 2, 3))
}
test("countChange should return 1 when money == 0") {
def check(coins: List[Int]) =
assert(countChange(0, coins) == 1,
s"countChang(0, _) should be 1")
check(List())
check(List(1, 2, 3))
check(List.range(1, 100))
}
test("countChange should return 0 for money > 0 and coins = List()") {
def check(money: Int) =
assert(countChange(money, List()) == 0,
s"countChang($money, List()) should be 0")
check(1)
check(Int.MaxValue)
}
test("countChange should work when there is only one coin") {
def check(money: Int, coins: List[Int], expected: Int) =
assert(countChange(money, coins) == expected,
s"countChange($money, $coins) should be $expected")
check(1, List(1), 1)
check(2, List(1), 1)
check(1, List(2), 0)
check(Int.MaxValue, List(Int.MaxValue), 1)
check(Int.MaxValue - 1, List(Int.MaxValue), 0)
}
test("countChange should work for multi-coins") {
def check(money: Int, coins: List[Int], expected: Int) =
assert(countChange(money, coins) == expected,
s"countChange($money, $coins) should be $expected")
check(50, List(1, 2, 5, 10), 341)
check(250, List(1, 2, 5, 10, 20, 50), 177863)
}
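  // For reference, a minimal sequential countChange consistent with the expectations above
  // (a sketch only; the implementation under test lives in ParallelCountChange):
  //   def countChange(money: Int, coins: List[Int]): Int =
  //     if (money == 0) 1
  //     else if (money < 0 || coins.isEmpty) 0
  //     else countChange(money - coins.head, coins) + countChange(money, coins.tail)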
} | yurii-khomenko/fpScalaSpec | c3w2reductions/src/test/scala/reductions/ParallelCountChangeSuite.scala | Scala | gpl-3.0 | 1,795 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, TensorModule}
import com.intel.analytics.bigdl.tensor.{DenseTensorApply, Tensor, TensorFunc4, TensorFunc6}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
/**
* Apply the SoftPlus function to an n-dimensional input tensor.
*
* SoftPlus function: f_i(x) = 1/beta * log(1 + exp(beta * x_i))
*
* @param beta Controls sharpness of transfer function
*/
@SerialVersionUID(- 6938956677043843473L)
class SoftPlus[T: ClassTag, D: ClassTag](
val beta: Double = 1.0
)( implicit ev: TensorNumeric[T], ev2: TensorNumeric[D])
extends AbstractModule[Tensor[D], Tensor[D], T] {
// Avoid floating point issues with exp(x), x>20
private val threshold = ev2.fromType[Double](20.0)
private val betaT = ev2.fromType[Double](beta)
output = Tensor[D]()
gradInput = Tensor[D]()
override def updateOutput(input: Tensor[D]): Tensor[D] = {
output.resizeAs(input)
// f(x) = 1/beta * log(1 + exp(beta * x))
val func = new TensorFunc4[D] {
override def apply (data1: Array[D], offset1: Int, data2: Array[D], offset2: Int): Unit = {
data1(offset1) = if (ev2.isGreater(ev2.times(data2(offset2), betaT), threshold)) {
data2(offset2)
} else {
ev2.divide(ev2.log1p(ev2.exp(ev2.times(data2(offset2), betaT))), betaT)
}
}
}
DenseTensorApply.apply2[D](output, input, func)
output
}
override def updateGradInput(input: Tensor[D], gradOutput: Tensor[D]): Tensor[D] = {
gradInput.resizeAs(input)
// d/dx[log(1+exp(k*x))/k] = exp(kx) / (exp(kx) + 1)
// SINCE
// y = (1/k)*log(1+exp(k*x)) --> x = (1/k)*log(exp(k*y)-1)
// THEREFORE:
// d/dx(f(x)) = (exp(k*y) - 1) / exp(k*y)
val func = new TensorFunc6[D] {
override def apply(data1: Array[D], offset1: Int, data2: Array[D], offset2: Int,
data3: Array[D], offset3: Int): Unit = {
val z = ev2.exp(ev2.times(data3(offset3), betaT))
data1(offset1) = if (ev2.isGreater(ev2.times(data3(offset3), betaT), threshold)) {
data2(offset2)
} else {
ev2.times(data2(offset2), ev2.divide(ev2.minus(z, ev2.fromType[Int](1)), z))
}
}
}
DenseTensorApply.apply3[D](gradInput, gradOutput, output, func)
gradInput
}
override def getClassTagNumerics() : (Array[ClassTag[_]], Array[TensorNumeric[_]]) = {
(Array[ClassTag[_]](scala.reflect.classTag[T], scala.reflect.classTag[D]),
Array[TensorNumeric[_]](ev, ev2))
}
}
object SoftPlus {
def apply[@specialized(Float, Double) T: ClassTag, D: ClassTag](
beta: Double = 1.0)
(implicit ev: TensorNumeric[T], ev2: TensorNumeric[D]) : SoftPlus[T, D] = {
new SoftPlus[T, D](beta)
}
}
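// A scalar reference of the same transfer function (a sketch, not part of BigDL's API),
// handy for sanity checks: SoftPlusReference.softplus(0.0) equals math.log(2.0) ~ 0.6931,
// and inputs with beta * x > 20 pass through unchanged, mirroring the threshold above.
private[nn] object SoftPlusReference {
  def softplus(x: Double, beta: Double = 1.0): Double =
    if (beta * x > 20.0) x else math.log1p(math.exp(beta * x)) / beta
}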
| qiuxin2012/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/SoftPlus.scala | Scala | apache-2.0 | 3,457 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rpc
import org.apache.spark.{SparkException, SparkFunSuite}
class RpcAddressSuite extends SparkFunSuite {
test("hostPort") {
val address = RpcAddress("1.2.3.4", 1234)
assert(address.host == "1.2.3.4")
assert(address.port == 1234)
assert(address.hostPort == "1.2.3.4:1234")
}
test("fromSparkURL") {
val address = RpcAddress.fromSparkURL("spark://1.2.3.4:1234")
assert(address.host == "1.2.3.4")
assert(address.port == 1234)
}
test("fromSparkURL: a typo url") {
val e = intercept[SparkException] {
RpcAddress.fromSparkURL("spark://1.2. 3.4:1234")
}
assert("Invalid master URL: spark://1.2. 3.4:1234" === e.getMessage)
}
test("fromSparkURL: invalid scheme") {
val e = intercept[SparkException] {
RpcAddress.fromSparkURL("invalid://1.2.3.4:1234")
}
assert("Invalid master URL: invalid://1.2.3.4:1234" === e.getMessage)
}
test("toSparkURL") {
val address = RpcAddress("1.2.3.4", 1234)
assert(address.toSparkURL == "spark://1.2.3.4:1234")
}
}
| dhruve/spark | core/src/test/scala/org/apache/spark/rpc/RpcAddressSuite.scala | Scala | apache-2.0 | 1,863 |
// Copyright (c) 2013 Aleksander Bielawski. All rights reserved.
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file.
package com.github.pabzdzdzwiagief.initialization
import scala.tools.nsc.Global
import com.github.pabzdzdzwiagief.initialization.{Trace => JavaTrace}
private[this] trait Annotations {
val global: Global
/** Represents something that happens during initialization procedure. */
sealed abstract class Trace {
/** Method from which `member` is referenced. */
val from: global.MethodSymbol
/** Object that identifies relevant class member. */
val member: global.Symbol
/** Point in relevant source file. */
val point: Int
/** For comparing with other annotations attached to the same symbol.
* Instruction happens before those for which this value is greater.
*/
val ordinal: Int
}
abstract sealed class Access extends Trace {
override val member: global.TermSymbol
}
abstract sealed class Invocation extends Trace {
override val member: global.MethodSymbol
}
case class Get(from: global.MethodSymbol, member: global.TermSymbol, point: Int, ordinal: Int)
extends Access
case class Set(from: global.MethodSymbol, member: global.TermSymbol, point: Int, ordinal: Int)
extends Access
case class Virtual(from: global.MethodSymbol, member: global.MethodSymbol, point: Int, ordinal: Int)
extends Invocation
case class Static(from: global.MethodSymbol, member: global.MethodSymbol, point: Int, ordinal: Int)
extends Invocation
object Trace {
import global.{LiteralAnnotArg, Constant, AnnotationInfo}
import global.rootMirror.getRequiredClass
import global.nme.{CONSTRUCTOR, LOCAL_SUFFIX_STRING, isLocalName}
import global.newTermName
/** Converts Scalac's internal annotation representation to a Trace
* object if the annotation represents a Trace.
*/
def fromAnnotation(anyAnnotation: AnnotationInfo,
from: global.MethodSymbol,
inClass: global.ClassSymbol): Option[Trace] = for {
info ← Some(anyAnnotation)
if info.atp <:< traceType
map = info.javaArgs collect {
case (n, a: LiteralAnnotArg) => (n.decoded, a.const)
}
Constant(owner: String) = map("owner")
Constant(memberName: String) = map("memberName")
Constant(fromMemberName: String) = map("fromMemberName")
Constant(typeString: String) = map("typeString")
Constant(fromTypeString: String) = map("fromTypeString")
Constant(traceType: String) = map("traceType")
Constant(point: Int) = map("point")
Constant(ordinal: Int) = map("ordinal")
fromType = getRequiredClass(owner).tpe
if fromMemberName == from.nameString
if fromTypeString == from.info.safeToString
toNameRaw = newTermName(memberName)
toName = if (toNameRaw == CONSTRUCTOR) toNameRaw else toNameRaw.encode
accessed = toName.append(LOCAL_SUFFIX_STRING).ensuring(isLocalName _)
(name, mkTrace) = traceType match {
case "Static" => (toName, { s: global.Symbol =>
Static(from, s.asMethod, point, ordinal)
})
case "Virtual" => (toName, { s: global.Symbol =>
Virtual(from, s.overridingSymbol(inClass)
.orElse(s)
.asMethod, point, ordinal)
})
case "Get" => (accessed, { s: global.Symbol =>
Get(from, s.asTerm, point, ordinal)
})
case "Set" => (accessed, { s: global.Symbol =>
Set(from, s.asTerm, point, ordinal)
})
}
symbol ← fromType.memberBasedOnName(name, 0).alternatives.find {
fromType.memberType(_).safeToString == typeString
}
} yield mkTrace(symbol)
/** Converts Trace object to Scalac's internal annotation representation. */
def toAnnotation(trace: Trace): AnnotationInfo = {
val name = classOf[JavaTrace].getCanonicalName
def a(x: Any) = LiteralAnnotArg(Constant(x))
def n(s: String) = newTermName(s)
AnnotationInfo(getRequiredClass(name).tpe, Nil, List(
n("owner") → a(trace.member.owner.javaClassName),
n("memberName") → a(trace.member.nameString),
n("fromMemberName") → a(trace.from.nameString),
n("fromTypeString") → a(trace.from.info.safeToString),
n("typeString") → a(trace.member.info.safeToString),
n("traceType") → a(trace.getClass.getSimpleName),
n("point") → a(trace.point),
n("ordinal") → a(trace.ordinal)
))
}
private[this] val traceType =
getRequiredClass(classOf[JavaTrace].getCanonicalName).tpe
}
}
| pabzdzdzwiagief/initialization | src/main/scala/com/github/pabzdzdzwiagief/initialization/Annotations.scala | Scala | bsd-2-clause | 4,729 |