code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M)
---|---|---|---|---|---|
package com.aergonaut.lifeaquatic.tileentity
import java.util
import cofh.api.fluid.ITankContainerBucketable
import cofh.api.tileentity.ITileInfo
import cofh.lib.util.helpers.{FluidHelper, ItemHelper}
import com.aergonaut.lifeaquatic.recipe.VatRecipe
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.init.Items
import net.minecraft.inventory.ISidedInventory
import net.minecraft.item.{Item, ItemStack}
import net.minecraft.nbt.{NBTTagCompound, NBTTagList}
import net.minecraft.util.{ChatComponentText, IChatComponent}
import net.minecraftforge.common.util.ForgeDirection
import net.minecraftforge.fluids._
class TileEntityVat extends TileEntityBase with ISidedInventory with ITankContainerBucketable with ITileInfo {
var formed: Boolean = false
// offset of this tile from the multiblock master, stored as (x, y, z)
var offset: Array[Int] = Array(0, 0, 0)
var input: Option[ItemStack] = None
var catalyst: Option[ItemStack] = None
val waterTank: FluidTank = new FluidTank(12000)
val outputTank: FluidTank = new FluidTank(12000)
var maxProcessTime: Int = 1
var processTime: Int = 0
var currentRecipe: Option[VatRecipe] = None
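// catalyst value currently available to recipes; increased when a catalyst item is consumed, decreased by each recipe's catalystCost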
var activeCatalystAmount = 0
def master: Option[TileEntityVat] = {
if (!formed) return None
if (offset(0) == 0 && offset(1) == 0 && offset(2) == 0) {
return Some(this)
}
val xx = xCoord - offset(0)
val yy = yCoord - offset(1)
val zz = zCoord - offset(2)
val tile = worldObj.getTileEntity(xx, yy, zz)
if (tile.isInstanceOf[TileEntityVat]) Some(tile.asInstanceOf[TileEntityVat]) else None
}
override protected def readFromNBTCustom(nBTTagCompound: NBTTagCompound): Unit = {
formed = nBTTagCompound.getBoolean("formed")
offset = nBTTagCompound.getIntArray("offset")
waterTank.readFromNBT(nBTTagCompound.getCompoundTag("waterTank"))
outputTank.readFromNBT(nBTTagCompound.getCompoundTag("outputTank"))
val currentRecipeTag = nBTTagCompound.getCompoundTag("currentRecipe")
if (currentRecipeTag.hasKey("Empty")) {
currentRecipe = None
} else {
currentRecipe = VatRecipe.loadRecipeFromNBT(currentRecipeTag)
}
processTime = nBTTagCompound.getInteger("processTime")
maxProcessTime = nBTTagCompound.getInteger("maxProcessTime")
activeCatalystAmount = nBTTagCompound.getInteger("activeCatalystAmount")
}
override def readFromNBT(nBTTagCompound: NBTTagCompound): Unit = {
super.readFromNBT(nBTTagCompound)
val invTagList = nBTTagCompound.getTagList("inventory", 10)
for (i <- 0 until invTagList.tagCount()) {
val tag = invTagList.getCompoundTagAt(i)
val slot = tag.getByte("Slot").toInt
slot match {
case TileEntityVat.InputSlot => input = Some(ItemStack.loadItemStackFromNBT(tag))
case TileEntityVat.CatalystSlot => catalyst = Some(ItemStack.loadItemStackFromNBT(tag))
case _ => {}
}
}
}
override protected def writeToNBTCustom(nBTTagCompound: NBTTagCompound): Unit = {
// multiblock formation and master positioning
nBTTagCompound.setBoolean("formed", formed)
nBTTagCompound.setIntArray("offset", offset)
// tanks
val waterTankTag = waterTank.writeToNBT(new NBTTagCompound())
nBTTagCompound.setTag("waterTank", waterTankTag)
val outputTankTag = outputTank.writeToNBT(new NBTTagCompound())
nBTTagCompound.setTag("outputTank", outputTankTag)
// current recipe
val currentRecipeTag = new NBTTagCompound()
if (currentRecipe.isEmpty) {
currentRecipeTag.setString("Empty", "") // marker key matched by readFromNBTCustom
} else {
currentRecipe.get.writeToNBT(currentRecipeTag)
}
nBTTagCompound.setTag("currentRecipe", currentRecipeTag)
// sync progress
nBTTagCompound.setInteger("processTime", processTime)
nBTTagCompound.setInteger("maxProcessTime", maxProcessTime)
nBTTagCompound.setInteger("activeCatalystAmount", activeCatalystAmount)
}
override def writeToNBT(nBTTagCompound: NBTTagCompound): Unit = {
super.writeToNBT(nBTTagCompound)
val invTagList = new NBTTagList()
if (input.nonEmpty) {
val inputTag = new NBTTagCompound()
inputTag.setByte("Slot", TileEntityVat.InputSlot.toByte)
input.get.writeToNBT(inputTag)
invTagList.appendTag(inputTag)
}
if (catalyst.nonEmpty) {
val catalystTag = new NBTTagCompound()
catalystTag.setByte("Slot", TileEntityVat.CatalystSlot.toByte)
catalyst.get.writeToNBT(catalystTag)
invTagList.appendTag(catalystTag)
}
nBTTagCompound.setTag("inventory", invTagList)
}
override def updateEntity(): Unit = {
if (!worldObj.isRemote && master.nonEmpty && master.get == this) {
var update = false
if (processTime > 0) {
if (input.isEmpty) {
processTime = 0
currentRecipe = None
} else {
processTime -= 1
}
update = true
} else {
if (currentRecipe.nonEmpty) {
val recipe = currentRecipe.get
// consume the input
decrStackSize(TileEntityVat.InputSlot, 1)
// consume the fuels
waterTank.drain(recipe.waterCost, true)
activeCatalystAmount -= recipe.catalystCost
// produce the output
outputTank.fill(recipe.output.copy(), true)
// clear the recipe
currentRecipe = None
update = true
} else {
// search for a recipe with the current conditions
currentRecipe = VatRecipe.findRecipeForInput(input, waterTank.getFluidAmount, activeCatalystAmount)
if (currentRecipe.nonEmpty) {
processTime = currentRecipe.get.processTime
maxProcessTime = processTime
update = true
} else {
// maybe we didn't have enough catalyst
// pretend to consume one and try the search again
if (catalyst.nonEmpty) {
val catalystValue = TileEntityVat.CatalystMapping(catalyst.get.getItem)
currentRecipe = VatRecipe.findRecipeForInput(input, waterTank.getFluidAmount, activeCatalystAmount + catalystValue)
if (currentRecipe.nonEmpty) {
// adding more catalyst did it, so actually consume one
decrStackSize(TileEntityVat.CatalystSlot, 1)
activeCatalystAmount += catalystValue
processTime = currentRecipe.get.processTime
maxProcessTime = processTime
update = true
}
}
}
}
}
if (update) {
markDirty()
worldObj.markBlockForUpdate(xCoord, yCoord, zCoord)
}
}
}
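// Completed fraction of the current process, rounded to the nearest Int (so this yields only 0 or 1).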
def progress: Int = ((maxProcessTime - processTime) / maxProcessTime.toFloat).round
// ISidedInventory
override def canExtractItem(p_102008_1_ : Int, p_102008_2_ : ItemStack, p_102008_3_ : Int): Boolean = false
override def canInsertItem(slot: Int, stack: ItemStack, side: Int): Boolean = {
master.exists(_._canInsertItem(slot, stack, side))
}
def _canInsertItem(slot: Int, stack: ItemStack, side: Int): Boolean = {
isItemValidForSlot(slot, stack)
}
override def getAccessibleSlotsFromSide(p_94128_1_ : Int): Array[Int] = Array(TileEntityVat.InputSlot, TileEntityVat.CatalystSlot)
override def decrStackSize(p_70298_1_ : Int, p_70298_2_ : Int): ItemStack = {
master.flatMap(_._decrStackSize(p_70298_1_, p_70298_2_)).orNull
}
def _decrStackSize(slot: Int, amount: Int): Option[ItemStack] = {
val stack = Option(getStackInSlot(slot))
stack.map(s => {
if (s.stackSize <= amount) {
setInventorySlotContents(slot, null)
s
} else {
val newStack = s.splitStack(amount)
if (newStack.stackSize == 0) setInventorySlotContents(slot, null)
newStack
}
})
}
override def closeInventory(): Unit = {}
override def getSizeInventory: Int = if (formed) 2 else 0
override def getInventoryStackLimit: Int = 64
override def isItemValidForSlot(p_94041_1_ : Int, p_94041_2_ : ItemStack): Boolean = p_94041_1_ match {
case TileEntityVat.InputSlot => ItemHelper.isOre(p_94041_2_)
case TileEntityVat.CatalystSlot => TileEntityVat.Catalysts(p_94041_2_.getItem)
case _ => false
}
override def getStackInSlotOnClosing(p_70304_1_ : Int): ItemStack = {
master.flatMap(_._getStackInSlotOnClosing(p_70304_1_)).orNull
}
def _getStackInSlotOnClosing(slot: Int): Option[ItemStack] = {
val stack = Option(getStackInSlot(slot))
if (stack.nonEmpty) setInventorySlotContents(slot, null)
stack
}
override def openInventory(): Unit = {}
override def setInventorySlotContents(p_70299_1_ : Int, p_70299_2_ : ItemStack): Unit = {
master.foreach(_._setInventorySlotContents(p_70299_1_, Option(p_70299_2_)))
}
def _setInventorySlotContents(slot: Int, stack: Option[ItemStack]): Unit = slot match {
case TileEntityVat.InputSlot => input = _processStackAndInsert(stack)
case TileEntityVat.CatalystSlot => catalyst = _processStackAndInsert(stack)
}
def _processStackAndInsert(stack: Option[ItemStack]): Option[ItemStack] = stack.map(s => {
if (s.stackSize > getInventoryStackLimit) s.stackSize = getInventoryStackLimit
s
})
override def isUseableByPlayer(p_70300_1_ : EntityPlayer): Boolean = true
override def getStackInSlot(p_70301_1_ : Int): ItemStack = master.flatMap(_._getStackInSlot(p_70301_1_)).orNull
private def _getStackInSlot(slot: Int): Option[ItemStack] = slot match {
case TileEntityVat.InputSlot => input
case TileEntityVat.CatalystSlot => catalyst
case _ => None
}
override def hasCustomInventoryName: Boolean = false
override def getInventoryName: String = "LifeAquaticVat"
// ITankContainerBucketable
override def allowBucketFill(stack: ItemStack): Boolean = {
if (!FluidContainerRegistry.isBucket(stack)) return false
val fluid = Option(FluidHelper.getFluidForFilledItem(stack))
fluid.exists(fl => FluidHelper.isFluidEqual(fl, FluidHelper.WATER))
}
override def allowBucketDrain(stack: ItemStack): Boolean = false
override def drain(from: ForgeDirection, resource: FluidStack, doDrain: Boolean): FluidStack = drain(from, resource.amount, doDrain)
override def drain(from: ForgeDirection, maxDrain: Int, doDrain: Boolean): FluidStack = master.flatMap(_._drain(from, maxDrain, doDrain)).orNull
def _drain(from: ForgeDirection, maxDrain: Int, doDrain: Boolean): Option[FluidStack] = Option(outputTank.drain(maxDrain, doDrain))
override def canFill(from: ForgeDirection, fluid: Fluid): Boolean = FluidHelper.isFluidEqualOrNull(fluid, FluidHelper.WATER)
override def canDrain(from: ForgeDirection, fluid: Fluid): Boolean = true
override def fill(from: ForgeDirection, resource: FluidStack, doFill: Boolean): Int = master.map(_._fill(from, resource, doFill)).getOrElse(0)
def _fill(from: ForgeDirection, resource: FluidStack, doFill: Boolean): Int = waterTank.fill(resource, doFill)
override def getTankInfo(from: ForgeDirection): Array[FluidTankInfo] = master.map(_._getTankInfo(from)).getOrElse(Array())
def _getTankInfo(from: ForgeDirection): Array[FluidTankInfo] = Array(waterTank.getInfo, outputTank.getInfo)
override def getTileInfo(info: util.List[IChatComponent], side: ForgeDirection, player: EntityPlayer, debug: Boolean): Unit = {
if (!formed) return
info.add(new ChatComponentText("Available recipes:"))
}
}
object TileEntityVat {
final val InputSlot = 0
final val CatalystSlot = 1
final val Catalysts = Set(Items.gunpowder)
final val CatalystMapping: Map[Item, Int] = Map(Items.gunpowder -> 2000)
}
| aergonaut/LifeAquatic | src/main/scala/com/aergonaut/lifeaquatic/tileentity/TileEntityVat.scala | Scala | mit | 11,549 |
class Outer {
def assertNoFields(c: Class[_]): Unit = {
assert(c.getDeclaredFields.isEmpty)
}
def assertHasOuter(c: Class[_]): Unit = {
assert(c.getDeclaredFields.exists(_.getName.contains("outer")))
}
class Member
final class FinalMember
def test: Unit = {
assertHasOuter(classOf[Member])
assertNoFields(classOf[FinalMember])
final class C
assertNoFields(classOf[C])
class D
assertNoFields(classOf[D])
(() => {class E; assertNoFields(classOf[E])}).apply()
// The outer reference elision currently runs on a class-by-class basis. If it cannot rule out that a class has
// subclasses, it will not remove the outer reference. A smarter analysis could detect when no members of
// a sealed (or effectively sealed) hierarchy use the outer reference; in that case the optimization could still be performed.
class Parent
class Child extends Parent
assertHasOuter(classOf[Parent])
// Note: outer references (if they haven't been elided) are used in pattern matching as follows.
// This isn't relevant to term-owned classes, as you can't refer to them with a prefix that includes
// the outer class.
val outer1 = new Outer
val outer2 = new Outer
(new outer1.Member: Any) match {
case _: outer2.Member => sys.error("wrong match!")
case _: outer1.Member => // okay
case x => throw new MatchError(x)
}
// ... continuing on that theme, note that `Member` isn't considered a local class; it is owned by the class
// `LocalOuter`, which itself happens to be term-owned. So we expect that it has an outer reference, and that this
// is respected in type tests.
class LocalOuter {
class Member
final class FinalMember
}
assertNoFields(classOf[LocalOuter])
assertHasOuter(classOf[LocalOuter#Member])
val localOuter1 = new LocalOuter
val localOuter2 = new LocalOuter
(new localOuter1.Member: Any) match {
case _: localOuter2.Member => sys.error("wrong match!")
case _: localOuter1.Member => // okay
case x => throw new MatchError(x)
}
// Final member classes still lose the outer reference.
assertNoFields(classOf[LocalOuter#FinalMember])
}
}
object Test {
def main(args: Array[String]): Unit = {
new Outer().test
}
}
| scala/scala | test/files/run/t9408.scala | Scala | apache-2.0 | 2,341 |
package cc.factorie.app.nlp.ner
/**
* @author Kate Silverstein
* created on 3/23/15
*/
import org.scalatest._
import cc.factorie.app.nlp.load._
class TestNerTaggers extends FlatSpec {
val conllTestFilename = this.getClass.getResource("/conll-ner-input").getPath
val ontoTestFilename = this.getClass.getResource("/parser-test-input").getPath
"LoadConll2003" should "load 2 documents" in {
val testDocs = LoadConll2003.fromFilename(conllTestFilename)
println(testDocs.length)
testDocs.foreach(d => println(d.sections.flatMap(_.tokens).mkString(",")))
assert(testDocs.length == 2, "failed to load documents")
assert(testDocs.head.tokenCount > 0, "failed to load document with tokens")
assert(testDocs.head.sections.flatMap(_.tokens).forall(t => t.attr.contains(classOf[LabeledIobConllNerTag])), "token with no LabeledIobConllNerTag")
val bilouTestDocs = LoadConll2003(BILOU=true).fromFilename(conllTestFilename)
assert(bilouTestDocs.length == 2, "failed to load documents")
assert(bilouTestDocs.head.tokenCount > 0, "failed to load document with tokens")
assert(bilouTestDocs.head.sections.flatMap(_.tokens).forall(t => t.attr.contains(classOf[LabeledBilouConllNerTag])), "token with no LabeledBilouConllNerTag")
}
"LoadOntonotes5" should "load 1 document" in {
val testDocs = LoadOntonotes5.fromFilename(ontoTestFilename)
assert(testDocs.length == 1, "failed to load documents")
assert(testDocs.head.tokenCount > 0, "failed to load document with tokens")
assert(testDocs.head.sections.flatMap(_.tokens).forall(t => t.attr.contains(classOf[LabeledBilouOntonotesNerTag])), "token with no LabeledBilouOntonotesNerTag")
}
// TODO add an actual test for training/testing ChainNer, but without loading all of the lexicons (since this takes awhile) -ks
}
| hlin117/factorie | src/test/scala/cc/factorie/app/nlp/ner/TestNerTaggers.scala | Scala | apache-2.0 | 1,831 |
package extracells.common.integration
import net.minecraftforge.common.config.Configuration
class Integration {
def loadConfig(config: Configuration): Unit = {
for (mod <- Mods.values)
mod.loadConfig(config)
}
def preInit(): Unit = {
if (Mods.IGW.correctSide() && Mods.IGW.shouldLoad){}
//TODO: IGW
}
def init(): Unit = {
//TODO: Integration
}
def postInit(): Unit = {
//TODO: Integration
}
}
| ruifung/ExtraCells2 | src/main/scala/extracells/common/integration/Integration.scala | Scala | mit | 442 |
package one.lockstep.vault
import one.lockstep.util.crypto._
import one.lockstep.util._
import com.softwaremill.quicklens._
case class VaultEdit(unlockedContents: UnlockedVault.Contents, pendingPasscode: Option[Bytes] = None) {
def put(entryId: String, entrySpec: sdk.Spec): VaultEdit = {
import Ciphersuite128._ //todo consider user-specified ciphers
val (entry, secret) = entrySpec match {
case explicit: ExplicitSpec =>
(VaultEntry(entryId, explicit.kind, explicit.public, explicit.attrs), explicit.secret)
case KeyGenSpec(entryType @ VaultEntry.Type.cipher, attrs) =>
val key = cipher.keygen()
val entry = VaultEntry(entryId, entryType, None, attrs)
(entry, key.raw)
case KeyGenSpec(entryType @ VaultEntry.Type.signature, attrs) =>
???
// val KeyPair(publicKey, privateKey) = Crypto.sig.keygen()
// val entry = VaultEntry(id, entryType, Some(publicKey.raw), attrs)
// (entry, privateKey.raw)
case KeyGenSpec(entryType @ VaultEntry.Type.decryption, attrs) =>
???
// val KeyPair(publicKey, privateKey) = Crypto.asym.keygen()
// val entry = VaultEntry(id, entryType, Some(publicKey.raw), attrs)
// (entry, privateKey.raw)
case unsupported: sdk.Spec =>
throw new UnsupportedOperationException(unsupported.toString)
}
copy(unlockedContents = unlockedContents + (entry, secret))
}
def remove(entryId: String): VaultEdit =
this.modify(_.unlockedContents).using(_ - entryId)
def putEntryAttr(entryId: String, attrId: String, value: Bytes): VaultEdit =
this.modify(_.unlockedContents.public.entries.at(entryId).attrs).using(_ + (attrId -> value))
def removeEntryAttr(entryId: String, attrId: String): VaultEdit =
this.modify(_.unlockedContents.public.entries.at(entryId).attrs).using(_ - attrId)
def putVaultAttr(attrId: String, value: Bytes): VaultEdit =
this.modify(_.unlockedContents.public.attrs).using(_ + (attrId -> value))
def removeVaultAttr(attrId: String): VaultEdit =
this.modify(_.unlockedContents.public.attrs).using(_ - attrId)
def changePasscode(passcode: Bytes): VaultEdit =
copy(pendingPasscode = Some(passcode))
}
object VaultEdit {
def apply(): VaultEdit = VaultEdit(UnlockedVault.Contents())
} | lockstep-one/vault | vault-client/src/main/scala/one/lockstep/vault/VaultEdit.scala | Scala | agpl-3.0 | 2,306 |
/**
*
* Device
* Ledger wallet
*
* Created by Pierre Pollastri on 16/01/15.
*
* The MIT License (MIT)
*
* Copyright (c) 2015 Ledger
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package com.ledger.ledgerwallet.models
import java.security.KeyStore
import java.security.KeyStore.SecretKeyEntry
import java.util.Date
import javax.crypto.spec.SecretKeySpec
import com.ledger.ledgerwallet.crypto.SecretKey
import com.ledger.ledgerwallet.remote.api.m2fa.GcmAPI
import com.ledger.ledgerwallet.utils.{Benchmark, GooglePlayServiceHelper}
import org.json.JSONObject
import com.ledger.ledgerwallet.concurrent.ExecutionContext.Implicits.ui
import scala.collection.JavaConversions._
import android.content.Context
import com.ledger.ledgerwallet.base.model.{Collection, BaseModel}
import scala.concurrent.Future
import scala.util.{Success, Try}
class PairedDongle(_id: String = null, _name: String = null, _date: Date = null) extends BaseModel {
val id = string("id").set(_id)
val name = string("name").set(_name)
val createdAt = date("created_at").set(_date)
def pairingKey(implicit context: Context): Option[SecretKey] = PairedDongle.retrievePairingKey(id.get)
def delete()(implicit context: Context): Unit = {
PairedDongle.deletePairingKey(context, id.get)
GcmAPI.defaultInstance.removeDongleToken(this)
PairedDongle.delete(this)
}
def this() = {
this(null, null, null)
}
}
object PairedDongle extends Collection[PairedDongle] {
def get(id: String)(implicit context: Context): Option[PairedDongle] = {
val serializedDongle = context.getSharedPreferences(PreferencesName, Context.MODE_PRIVATE).getString(id, null)
if (serializedDongle == null)
return None
val dongle = Try(inflate(new JSONObject(serializedDongle)))
dongle getOrElse None
}
def all(implicit context: Context): Array[PairedDongle] = {
val serializedDongles = context.getSharedPreferences(PreferencesName, Context.MODE_PRIVATE).getAll
var dongles = Array[PairedDongle]()
serializedDongles foreach {case (key, value) =>
value match {
case json: String =>
val dongle = Try(inflate(new JSONObject(json)))
if (dongle.isSuccess && dongle.get.isDefined)
dongles = dongles :+ dongle.get.get
case _ =>
}
}
dongles
}
def create(id: String, name: String, pairingKey: Array[Byte])(implicit context: Context): PairedDongle = {
implicit val LogTag = "PairedDongle Creation"
Benchmark {
val dongle = new PairedDongle(id, name, new Date())
context
.getSharedPreferences(PreferencesName, Context.MODE_PRIVATE)
.edit()
.putString(id, dongle.toJson.toString)
.commit()
Future {
storePairingKey(context, id, pairingKey)
}
GooglePlayServiceHelper.getGcmRegistrationId onComplete {
case Success(regId) => GcmAPI.defaultInstance.updateDonglesToken(regId)
case _ =>
}
dongle
}
}
def delete(dongle: PairedDongle)(implicit context: Context): Unit = {
context
.getSharedPreferences(PreferencesName, Context.MODE_PRIVATE)
.edit()
.remove(dongle.id.get)
.commit()
}
def retrievePairingKey(id: String)(implicit context: Context): Option[SecretKey] = SecretKey.get(context, id)
def storePairingKey(context: Context, id: String, pairingKey: Array[Byte]): Unit = {
SecretKey.create(context, id, pairingKey)
}
def deletePairingKey(context: Context, id: String): Unit = SecretKey.delete(context, id)
val PreferencesName = "PairedDonglePreferences"
} | Morveus/ledger-wallet-android | app/src/main/scala/com/ledger/ledgerwallet/models/PairedDongle.scala | Scala | mit | 4,634 |
package com.gossiperl.client.actors
import akka.actor.SupervisorStrategy.Escalate
import akka.actor._
import com.gossiperl.client.exceptions.GossiperlClientException
import com.gossiperl.client.{GossiperlClientProtocol, GossiperlClient, OverlayConfiguration}
import scala.collection.mutable.{ Map => MutableMap }
object SupervisorProtocol {
case class Connect( c: OverlayConfiguration, client: GossiperlClient )
case class Disconnect( c: OverlayConfiguration )
case class ForwardEvent( config: OverlayConfiguration, event: GossiperlClientProtocol.ProtocolOp )
case class OverlayStopped( c: OverlayConfiguration )
}
object Supervisor {
val supervisorName = "gossiperl-client-supervisor"
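// Tracks the client registered for each overlay name, so protocol events can be dispatched back to it.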
protected val clientStore = MutableMap.empty[String, GossiperlClient]
}
class Supervisor extends ActorEx with ActorLogging {
import Supervisor._
import akka.util.Timeout
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
implicit val timeout = Timeout(1 seconds)
log.debug(s"Supervisor $supervisorName running.")
override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 10 seconds) {
case _:Exception => Escalate
}
def receive = {
case SupervisorProtocol.Connect( config, client ) =>
clientStore.get( config.overlayName ) match {
case Some(existing) =>
val message = s"Overlay ${config.overlayName} already used by client $existing"
client.event.applyOrElse( GossiperlClientProtocol.Error(config, message, new GossiperlClientException(message)), unhandled )
case None =>
context.actorOf(Props( new OverlayWorker( config ) ), name = config.overlayName)
clientStore.put( config.overlayName, client )
client.event.applyOrElse( GossiperlClientProtocol.Accepted( config ), unhandled )
}
case SupervisorProtocol.OverlayStopped( config ) =>
clientStore.get( config.overlayName ) match {
case Some(stopped) =>
log.debug(s"Overlay ${config.overlayName} is now stopped. Notifying the client...")
clientStore.remove(config.overlayName)
stopped.event.applyOrElse( GossiperlClientProtocol.Stopped( config ), unhandled )
case None =>
log.error(s"Could not find the client for ${config.overlayName}. Could not notify stopped state.")
}
case SupervisorProtocol.Disconnect( config ) =>
clientStore.get( config.overlayName ) match {
case Some(client) =>
!:(s"/user/${supervisorName}/${config.overlayName}", OverlayWorkerProtocol.Disconnect) onFailure {
case ex =>
self ! SupervisorProtocol.ForwardEvent( config, GossiperlClientProtocol.Error(config, "There was an error handing in a shutdown request.", ex) )
}
case None => log.error(s"Overlay ${config.overlayName} not found.")
}
case SupervisorProtocol.ForwardEvent( config, sourceEvent ) =>
clientStore.get( config.overlayName ) match {
case Some(client) =>
client.event.applyOrElse( sourceEvent, unhandled )
case None => log.error(s"Overlay ${config.overlayName} not found. Event $sourceEvent not handed in.")
}
}
}
| gossiperl/gossiperl-client-scala | src/main/scala/com/gossiperl/client/actors/Supervisor.scala | Scala | mit | 3,240 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package toplevel
import javax.swing.Icon
import com.intellij.extapi.psi.StubBasedPsiElementBase
import com.intellij.navigation.ItemPresentation
import com.intellij.openapi.editor.colors.TextAttributesKey
import com.intellij.psi._
import com.intellij.psi.search.{LocalSearchScope, SearchScope}
import com.intellij.psi.stubs.NamedStub
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.extensions.PsiElementExt
import org.jetbrains.plugins.scala.icons.Icons
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScCaseClause
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScClassParameter
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createIdentifier
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.JavaIdentifier
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, ModCount}
trait ScNamedElement extends ScalaPsiElement with PsiNameIdentifierOwner with NavigatablePsiElement {
@Cached(synchronized = false, ModCount.anyScalaPsiModificationCount, this)
def name: String = {
this match {
case st: StubBasedPsiElementBase[_] => st.getGreenStub match {
case namedStub: NamedStub[_] => namedStub.getName
case _ => nameInner
}
case _ => nameInner
}
}
def name_=(it: String) {
setName(it)
}
def nameInner: String = nameId.getText
@Cached(synchronized = false, ModCount.anyScalaPsiModificationCount, this)
def nameContext: PsiElement =
this.withParentsInFile
.find(ScalaPsiUtil.isNameContext)
.orNull
override def getTextOffset: Int = nameId.getTextRange.getStartOffset
override def getName: String = ScalaNamesUtil.toJavaName(name)
def nameId: PsiElement
override def getNameIdentifier: PsiIdentifier = if (nameId != null) new JavaIdentifier(nameId) else null
override def setName(name: String): PsiElement = {
val id = nameId.getNode
val parent = id.getTreeParent
val newId = createIdentifier(name)
parent.replaceChild(id, newId)
this
}
override def getPresentation: ItemPresentation = {
val clazz: ScTemplateDefinition =
nameContext.getParent match {
case _: ScTemplateBody | _: ScEarlyDefinitions =>
PsiTreeUtil.getParentOfType(this, classOf[ScTemplateDefinition], true)
case _ if this.isInstanceOf[ScClassParameter] =>
PsiTreeUtil.getParentOfType(this, classOf[ScTemplateDefinition], true)
case _ => null
}
val parentMember: ScMember = PsiTreeUtil.getParentOfType(this, classOf[ScMember], false)
new ItemPresentation {
def getPresentableText: String = name
def getTextAttributesKey: TextAttributesKey = null
def getLocationString: String = clazz match {
case _: ScTypeDefinition => "(" + clazz.qualifiedName + ")"
case _: ScNewTemplateDefinition => "(<anonymous>)"
case _ => ""
}
override def getIcon(open: Boolean): Icon = parentMember match {case mem: ScMember => mem.getIcon(0) case _ => null}
}
}
override def getIcon(flags: Int): Icon =
nameContext match {
case null => null
case _: ScCaseClause => Icons.PATTERN_VAL
case x => x.getIcon(flags)
}
abstract override def getUseScope: SearchScope = {
ScalaPsiUtil.intersectScopes(super.getUseScope, nameContext match {
case member: ScMember if member != this => Some(member.getUseScope)
case caseClause: ScCaseClause => Some(new LocalSearchScope(caseClause))
case elem @ (_: ScEnumerator | _: ScGenerator) =>
Option(PsiTreeUtil.getContextOfType(elem, true, classOf[ScForStatement]))
.orElse(Option(PsiTreeUtil.getContextOfType(elem, true, classOf[ScBlock], classOf[ScMember])))
.map(new LocalSearchScope(_))
case _ => None
})
}
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/toplevel/ScNamedElement.scala | Scala | apache-2.0 | 4,195 |
/*
* Copyright 2010 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.logging
import com.twitter.util.{HandleSignal, Return, StorageUnit, Time, Try}
import java.io.{File, FilenameFilter, FileOutputStream, OutputStream}
import java.nio.charset.Charset
import java.text.SimpleDateFormat
import java.util.{Calendar, Date, logging => javalog}
sealed abstract class Policy
object Policy {
case object Never extends Policy
case object Hourly extends Policy
case object Daily extends Policy
case class Weekly(dayOfWeek: Int) extends Policy
case object SigHup extends Policy
case class MaxSize(size: StorageUnit) extends Policy
private[this] val singletonPolicyNames: Map[String, Policy] =
Map("never" -> Never, "hourly" -> Hourly, "daily" -> Daily, "sighup" -> SigHup)
// Regex object that matches "Weekly(n)" and extracts the `dayOfWeek` number.
private[this] val weeklyRegex = """(?i)weekly\(([1-7]+)\)""".r
/**
* Parse a string into a Policy object. Parsing rules are as follows:
*
* - Case-insensitive names of singleton Policy objects (e.g. Never, Hourly,
* Daily) are parsed into their corresponding objects.
* - "Weekly(n)" is parsed into `Weekly` objects with `n` as the day-of-week
* integer.
* - util-style data size strings (e.g. 3.megabytes, 1.gigabyte) are
* parsed into `StorageUnit` objects and used to produce `MaxSize` policies.
* See `StorageUnit.parse(String)` for more details.
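*
* Illustrative examples (based on the rules above):
* {{{
* Policy.parse("never") // Policy.Never
* Policy.parse("Weekly(1)") // Policy.Weekly(1)
* Policy.parse("3.megabytes") // Policy.MaxSize of 3 megabytes
* }}}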
*/
def parse(s: String): Policy =
(s, singletonPolicyNames.get(s.toLowerCase), Try(StorageUnit.parse(s.toLowerCase))) match {
case (weeklyRegex(dayOfWeek), _, _) => Weekly(dayOfWeek.toInt)
case (_, Some(singleton), _) => singleton
case (_, _, Return(storageUnit)) => MaxSize(storageUnit)
case _ => throw new Exception("Invalid log roll policy: " + s)
}
}
object FileHandler {
val UTF8 = Charset.forName("UTF-8")
/**
* Generates a HandlerFactory that returns a FileHandler
*
* @param filename
* Filename to log to.
*
* @param rollPolicy
* When to roll the logfile.
*
* @param append
* Append to an existing logfile, or truncate it?
*
* @param rotateCount
* How many rotated logfiles to keep around, maximum. -1 means to keep them all.
*/
def apply(
filename: String,
rollPolicy: Policy = Policy.Never,
append: Boolean = true,
rotateCount: Int = -1,
formatter: Formatter = new Formatter(),
level: Option[Level] = None
) = () => new FileHandler(filename, rollPolicy, append, rotateCount, formatter, level)
}
/**
* A log handler that writes log entries into a file, and rolls this file
* at a requested interval (hourly, daily, or weekly).
*/
class FileHandler(
path: String,
rollPolicy: Policy,
val append: Boolean,
rotateCount: Int,
formatter: Formatter,
level: Option[Level])
extends Handler(formatter, level) {
// This converts relative paths to absolute paths, as expected
val (filename, name) = {
val f = new File(path)
(f.getAbsolutePath, f.getName)
}
val (filenamePrefix, filenameSuffix) = {
val n = filename.lastIndexOf('.')
if (n > 0) {
(filename.substring(0, n), filename.substring(n))
} else {
(filename, "")
}
}
// Thread-safety is guarded by synchronized on this
private var stream: OutputStream = null
@volatile private var openTime: Long = 0
// Thread-safety is guarded by synchronized on this
private var nextRollTime: Option[Long] = None
// Thread-safety is guarded by synchronized on this
private var bytesWrittenToFile: Long = 0
private val maxFileSize: Option[StorageUnit] = rollPolicy match {
case Policy.MaxSize(size) => Some(size)
case _ => None
}
openLog()
// If nextRollTime.isDefined by openLog(), then it will always remain isDefined.
// This allows us to avoid volatile reads in the publish method.
private val examineRollTime = nextRollTime.isDefined
if (rollPolicy == Policy.SigHup) {
HandleSignal("HUP") { signal =>
val oldStream = stream
synchronized {
stream = openStream()
}
try {
oldStream.close()
} catch {
case e: Throwable => handleThrowable(e)
}
}
}
def flush() {
synchronized {
stream.flush()
}
}
def close() {
synchronized {
flush()
try {
stream.close()
} catch {
case e: Throwable => handleThrowable(e)
}
}
}
private def openStream(): OutputStream = {
val dir = new File(filename).getParentFile
if ((dir ne null) && !dir.exists) dir.mkdirs
new FileOutputStream(filename, append)
}
private def openLog() {
synchronized {
stream = openStream()
openTime = Time.now.inMilliseconds
nextRollTime = computeNextRollTime(openTime)
bytesWrittenToFile = 0
}
}
/**
* Compute the suffix for a rolled logfile, based on the roll policy.
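* For example, under Policy.Daily a date of 2015-03-04 yields "20150304" (the exact value depends on the formatter's calendar).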
*/
def timeSuffix(date: Date) = {
val dateFormat = rollPolicy match {
case Policy.Never => new SimpleDateFormat("yyyy")
case Policy.SigHup => new SimpleDateFormat("yyyy")
case Policy.Hourly => new SimpleDateFormat("yyyyMMdd-HH")
case Policy.Daily => new SimpleDateFormat("yyyyMMdd")
case Policy.Weekly(_) => new SimpleDateFormat("yyyyMMdd")
case Policy.MaxSize(_) => new SimpleDateFormat("yyyyMMdd-HHmmss")
}
dateFormat.setCalendar(formatter.calendar)
dateFormat.format(date)
}
/**
* Return the time (in absolute milliseconds) of the next desired
* logfile roll.
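* For example, under Policy.Daily this is the upcoming midnight in the formatter's calendar;
* Policy.Never, Policy.SigHup and Policy.MaxSize yield None.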
*/
def computeNextRollTime(now: Long): Option[Long] = {
lazy val next = {
val n = formatter.calendar.clone.asInstanceOf[Calendar]
n.setTimeInMillis(now)
n.set(Calendar.MILLISECOND, 0)
n.set(Calendar.SECOND, 0)
n.set(Calendar.MINUTE, 0)
n
}
val rv = rollPolicy match {
case Policy.MaxSize(_) | Policy.Never | Policy.SigHup => None
case Policy.Hourly => {
next.add(Calendar.HOUR_OF_DAY, 1)
Some(next)
}
case Policy.Daily => {
next.set(Calendar.HOUR_OF_DAY, 0)
next.add(Calendar.DAY_OF_MONTH, 1)
Some(next)
}
case Policy.Weekly(weekday) => {
next.set(Calendar.HOUR_OF_DAY, 0)
do {
next.add(Calendar.DAY_OF_MONTH, 1)
} while (next.get(Calendar.DAY_OF_WEEK) != weekday)
Some(next)
}
}
rv map { _.getTimeInMillis }
}
/**
* Delete files when "too many" have accumulated.
* This duplicates logrotate's "rotate count" option.
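* For example, rotateCount = 3 keeps at most the three most recent rotated files (besides the active log),
* while rotateCount = -1 keeps them all.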
*/
private def removeOldFiles() {
if (rotateCount >= 0) {
// collect files which are not `filename`, but which share the prefix/suffix
val prefixName = new File(filenamePrefix).getName
val rotatedFiles =
new File(filename).getParentFile().listFiles(
new FilenameFilter {
def accept(f: File, fname: String): Boolean =
fname != name && fname.startsWith(prefixName) && fname.endsWith(filenameSuffix)
}
).sortBy(_.getName)
val toDeleteCount = math.max(0, rotatedFiles.size - rotateCount)
rotatedFiles.take(toDeleteCount).foreach(_.delete())
}
}
def roll() = synchronized {
stream.close()
val newFilename = filenamePrefix + "-" + timeSuffix(new Date(openTime)) + filenameSuffix
new File(filename).renameTo(new File(newFilename))
openLog()
removeOldFiles()
}
def publish(record: javalog.LogRecord) {
try {
val formattedLine = getFormatter.format(record)
val formattedBytes = formattedLine.getBytes(FileHandler.UTF8)
val lineSizeBytes = formattedBytes.length
if (examineRollTime) {
// Only allow a single thread at a time to do a roll
synchronized {
nextRollTime foreach { time =>
if (Time.now.inMilliseconds > time) roll()
}
}
}
maxFileSize foreach { size =>
synchronized {
if (bytesWrittenToFile + lineSizeBytes > size.bytes) roll()
}
}
synchronized {
stream.write(formattedBytes)
stream.flush()
bytesWrittenToFile += lineSizeBytes
}
} catch {
case e: Throwable => handleThrowable(e)
}
}
private def handleThrowable(e: Throwable) {
System.err.println(Formatter.formatStackTrace(e, 30).mkString("\n"))
}
}
| mosesn/util | util-logging/src/main/scala/com/twitter/logging/FileHandler.scala | Scala | apache-2.0 | 8,974 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import scala.collection.mutable
import org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.expressions.{ListQuery, SubqueryExpression}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.plans.physical.UnspecifiedDistribution
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.catalyst.trees.TreePattern.{DYNAMIC_PRUNING_SUBQUERY, IN_SUBQUERY, SCALAR_SUBQUERY}
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.command.{DataWritingCommandExec, ExecutedCommandExec}
import org.apache.spark.sql.execution.datasources.v2.V2CommandExec
import org.apache.spark.sql.execution.exchange.Exchange
import org.apache.spark.sql.internal.SQLConf
/**
* This rule wraps the query plan with an [[AdaptiveSparkPlanExec]], which executes the query plan
* and re-optimize the plan during execution based on runtime data statistics.
*
* Note that this rule is stateful and thus should not be reused across query executions.
*/
case class InsertAdaptiveSparkPlan(
adaptiveExecutionContext: AdaptiveExecutionContext) extends Rule[SparkPlan] {
override def apply(plan: SparkPlan): SparkPlan = applyInternal(plan, false)
private def applyInternal(plan: SparkPlan, isSubquery: Boolean): SparkPlan = plan match {
case _ if !conf.adaptiveExecutionEnabled => plan
case _: ExecutedCommandExec => plan
case _: CommandResultExec => plan
case c: DataWritingCommandExec => c.copy(child = apply(c.child))
case c: V2CommandExec => c.withNewChildren(c.children.map(apply))
case _ if shouldApplyAQE(plan, isSubquery) =>
if (supportAdaptive(plan)) {
try {
// Plan sub-queries recursively and pass in the shared stage cache for exchange reuse.
// Fall back to non-AQE mode if AQE is not supported in any of the sub-queries.
val subqueryMap = buildSubqueryMap(plan)
val planSubqueriesRule = PlanAdaptiveSubqueries(subqueryMap)
val preprocessingRules = Seq(
planSubqueriesRule)
// Run pre-processing rules.
val newPlan = AdaptiveSparkPlanExec.applyPhysicalRules(plan, preprocessingRules)
logDebug(s"Adaptive execution enabled for plan: $plan")
AdaptiveSparkPlanExec(newPlan, adaptiveExecutionContext, preprocessingRules, isSubquery)
} catch {
case SubqueryAdaptiveNotSupportedException(subquery) =>
logWarning(s"${SQLConf.ADAPTIVE_EXECUTION_ENABLED.key} is enabled " +
s"but is not supported for sub-query: $subquery.")
plan
}
} else {
logWarning(s"${SQLConf.ADAPTIVE_EXECUTION_ENABLED.key} is enabled " +
s"but is not supported for query: $plan.")
plan
}
case _ => plan
}
// AQE is only useful when the query has exchanges or sub-queries. This method returns true if
// one of the following conditions is satisfied:
// - The config ADAPTIVE_EXECUTION_FORCE_APPLY is true.
// - The input query is from a sub-query. When this happens, it means we've already decided to
// apply AQE for the main query and we must continue to do it.
// - The query contains exchanges.
// - The query may need to add exchanges. It's overkill to run `EnsureRequirements` here, so
// we just check `SparkPlan.requiredChildDistribution` and see if it's possible that
// the query needs to add exchanges later.
// - The query contains sub-query.
private def shouldApplyAQE(plan: SparkPlan, isSubquery: Boolean): Boolean = {
conf.getConf(SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY) || isSubquery || {
plan.find {
case _: Exchange => true
case p if !p.requiredChildDistribution.forall(_ == UnspecifiedDistribution) => true
case p => p.expressions.exists(_.find {
case _: SubqueryExpression => true
case _ => false
}.isDefined)
}.isDefined
}
}
private def supportAdaptive(plan: SparkPlan): Boolean = {
sanityCheck(plan) &&
!plan.logicalLink.exists(_.isStreaming) &&
plan.children.forall(supportAdaptive)
}
private def sanityCheck(plan: SparkPlan): Boolean =
plan.logicalLink.isDefined
/**
* Returns an expression-id-to-execution-plan map for all the sub-queries.
* For each sub-query, generate the adaptive execution plan for each sub-query by applying this
* rule, or reuse the execution plan from another sub-query of the same semantics if possible.
*/
private def buildSubqueryMap(plan: SparkPlan): Map[Long, BaseSubqueryExec] = {
val subqueryMap = mutable.HashMap.empty[Long, BaseSubqueryExec]
if (!plan.containsAnyPattern(SCALAR_SUBQUERY, IN_SUBQUERY, DYNAMIC_PRUNING_SUBQUERY)) {
return subqueryMap.toMap
}
plan.foreach(_.expressions.foreach(_.foreach {
case expressions.ScalarSubquery(p, _, exprId, _)
if !subqueryMap.contains(exprId.id) =>
val executedPlan = compileSubquery(p)
verifyAdaptivePlan(executedPlan, p)
val subquery = SubqueryExec.createForScalarSubquery(
s"subquery#${exprId.id}", executedPlan)
subqueryMap.put(exprId.id, subquery)
case expressions.InSubquery(_, ListQuery(query, _, exprId, _, _))
if !subqueryMap.contains(exprId.id) =>
val executedPlan = compileSubquery(query)
verifyAdaptivePlan(executedPlan, query)
val subquery = SubqueryExec(s"subquery#${exprId.id}", executedPlan)
subqueryMap.put(exprId.id, subquery)
case expressions.DynamicPruningSubquery(value, buildPlan,
buildKeys, broadcastKeyIndex, onlyInBroadcast, exprId)
if !subqueryMap.contains(exprId.id) =>
val executedPlan = compileSubquery(buildPlan)
verifyAdaptivePlan(executedPlan, buildPlan)
val name = s"dynamicpruning#${exprId.id}"
val subquery = SubqueryAdaptiveBroadcastExec(
name, broadcastKeyIndex, buildKeys, executedPlan)
subqueryMap.put(exprId.id, subquery)
case _ =>
}))
subqueryMap.toMap
}
def compileSubquery(plan: LogicalPlan): SparkPlan = {
// Apply the same instance of this rule to sub-queries so that sub-queries all share the
// same `stageCache` for Exchange reuse.
this.applyInternal(
QueryExecution.createSparkPlan(adaptiveExecutionContext.session,
adaptiveExecutionContext.session.sessionState.planner, plan.clone()), true)
}
private def verifyAdaptivePlan(plan: SparkPlan, logicalPlan: LogicalPlan): Unit = {
if (!plan.isInstanceOf[AdaptiveSparkPlanExec]) {
throw SubqueryAdaptiveNotSupportedException(logicalPlan)
}
}
}
private case class SubqueryAdaptiveNotSupportedException(plan: LogicalPlan) extends Exception {}
| wangmiao1981/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/InsertAdaptiveSparkPlan.scala | Scala | apache-2.0 | 7,650 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.cli.test
import java.util.Date
import scala.concurrent.duration.DurationInt
import scala.language.postfixOps
import org.junit.runner.RunWith
import org.scalatest.BeforeAndAfter
import org.scalatest.junit.JUnitRunner
import common.TestHelpers
import common.TestUtils
import common.Wsk
import common.WskAdmin
import common.WskProps
import common.WskTestHelpers
import spray.json._
import spray.json.DefaultJsonProtocol.StringJsonFormat
import whisk.core.WhiskConfig
import whisk.core.database.test.DbUtils
import whisk.core.entity._
import whisk.core.entity.test.ExecHelpers
/**
* Tests that "old-style" sequences can be invoked
*/
@RunWith(classOf[JUnitRunner])
class SequenceMigrationTests
extends TestHelpers
with BeforeAndAfter
with DbUtils
with ExecHelpers
with WskTestHelpers {
implicit val wskprops = WskProps()
val wsk = new Wsk
val whiskConfig = new WhiskConfig(WhiskEntityStore.requiredProperties)
// handle on the entity datastore
val entityStore = WhiskEntityStore.datastore(whiskConfig)
val (user, namespace) = WskAdmin.getUser(wskprops.authKey)
val allowedActionDuration = 120 seconds
behavior of "Sequence Migration"
it should "check default namespace '_' is preserved in WhiskAction of old style sequence" in {
// read json file and add the appropriate namespace
val seqJsonFile = "seq_type_2.json"
val jsonFile = TestUtils.getTestActionFilename(seqJsonFile)
val source = scala.io.Source.fromFile(jsonFile)
val jsonString = try source.mkString finally source.close()
val entityJson = jsonString.parseJson.asJsObject
// add default namespace (i.e., user) to the json object
val entityJsonWithNamespace = JsObject(entityJson.fields + ("namespace" -> JsString(namespace)))
val wskEntity = entityJsonWithNamespace.convertTo[WhiskAction]
wskEntity.exec match {
case SequenceExec(components) =>
// check '_' is preserved
components.size shouldBe 2
assert(components.forall { _.path.namespace.contains('_') }, "default namespace lost")
case _ => assert(false)
}
}
it should "invoke an old-style (kind sequence) sequence and get the result" in {
val seqName = "seq_type_2"
testOldStyleSequence(seqName, s"$seqName.json")
}
it should "not display code from an old sequence on action get" in {
// install sequence in db
val seqName = "seq_type_2"
installActionInDb(s"$seqName.json")
val stdout = wsk.action.get(seqName).stdout
stdout.contains("code") shouldBe false
}
/**
* helper function that tests an old-style sequence based on two actions, echo and word_count
* @param seqName the name of the sequence
* @param seqFileName the name of the json file that contains the whisk action associated with the sequence
*/
private def testOldStyleSequence(seqName: String, seqFileName: String) = {
// create entities to insert in the entity store
val echo = "echo.json"
val wc = "word_count.json"
val entities = Seq(echo, wc, seqFileName)
for (entity <- entities) {
installActionInDb(entity)
}
// invoke sequence
val now = "it is now " + new Date()
val run = wsk.action.invoke(seqName, Map("payload" -> now.mkString("\n").toJson))
withActivation(wsk.activation, run, totalWait = allowedActionDuration) {
activation =>
val result = activation.response.result.get
result.fields.get("count") shouldBe Some(JsNumber(now.split(" ").size))
}
}
/**
* helper function that takes a json file containing a whisk action (minus the namespace), adds the namespace and installs it in the db
*/
private def installActionInDb(actionJson: String) = {
// read json file and add the appropriate namespace
val jsonFile = TestUtils.getTestActionFilename(actionJson)
val source = scala.io.Source.fromFile(jsonFile)
val jsonString = try source.mkString finally source.close()
val entityJson = jsonString.parseJson.asJsObject
// add default namespace (i.e., user) to the json object
val entityJsonWithNamespace = JsObject(entityJson.fields + ("namespace" -> JsString(namespace)))
val wskEntity = entityJsonWithNamespace.convertTo[WhiskAction]
implicit val tid = transid() // needed for put db below
put(entityStore, wskEntity)
}
after {
cleanup() // cleanup entities from db
}
}
| lzbj/openwhisk | tests/src/test/scala/whisk/core/cli/test/SequenceMigrationTests.scala | Scala | apache-2.0 | 5,462 |
/*
Copyright 2014 Reo_SP
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reo7sp.boardpp.board
import com.badlogic.gdx.graphics.Color
import scala.collection.mutable
/**
* Created by reo7sp on 1/17/14 at 2:42 PM
*/
class Canvas {
val points = new mutable.HashSet[Point]
def +=(x: Int, y: Int): Canvas = +=(x, y, BoardSession.color, BoardSession.radius)
def +=(x: Int, y: Int, color: Color, radius: Int): Canvas = {
points += Point(x, y, color, radius)
this
}
def -=(x: Int, y: Int, radius: Int) = {
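// Note: despite its name, this checks whether the square anchored at (x1, y1) with side radius1
// lies strictly inside the eraser square of side `radius` centered at (x, y).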
def intersects(x1: Int, y1: Int, radius1: Int) = {
val x0 = x - radius / 2
val y0 = y - radius / 2
val xmin = x1
val xmax = xmin + radius1
val ymin = y1
val ymax = ymin + radius1
((xmin > x0 && xmin < x0 + radius) && (xmax > x0 && xmax < x0 + radius)) && ((ymin > y0 && ymin < y0 + radius) && (ymax > y0 && ymax < y0 + radius))
}
for (point <- points) {
if (intersects(point.x, point.y, point.radius)) {
points -= point
}
}
this
}
case class Point(x: Int, y: Int, color: Color, radius: Int)
}
| reo7sp/BoardPP | src/main/java/reo7sp/boardpp/board/Canvas.scala | Scala | apache-2.0 | 1,604 |
/*Β§
===========================================================================
KnapScal - Core
===========================================================================
Copyright (C) 2015-2016 Gianluca Costa
===========================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package info.gianlucacosta.knapscal.knapsack
import org.apache.commons.math3.fraction.Fraction
case class Item(profit: Int, weight: Int) extends Ordered[Item] {
require(profit > 0, s"Profit ${profit} must be > 0")
require(weight > 0, s"Weight ${weight} must be > 0")
val relativeProfit: Fraction = new Fraction(profit, weight)
override def compare(that: Item): Int = -relativeProfit.compareTo(that.relativeProfit)
override def toString: String = s"(${profit}, ${weight})"
}
| giancosta86/KnapScal-core | src/main/scala/info/gianlucacosta/knapscal/knapsack/Item.scala | Scala | apache-2.0 | 1,414 |
package eu.timepit.refined.internal
import eu.timepit.refined.api.{RefType, Validate}
import eu.timepit.refined.macros.RefineMacro
@deprecated(
"RefineMFullyApplied has been replaced in favor or RefinedTypeOps. " +
"Replace 'new RefineMFullyApplied[F, T, P]' with 'new RefinedTypeOps[F[T, P], T]'.",
"0.9.1"
)
final class RefineMFullyApplied[F[_, _], T, P] {
def apply(t: T)(implicit rt: RefType[F], v: Validate[T, P]): F[T, P] =
macro RefineMacro.impl[F, T, P]
}
| fthomas/refined | modules/core/shared/src/main/scala-3.0-/eu/timepit/refined/internal/RefineMFullyApplied.scala | Scala | mit | 481 |
package com.svds.kafka.connect.opentsdb
import org.scalatest.FlatSpec
import scala.collection.JavaConversions._
import scala.collection.mutable.Map
class OpenTsdbSinkConnectorSpec extends FlatSpec {
private val connector = new OpenTsdbSinkConnector
it should "create a list of the number of tasks configurations specified by maxTasks, even if each task configuration element of the list is empty" in {
assert(this.connector.taskConfigs(2).size == 2)
}
it should "have default configuration with localhost and 4242 as the OpenTSDB host & port" in {
val config = this.connector.config
val props = Map[String, String]()
val parsedProps = config.parse(props)
assert(parsedProps(OpenTsdbConnectorConfig.OpenTsdbHost) == "localhost")
assert(parsedProps(OpenTsdbConnectorConfig.OpenTsdbPort) == 4242)
}
it should "return OpenTsdbSinkTask as its task class" in {
assert(this.connector.taskClass == classOf[OpenTsdbSinkTask])
}
}
| jeff-svds/kafka-connect-opentsdb | src/test/scala/com/svds/kafka/connect/opentsdb/OpenTsdbSinkConnectorSpec.scala | Scala | apache-2.0 | 970 |
package com.twitter.finagle.thrift.service
import com.twitter.finagle.Filter
/**
* Used in conjunction with a `ServicePerEndpoint` builder to allow for filtering
* of a `ServicePerEndpoint`.
*/
trait Filterable[+T] extends com.twitter.finagle.service.Filterable[T] {
/**
* Prepend the given type-agnostic [[Filter]].
*/
def filtered(filter: Filter.TypeAgnostic): T
}
| twitter/finagle | finagle-thrift/src/main/scala/com/twitter/finagle/thrift/service/Filterable.scala | Scala | apache-2.0 | 384 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.io.File
import java.net.URI
import org.apache.hadoop.fs.Path
import org.apache.parquet.format.converter.ParquetMetadataConverter.NO_FILTER
import org.apache.parquet.hadoop.ParquetFileReader
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.SparkException
import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionException, TableAlreadyExistsException}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.connector.catalog.CatalogManager
import org.apache.spark.sql.connector.catalog.SupportsNamespaces.{PROP_OWNER_NAME, PROP_OWNER_TYPE}
import org.apache.spark.sql.execution.command.{DDLSuite, DDLUtils}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.hive.HiveExternalCatalog
import org.apache.spark.sql.hive.HiveUtils.{CONVERT_METASTORE_ORC, CONVERT_METASTORE_PARQUET}
import org.apache.spark.sql.hive.orc.OrcFileOperator
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.{HiveSerDe, SQLConf}
import org.apache.spark.sql.internal.SQLConf.ORC_IMPLEMENTATION
import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
// TODO(gatorsmile): combine HiveCatalogedDDLSuite and HiveDDLSuite
class HiveCatalogedDDLSuite extends DDLSuite with TestHiveSingleton with BeforeAndAfterEach {
override def afterEach(): Unit = {
try {
// drop all databases, tables and functions after each test
spark.sessionState.catalog.reset()
} finally {
super.afterEach()
}
}
protected override def generateTable(
catalog: SessionCatalog,
name: TableIdentifier,
isDataSource: Boolean,
partitionCols: Seq[String] = Seq("a", "b")): CatalogTable = {
val storage =
if (isDataSource) {
val serde = HiveSerDe.sourceToSerDe("parquet")
assert(serde.isDefined, "The default format is not Hive compatible")
CatalogStorageFormat(
locationUri = Some(catalog.defaultTablePath(name)),
inputFormat = serde.get.inputFormat,
outputFormat = serde.get.outputFormat,
serde = serde.get.serde,
compressed = false,
properties = Map.empty)
} else {
CatalogStorageFormat(
locationUri = Some(catalog.defaultTablePath(name)),
inputFormat = Some("org.apache.hadoop.mapred.SequenceFileInputFormat"),
outputFormat = Some("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"),
serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"),
compressed = false,
properties = Map("serialization.format" -> "1"))
}
val metadata = new MetadataBuilder()
.putString("key", "value")
.build()
val schema = new StructType()
.add("col1", "int", nullable = true, metadata = metadata)
.add("col2", "string")
CatalogTable(
identifier = name,
tableType = CatalogTableType.EXTERNAL,
storage = storage,
schema = schema.copy(
fields = schema.fields ++ partitionCols.map(StructField(_, IntegerType))),
provider = if (isDataSource) Some("parquet") else Some("hive"),
partitionColumnNames = partitionCols,
createTime = 0L,
createVersion = org.apache.spark.SPARK_VERSION,
tracksPartitionsInCatalog = true)
}
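  // Zeroes out timestamps and ownership and drops metastore-generated, nondeterministic
  // properties so that catalog tables can be compared structurally in the shared tests.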
protected override def normalizeCatalogTable(table: CatalogTable): CatalogTable = {
val nondeterministicProps = Set(
"CreateTime",
"transient_lastDdlTime",
"grantTime",
"lastUpdateTime",
"last_modified_by",
"last_modified_time",
"Owner:",
"COLUMN_STATS_ACCURATE",
      // The following are Hive-specific schema parameters which we do not need to match exactly.
"numFiles",
"numRows",
"rawDataSize",
"totalSize",
"totalNumberFiles",
"maxFileSize",
"minFileSize"
)
table.copy(
createTime = 0L,
lastAccessTime = 0L,
owner = "",
properties = table.properties.filterKeys(!nondeterministicProps.contains(_)),
// View texts are checked separately
viewText = None
)
}
test("alter table: set location") {
testSetLocation(isDatasourceTable = false)
}
test("alter table: set properties") {
testSetProperties(isDatasourceTable = false)
}
test("alter table: unset properties") {
testUnsetProperties(isDatasourceTable = false)
}
test("alter table: set serde") {
testSetSerde(isDatasourceTable = false)
}
test("alter table: set serde partition") {
testSetSerdePartition(isDatasourceTable = false)
}
test("alter table: change column") {
testChangeColumn(isDatasourceTable = false)
}
test("alter table: rename partition") {
testRenamePartitions(isDatasourceTable = false)
}
test("alter table: drop partition") {
testDropPartitions(isDatasourceTable = false)
}
test("alter table: add partition") {
testAddPartitions(isDatasourceTable = false)
}
test("drop table") {
testDropTable(isDatasourceTable = false)
}
test("alter datasource table add columns - orc") {
testAddColumn("orc")
}
test("alter datasource table add columns - partitioned - orc") {
testAddColumnPartitioned("orc")
}
test("SPARK-22431: illegal nested type") {
val queries = Seq(
"CREATE TABLE t USING hive AS SELECT STRUCT('a' AS `$a`, 1 AS b) q",
"CREATE TABLE t(q STRUCT<`$a`:INT, col2:STRING>, i1 INT) USING hive",
"CREATE VIEW t AS SELECT STRUCT('a' AS `$a`, 1 AS b) q")
queries.foreach(query => {
val err = intercept[SparkException] {
spark.sql(query)
}.getMessage
assert(err.contains("Cannot recognize hive type string"))
})
withView("v") {
spark.sql("CREATE VIEW v AS SELECT STRUCT('a' AS `a`, 1 AS b) q")
checkAnswer(sql("SELECT q.`a`, q.b FROM v"), Row("a", 1) :: Nil)
val err = intercept[SparkException] {
spark.sql("ALTER VIEW v AS SELECT STRUCT('a' AS `$a`, 1 AS b) q")
}.getMessage
assert(err.contains("Cannot recognize hive type string"))
}
}
test("SPARK-22431: table with nested type") {
withTable("t", "x") {
spark.sql("CREATE TABLE t(q STRUCT<`$a`:INT, col2:STRING>, i1 INT) USING PARQUET")
checkAnswer(spark.table("t"), Nil)
spark.sql("CREATE TABLE x (q STRUCT<col1:INT, col2:STRING>, i1 INT)")
checkAnswer(spark.table("x"), Nil)
}
}
test("SPARK-22431: view with nested type") {
withView("v") {
spark.sql("CREATE VIEW v AS SELECT STRUCT('a' AS `a`, 1 AS b) q")
checkAnswer(spark.table("v"), Row(Row("a", 1)) :: Nil)
spark.sql("ALTER VIEW v AS SELECT STRUCT('a' AS `b`, 1 AS b) q1")
val df = spark.table("v")
assert("q1".equals(df.schema.fields(0).name))
checkAnswer(df, Row(Row("a", 1)) :: Nil)
}
}
test("SPARK-22431: alter table tests with nested types") {
withTable("t1", "t2", "t3") {
spark.sql("CREATE TABLE t1 (q STRUCT<col1:INT, col2:STRING>, i1 INT)")
spark.sql("ALTER TABLE t1 ADD COLUMNS (newcol1 STRUCT<`col1`:STRING, col2:Int>)")
val newcol = spark.sql("SELECT * FROM t1").schema.fields(2).name
assert("newcol1".equals(newcol))
spark.sql("CREATE TABLE t2(q STRUCT<`a`:INT, col2:STRING>, i1 INT) USING PARQUET")
spark.sql("ALTER TABLE t2 ADD COLUMNS (newcol1 STRUCT<`$col1`:STRING, col2:Int>)")
spark.sql("ALTER TABLE t2 ADD COLUMNS (newcol2 STRUCT<`col1`:STRING, col2:Int>)")
val df2 = spark.table("t2")
checkAnswer(df2, Nil)
assert("newcol1".equals(df2.schema.fields(2).name))
assert("newcol2".equals(df2.schema.fields(3).name))
spark.sql("CREATE TABLE t3(q STRUCT<`$a`:INT, col2:STRING>, i1 INT) USING PARQUET")
spark.sql("ALTER TABLE t3 ADD COLUMNS (newcol1 STRUCT<`$col1`:STRING, col2:Int>)")
spark.sql("ALTER TABLE t3 ADD COLUMNS (newcol2 STRUCT<`col1`:STRING, col2:Int>)")
val df3 = spark.table("t3")
checkAnswer(df3, Nil)
assert("newcol1".equals(df3.schema.fields(2).name))
assert("newcol2".equals(df3.schema.fields(3).name))
}
}
test("SPARK-22431: negative alter table tests with nested types") {
withTable("t1") {
spark.sql("CREATE TABLE t1 (q STRUCT<col1:INT, col2:STRING>, i1 INT) USING hive")
val err = intercept[SparkException] {
spark.sql("ALTER TABLE t1 ADD COLUMNS (newcol1 STRUCT<`$col1`:STRING, col2:Int>)")
}.getMessage
assert(err.contains("Cannot recognize hive type string:"))
}
}
test("SPARK-26630: table with old input format and without partitioned will use HadoopRDD") {
withTable("table_old", "table_ctas_old") {
sql(
"""
|CREATE TABLE table_old (col1 LONG, col2 STRING, col3 DOUBLE, col4 BOOLEAN)
|STORED AS
|INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
|OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
""".stripMargin)
sql(
"""
|INSERT INTO table_old
|VALUES (2147483648, 'AAA', 3.14, false), (2147483649, 'BBB', 3.142, true)
""".stripMargin)
checkAnswer(
sql("SELECT col1, col2, col3, col4 FROM table_old"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
sql("CREATE TABLE table_ctas_old AS SELECT col1, col2, col3, col4 FROM table_old")
checkAnswer(
sql("SELECT col1, col2, col3, col4 from table_ctas_old"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
}
}
test("SPARK-26630: table with old input format and partitioned will use HadoopRDD") {
withTable("table_pt_old", "table_ctas_pt_old") {
sql(
"""
|CREATE TABLE table_pt_old (col1 LONG, col2 STRING, col3 DOUBLE, col4 BOOLEAN)
|PARTITIONED BY (pt INT)
|STORED AS
|INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
|OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
""".stripMargin)
sql(
"""
|INSERT INTO table_pt_old PARTITION (pt = 1)
|VALUES (2147483648, 'AAA', 3.14, false), (2147483649, 'BBB', 3.142, true)
""".stripMargin)
checkAnswer(
sql("SELECT col1, col2, col3, col4 FROM table_pt_old WHERE pt = 1"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
sql("CREATE TABLE table_ctas_pt_old AS SELECT col1, col2, col3, col4 FROM table_pt_old")
checkAnswer(
sql("SELECT col1, col2, col3, col4 from table_ctas_pt_old"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
}
}
test("SPARK-26630: table with new input format and without partitioned will use NewHadoopRDD") {
withTable("table_new", "table_ctas_new") {
sql(
"""
|CREATE TABLE table_new (col1 LONG, col2 STRING, col3 DOUBLE, col4 BOOLEAN)
|STORED AS
|INPUTFORMAT 'org.apache.hadoop.mapreduce.lib.input.TextInputFormat'
|OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
""".stripMargin)
sql(
"""
|INSERT INTO table_new
|VALUES (2147483648, 'AAA', 3.14, false), (2147483649, 'BBB', 3.142, true)
""".stripMargin)
checkAnswer(
sql("SELECT col1, col2, col3, col4 FROM table_new"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
sql("CREATE TABLE table_ctas_new AS SELECT col1, col2, col3, col4 FROM table_new")
checkAnswer(
sql("SELECT col1, col2, col3, col4 from table_ctas_new"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
}
}
test("SPARK-26630: table with new input format and partitioned will use NewHadoopRDD") {
withTable("table_pt_new", "table_ctas_pt_new") {
sql(
"""
|CREATE TABLE table_pt_new (col1 LONG, col2 STRING, col3 DOUBLE, col4 BOOLEAN)
|PARTITIONED BY (pt INT)
|STORED AS
|INPUTFORMAT 'org.apache.hadoop.mapreduce.lib.input.TextInputFormat'
|OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
""".stripMargin)
sql(
"""
|INSERT INTO table_pt_new PARTITION (pt = 1)
|VALUES (2147483648, 'AAA', 3.14, false), (2147483649, 'BBB', 3.142, true)
""".stripMargin)
checkAnswer(
sql("SELECT col1, col2, col3, col4 FROM table_pt_new WHERE pt = 1"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
sql("CREATE TABLE table_ctas_pt_new AS SELECT col1, col2, col3, col4 FROM table_pt_new")
checkAnswer(
sql("SELECT col1, col2, col3, col4 from table_ctas_pt_new"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
}
}
test("Create Table LIKE USING Hive built-in ORC in Hive catalog") {
val catalog = spark.sessionState.catalog
withTable("s", "t") {
sql("CREATE TABLE s(a INT, b INT) USING parquet")
val source = catalog.getTableMetadata(TableIdentifier("s"))
assert(source.provider == Some("parquet"))
sql("CREATE TABLE t LIKE s USING org.apache.spark.sql.hive.orc")
val table = catalog.getTableMetadata(TableIdentifier("t"))
assert(table.provider == Some("org.apache.spark.sql.hive.orc"))
}
}
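  // Extracts the "Owner Name" row from DESCRIBE DATABASE EXTENDED output and compares it
  // with the expected owner.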
private def checkOwner(db: String, expected: String): Unit = {
val owner = sql(s"DESCRIBE DATABASE EXTENDED $db")
.where("database_description_item='Owner Name'")
.collect().head.getString(1)
assert(owner === expected)
}
test("Database Ownership") {
val catalog = spark.sessionState.catalog
try {
val db1 = "spark_29425_1"
val db2 = "spark_29425_2"
val owner = "spark_29425"
sql(s"CREATE DATABASE $db1")
checkOwner(db1, Utils.getCurrentUserName())
sql(s"ALTER DATABASE $db1 SET DBPROPERTIES ('a'='a')")
checkOwner(db1, Utils.getCurrentUserName())
// TODO: Specify ownership should be forbidden after we implement `SET OWNER` syntax
sql(s"CREATE DATABASE $db2 WITH DBPROPERTIES('ownerName'='$owner')")
checkOwner(db2, owner)
sql(s"ALTER DATABASE $db2 SET DBPROPERTIES ('a'='a')")
checkOwner(db2, owner)
// TODO: Changing ownership should be forbidden after we implement `SET OWNER` syntax
sql(s"ALTER DATABASE $db2 SET DBPROPERTIES ('ownerName'='a')")
checkOwner(db2, "a")
} finally {
catalog.reset()
}
}
}
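// DDL tests that exercise Hive-specific behavior (Hive serde tables, views, partitions and
// databases) directly against the Hive catalog, beyond what the shared DDLSuite covers.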
class HiveDDLSuite
extends QueryTest with SQLTestUtils with TestHiveSingleton with BeforeAndAfterEach {
import testImplicits._
val hiveFormats = Seq("PARQUET", "ORC", "TEXTFILE", "SEQUENCEFILE", "RCFILE", "AVRO")
private val reversedProperties = Seq("ownerName", "ownerType")
override def afterEach(): Unit = {
try {
// drop all databases, tables and functions after each test
spark.sessionState.catalog.reset()
} finally {
super.afterEach()
}
}
  // Checks whether the directory that stores the table's data exists.
private def tableDirectoryExists(
tableIdentifier: TableIdentifier,
dbPath: Option[String] = None): Boolean = {
val expectedTablePath =
if (dbPath.isEmpty) {
hiveContext.sessionState.catalog.defaultTablePath(tableIdentifier)
} else {
new Path(new Path(dbPath.get), tableIdentifier.table).toUri
}
val filesystemPath = new Path(expectedTablePath.toString)
val fs = filesystemPath.getFileSystem(spark.sessionState.newHadoopConf())
fs.exists(filesystemPath)
}
test("drop tables") {
withTable("tab1") {
val tabName = "tab1"
assert(!tableDirectoryExists(TableIdentifier(tabName)))
sql(s"CREATE TABLE $tabName(c1 int)")
assert(tableDirectoryExists(TableIdentifier(tabName)))
sql(s"DROP TABLE $tabName")
assert(!tableDirectoryExists(TableIdentifier(tabName)))
sql(s"DROP TABLE IF EXISTS $tabName")
sql(s"DROP VIEW IF EXISTS $tabName")
}
}
test("create a hive table without schema") {
import testImplicits._
withTempPath { tempDir =>
withTable("tab1", "tab2") {
(("a", "b") :: Nil).toDF().write.json(tempDir.getCanonicalPath)
var e = intercept[AnalysisException] { sql("CREATE TABLE tab1 USING hive") }.getMessage
assert(e.contains("Unable to infer the schema. The schema specification is required to " +
"create the table `default`.`tab1`"))
e = intercept[AnalysisException] {
sql(s"CREATE TABLE tab2 USING hive location '${tempDir.getCanonicalPath}'")
}.getMessage
assert(e.contains("Unable to infer the schema. The schema specification is required to " +
"create the table `default`.`tab2`"))
}
}
}
test("drop external tables in default database") {
withTempDir { tmpDir =>
val tabName = "tab1"
withTable(tabName) {
assert(tmpDir.listFiles.isEmpty)
sql(
s"""
|create table $tabName
|stored as parquet
|location '${tmpDir.toURI}'
|as select 1, '3'
""".stripMargin)
val hiveTable =
spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
assert(hiveTable.tableType == CatalogTableType.EXTERNAL)
assert(tmpDir.listFiles.nonEmpty)
sql(s"DROP TABLE $tabName")
assert(tmpDir.listFiles.nonEmpty)
}
}
}
test("drop external data source table in default database") {
withTempDir { tmpDir =>
val tabName = "tab1"
withTable(tabName) {
assert(tmpDir.listFiles.isEmpty)
withSQLConf(SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key -> "true") {
Seq(1 -> "a").toDF("i", "j")
.write
.mode(SaveMode.Overwrite)
.format("parquet")
.option("path", tmpDir.toString)
.saveAsTable(tabName)
}
val hiveTable =
spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
        // This data source table is an external table
assert(hiveTable.tableType == CatalogTableType.EXTERNAL)
assert(tmpDir.listFiles.nonEmpty)
sql(s"DROP TABLE $tabName")
// The data are not deleted since the table type is EXTERNAL
assert(tmpDir.listFiles.nonEmpty)
}
}
}
test("create table and view with comment") {
val catalog = spark.sessionState.catalog
val tabName = "tab1"
withTable(tabName) {
sql(s"CREATE TABLE $tabName(c1 int) COMMENT 'BLABLA'")
val viewName = "view1"
withView(viewName) {
sql(s"CREATE VIEW $viewName COMMENT 'no comment' AS SELECT * FROM $tabName")
val tableMetadata = catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
val viewMetadata = catalog.getTableMetadata(TableIdentifier(viewName, Some("default")))
assert(tableMetadata.comment == Option("BLABLA"))
assert(viewMetadata.comment == Option("no comment"))
// Ensure that `comment` is removed from the table property
assert(tableMetadata.properties.get("comment").isEmpty)
assert(viewMetadata.properties.get("comment").isEmpty)
}
}
}
test("create Hive-serde table and view with unicode columns and comment") {
val catalog = spark.sessionState.catalog
val tabName = "tab1"
val viewName = "view1"
// scalastyle:off
    // Non-ASCII characters are not allowed in the source code, so we disable the scalastyle check here.
    val colName1 = "和"
    val colName2 = "尼"
    val comment = "庙"
// scalastyle:on
withTable(tabName) {
sql(s"""
|CREATE TABLE $tabName(`$colName1` int COMMENT '$comment')
|COMMENT '$comment'
|PARTITIONED BY (`$colName2` int)
""".stripMargin)
sql(s"INSERT OVERWRITE TABLE $tabName partition (`$colName2`=2) SELECT 1")
withView(viewName) {
sql(
s"""
|CREATE VIEW $viewName(`$colName1` COMMENT '$comment', `$colName2`)
|COMMENT '$comment'
|AS SELECT `$colName1`, `$colName2` FROM $tabName
""".stripMargin)
val tableMetadata = catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
val viewMetadata = catalog.getTableMetadata(TableIdentifier(viewName, Some("default")))
assert(tableMetadata.comment == Option(comment))
assert(viewMetadata.comment == Option(comment))
assert(tableMetadata.schema.fields.length == 2 && viewMetadata.schema.fields.length == 2)
val column1InTable = tableMetadata.schema.fields.head
val column1InView = viewMetadata.schema.fields.head
assert(column1InTable.name == colName1 && column1InView.name == colName1)
assert(column1InTable.getComment() == Option(comment))
assert(column1InView.getComment() == Option(comment))
assert(tableMetadata.schema.fields(1).name == colName2 &&
viewMetadata.schema.fields(1).name == colName2)
checkAnswer(sql(s"SELECT `$colName1`, `$colName2` FROM $tabName"), Row(1, 2) :: Nil)
checkAnswer(sql(s"SELECT `$colName1`, `$colName2` FROM $viewName"), Row(1, 2) :: Nil)
}
}
}
test("create table: partition column names exist in table definition") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int) PARTITIONED BY (a string)")
}
assert(e.message == "Found duplicate column(s) in the table definition of `default`.`tbl`: `a`")
}
test("create partitioned table without specifying data type for the partition columns") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int) PARTITIONED BY (b) STORED AS parquet")
}
assert(e.message.contains("Must specify a data type for each partition column while creating " +
"Hive partitioned table."))
}
test("add/drop partition with location - managed table") {
val tab = "tab_with_partitions"
withTempDir { tmpDir =>
val basePath = new File(tmpDir.getCanonicalPath)
val part1Path = new File(basePath + "/part1")
val part2Path = new File(basePath + "/part2")
val dirSet = part1Path :: part2Path :: Nil
      // Before data insertion, all the directories are empty
assert(dirSet.forall(dir => dir.listFiles == null || dir.listFiles.isEmpty))
withTable(tab) {
sql(
s"""
|CREATE TABLE $tab (key INT, value STRING)
|PARTITIONED BY (ds STRING, hr STRING)
""".stripMargin)
sql(
s"""
|ALTER TABLE $tab ADD
|PARTITION (ds='2008-04-08', hr=11) LOCATION '${part1Path.toURI}'
|PARTITION (ds='2008-04-08', hr=12) LOCATION '${part2Path.toURI}'
""".stripMargin)
assert(dirSet.forall(dir => dir.listFiles == null || dir.listFiles.isEmpty))
sql(s"INSERT OVERWRITE TABLE $tab partition (ds='2008-04-08', hr=11) SELECT 1, 'a'")
sql(s"INSERT OVERWRITE TABLE $tab partition (ds='2008-04-08', hr=12) SELECT 2, 'b'")
// add partition will not delete the data
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
checkAnswer(
spark.table(tab),
Row(1, "a", "2008-04-08", "11") :: Row(2, "b", "2008-04-08", "12") :: Nil
)
sql(s"ALTER TABLE $tab DROP PARTITION (ds='2008-04-08', hr=11)")
// drop partition will delete the data
assert(part1Path.listFiles == null || part1Path.listFiles.isEmpty)
assert(part2Path.listFiles.nonEmpty)
sql(s"DROP TABLE $tab")
// drop table will delete the data of the managed table
assert(dirSet.forall(dir => dir.listFiles == null || dir.listFiles.isEmpty))
}
}
}
test("SPARK-19129: drop partition with a empty string will drop the whole table") {
val df = spark.createDataFrame(Seq((0, "a"), (1, "b"))).toDF("partCol1", "name")
df.write.mode("overwrite").partitionBy("partCol1").saveAsTable("partitionedTable")
val e = intercept[AnalysisException] {
spark.sql("alter table partitionedTable drop partition(partCol1='')")
}.getMessage
assert(e.contains("Partition spec is invalid. The spec ([partCol1=]) contains an empty " +
"partition column value"))
}
test("add/drop partitions - external table") {
val catalog = spark.sessionState.catalog
withTempDir { tmpDir =>
val basePath = tmpDir.getCanonicalPath
val partitionPath_1stCol_part1 = new File(basePath + "/ds=2008-04-08")
val partitionPath_1stCol_part2 = new File(basePath + "/ds=2008-04-09")
val partitionPath_part1 = new File(basePath + "/ds=2008-04-08/hr=11")
val partitionPath_part2 = new File(basePath + "/ds=2008-04-09/hr=11")
val partitionPath_part3 = new File(basePath + "/ds=2008-04-08/hr=12")
val partitionPath_part4 = new File(basePath + "/ds=2008-04-09/hr=12")
val dirSet =
tmpDir :: partitionPath_1stCol_part1 :: partitionPath_1stCol_part2 ::
partitionPath_part1 :: partitionPath_part2 :: partitionPath_part3 ::
partitionPath_part4 :: Nil
val externalTab = "extTable_with_partitions"
withTable(externalTab) {
assert(tmpDir.listFiles.isEmpty)
sql(
s"""
|CREATE EXTERNAL TABLE $externalTab (key INT, value STRING)
|PARTITIONED BY (ds STRING, hr STRING)
|LOCATION '${tmpDir.toURI}'
""".stripMargin)
        // Before data insertion, all the directories are empty
assert(dirSet.forall(dir => dir.listFiles == null || dir.listFiles.isEmpty))
for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- Seq("11", "12")) {
sql(
s"""
|INSERT OVERWRITE TABLE $externalTab
|partition (ds='$ds',hr='$hr')
|SELECT 1, 'a'
""".stripMargin)
}
val hiveTable = catalog.getTableMetadata(TableIdentifier(externalTab, Some("default")))
assert(hiveTable.tableType == CatalogTableType.EXTERNAL)
        // After data insertion, all the directories are non-empty
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
val message = intercept[AnalysisException] {
sql(s"ALTER TABLE $externalTab DROP PARTITION (ds='2008-04-09', unknownCol='12')")
}
assert(message.getMessage.contains("unknownCol is not a valid partition column in table " +
"`default`.`exttable_with_partitions`"))
sql(
s"""
|ALTER TABLE $externalTab DROP PARTITION (ds='2008-04-08'),
|PARTITION (hr='12')
""".stripMargin)
assert(catalog.listPartitions(TableIdentifier(externalTab)).map(_.spec).toSet ==
Set(Map("ds" -> "2008-04-09", "hr" -> "11")))
// drop partition will not delete the data of external table
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
sql(
s"""
|ALTER TABLE $externalTab ADD PARTITION (ds='2008-04-08', hr='12')
|PARTITION (ds='2008-04-08', hr=11)
""".stripMargin)
assert(catalog.listPartitions(TableIdentifier(externalTab)).map(_.spec).toSet ==
Set(Map("ds" -> "2008-04-08", "hr" -> "11"),
Map("ds" -> "2008-04-08", "hr" -> "12"),
Map("ds" -> "2008-04-09", "hr" -> "11")))
// add partition will not delete the data
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
sql(s"DROP TABLE $externalTab")
// drop table will not delete the data of external table
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
}
}
}
test("drop views") {
withTable("tab1") {
val tabName = "tab1"
spark.range(10).write.saveAsTable("tab1")
withView("view1") {
val viewName = "view1"
assert(tableDirectoryExists(TableIdentifier(tabName)))
assert(!tableDirectoryExists(TableIdentifier(viewName)))
sql(s"CREATE VIEW $viewName AS SELECT * FROM tab1")
assert(tableDirectoryExists(TableIdentifier(tabName)))
assert(!tableDirectoryExists(TableIdentifier(viewName)))
sql(s"DROP VIEW $viewName")
assert(tableDirectoryExists(TableIdentifier(tabName)))
sql(s"DROP VIEW IF EXISTS $viewName")
}
}
}
test("alter views - rename") {
val tabName = "tab1"
withTable(tabName) {
spark.range(10).write.saveAsTable(tabName)
val oldViewName = "view1"
val newViewName = "view2"
withView(oldViewName, newViewName) {
val catalog = spark.sessionState.catalog
sql(s"CREATE VIEW $oldViewName AS SELECT * FROM $tabName")
assert(catalog.tableExists(TableIdentifier(oldViewName)))
assert(!catalog.tableExists(TableIdentifier(newViewName)))
sql(s"ALTER VIEW $oldViewName RENAME TO $newViewName")
assert(!catalog.tableExists(TableIdentifier(oldViewName)))
assert(catalog.tableExists(TableIdentifier(newViewName)))
}
}
}
test("alter views - set/unset tblproperties") {
val tabName = "tab1"
withTable(tabName) {
spark.range(10).write.saveAsTable(tabName)
val viewName = "view1"
withView(viewName) {
def checkProperties(expected: Map[String, String]): Boolean = {
val properties = spark.sessionState.catalog.getTableMetadata(TableIdentifier(viewName))
.properties
properties.filterNot { case (key, value) =>
Seq("transient_lastDdlTime", CatalogTable.VIEW_DEFAULT_DATABASE).contains(key) ||
key.startsWith(CatalogTable.VIEW_QUERY_OUTPUT_PREFIX)
} == expected
}
sql(s"CREATE VIEW $viewName AS SELECT * FROM $tabName")
checkProperties(Map())
sql(s"ALTER VIEW $viewName SET TBLPROPERTIES ('p' = 'an')")
checkProperties(Map("p" -> "an"))
// no exception or message will be issued if we set it again
sql(s"ALTER VIEW $viewName SET TBLPROPERTIES ('p' = 'an')")
checkProperties(Map("p" -> "an"))
// the value will be updated if we set the same key to a different value
sql(s"ALTER VIEW $viewName SET TBLPROPERTIES ('p' = 'b')")
checkProperties(Map("p" -> "b"))
sql(s"ALTER VIEW $viewName UNSET TBLPROPERTIES ('p')")
checkProperties(Map())
val message = intercept[AnalysisException] {
sql(s"ALTER VIEW $viewName UNSET TBLPROPERTIES ('p')")
}.getMessage
assert(message.contains(
"Attempted to unset non-existent property 'p' in table '`default`.`view1`'"))
}
}
}
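  // Helpers that assert ALTER TABLE on a view, and ALTER VIEW on a table, fail with the
  // expected "use the other command" error message.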
private def assertErrorForAlterTableOnView(sqlText: String): Unit = {
val message = intercept[AnalysisException](sql(sqlText)).getMessage
assert(message.contains("Cannot alter a view with ALTER TABLE. Please use ALTER VIEW instead"))
}
private def assertErrorForAlterViewOnTable(sqlText: String): Unit = {
val message = intercept[AnalysisException](sql(sqlText)).getMessage
assert(message.contains("Cannot alter a table with ALTER VIEW. Please use ALTER TABLE instead"))
}
test("create table - SET TBLPROPERTIES EXTERNAL to TRUE") {
val tabName = "tab1"
withTable(tabName) {
val message = intercept[AnalysisException] {
sql(s"CREATE TABLE $tabName (height INT, length INT) TBLPROPERTIES('EXTERNAL'='TRUE')")
}.getMessage
assert(message.contains("Cannot set or change the preserved property key: 'EXTERNAL'"))
}
}
test("alter table - SET TBLPROPERTIES EXTERNAL to TRUE") {
val tabName = "tab1"
withTable(tabName) {
val catalog = spark.sessionState.catalog
sql(s"CREATE TABLE $tabName (height INT, length INT)")
assert(
catalog.getTableMetadata(TableIdentifier(tabName)).tableType == CatalogTableType.MANAGED)
val message = intercept[AnalysisException] {
sql(s"ALTER TABLE $tabName SET TBLPROPERTIES ('EXTERNAL' = 'TRUE')")
}.getMessage
assert(message.contains("Cannot set or change the preserved property key: 'EXTERNAL'"))
// The table type is not changed to external
assert(
catalog.getTableMetadata(TableIdentifier(tabName)).tableType == CatalogTableType.MANAGED)
// The table property is case sensitive. Thus, external is allowed
sql(s"ALTER TABLE $tabName SET TBLPROPERTIES ('external' = 'TRUE')")
// The table type is not changed to external
assert(
catalog.getTableMetadata(TableIdentifier(tabName)).tableType == CatalogTableType.MANAGED)
}
}
test("alter views and alter table - misuse") {
val tabName = "tab1"
withTable(tabName) {
spark.range(10).write.saveAsTable(tabName)
val oldViewName = "view1"
val newViewName = "view2"
withView(oldViewName, newViewName) {
val catalog = spark.sessionState.catalog
sql(s"CREATE VIEW $oldViewName AS SELECT * FROM $tabName")
assert(catalog.tableExists(TableIdentifier(tabName)))
assert(catalog.tableExists(TableIdentifier(oldViewName)))
assert(!catalog.tableExists(TableIdentifier(newViewName)))
assertErrorForAlterViewOnTable(s"ALTER VIEW $tabName RENAME TO $newViewName")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName RENAME TO $newViewName")
assertErrorForAlterViewOnTable(s"ALTER VIEW $tabName SET TBLPROPERTIES ('p' = 'an')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET TBLPROPERTIES ('p' = 'an')")
assertErrorForAlterViewOnTable(s"ALTER VIEW $tabName UNSET TBLPROPERTIES ('p')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName UNSET TBLPROPERTIES ('p')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET LOCATION '/path/to/home'")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET SERDE 'whatever'")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET SERDEPROPERTIES ('x' = 'y')")
assertErrorForAlterTableOnView(
s"ALTER TABLE $oldViewName PARTITION (a=1, b=2) SET SERDEPROPERTIES ('x' = 'y')")
assertErrorForAlterTableOnView(
s"ALTER TABLE $oldViewName ADD IF NOT EXISTS PARTITION (a='4', b='8')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName DROP IF EXISTS PARTITION (a='2')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName RECOVER PARTITIONS")
assertErrorForAlterTableOnView(
s"ALTER TABLE $oldViewName PARTITION (a='1') RENAME TO PARTITION (a='100')")
assert(catalog.tableExists(TableIdentifier(tabName)))
assert(catalog.tableExists(TableIdentifier(oldViewName)))
assert(!catalog.tableExists(TableIdentifier(newViewName)))
}
}
}
test("Insert overwrite Hive table should output correct schema") {
withSQLConf(CONVERT_METASTORE_PARQUET.key -> "false") {
withTable("tbl", "tbl2") {
withView("view1") {
spark.sql("CREATE TABLE tbl(id long)")
spark.sql("INSERT OVERWRITE TABLE tbl VALUES 4")
spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl")
withTempPath { path =>
sql(
s"""
|CREATE TABLE tbl2(ID long) USING hive
|OPTIONS(fileFormat 'parquet')
|LOCATION '${path.toURI}'
""".stripMargin)
spark.sql("INSERT OVERWRITE TABLE tbl2 SELECT ID FROM view1")
val expectedSchema = StructType(Seq(StructField("ID", LongType, true)))
assert(spark.read.parquet(path.toString).schema == expectedSchema)
checkAnswer(spark.table("tbl2"), Seq(Row(4)))
}
}
}
}
}
test("Create Hive table as select should output correct schema") {
withSQLConf(CONVERT_METASTORE_PARQUET.key -> "false") {
withTable("tbl", "tbl2") {
withView("view1") {
spark.sql("CREATE TABLE tbl(id long)")
spark.sql("INSERT OVERWRITE TABLE tbl VALUES 4")
spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl")
withTempPath { path =>
sql(
s"""
|CREATE TABLE tbl2 USING hive
|OPTIONS(fileFormat 'parquet')
|LOCATION '${path.toURI}'
|AS SELECT ID FROM view1
""".stripMargin)
val expectedSchema = StructType(Seq(StructField("ID", LongType, true)))
assert(spark.read.parquet(path.toString).schema == expectedSchema)
checkAnswer(spark.table("tbl2"), Seq(Row(4)))
}
}
}
}
}
test("SPARK-25313 Insert overwrite directory should output correct schema") {
withSQLConf(CONVERT_METASTORE_PARQUET.key -> "false") {
withTable("tbl") {
withView("view1") {
spark.sql("CREATE TABLE tbl(id long)")
spark.sql("INSERT OVERWRITE TABLE tbl VALUES 4")
spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl")
withTempPath { path =>
spark.sql(s"INSERT OVERWRITE LOCAL DIRECTORY '${path.getCanonicalPath}' " +
"STORED AS PARQUET SELECT ID FROM view1")
val expectedSchema = StructType(Seq(StructField("ID", LongType, true)))
assert(spark.read.parquet(path.toString).schema == expectedSchema)
checkAnswer(spark.read.parquet(path.toString), Seq(Row(4)))
}
}
}
}
}
test("alter table partition - storage information") {
sql("CREATE TABLE boxes (height INT, length INT) PARTITIONED BY (width INT)")
sql("INSERT OVERWRITE TABLE boxes PARTITION (width=4) SELECT 4, 4")
val catalog = spark.sessionState.catalog
val expectedSerde = "com.sparkbricks.serde.ColumnarSerDe"
val expectedSerdeProps = Map("compress" -> "true")
val expectedSerdePropsString =
expectedSerdeProps.map { case (k, v) => s"'$k'='$v'" }.mkString(", ")
val oldPart = catalog.getPartition(TableIdentifier("boxes"), Map("width" -> "4"))
assume(oldPart.storage.serde != Some(expectedSerde), "bad test: serde was already set")
assume(oldPart.storage.properties.filterKeys(expectedSerdeProps.contains) !=
expectedSerdeProps, "bad test: serde properties were already set")
sql(s"""ALTER TABLE boxes PARTITION (width=4)
| SET SERDE '$expectedSerde'
| WITH SERDEPROPERTIES ($expectedSerdePropsString)
|""".stripMargin)
val newPart = catalog.getPartition(TableIdentifier("boxes"), Map("width" -> "4"))
assert(newPart.storage.serde == Some(expectedSerde))
assert(newPart.storage.properties.filterKeys(expectedSerdeProps.contains) ==
expectedSerdeProps)
}
test("MSCK REPAIR RABLE") {
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1")
sql("CREATE TABLE tab1 (height INT, length INT) PARTITIONED BY (a INT, b INT)")
val part1 = Map("a" -> "1", "b" -> "5")
val part2 = Map("a" -> "2", "b" -> "6")
val root = new Path(catalog.getTableMetadata(tableIdent).location)
val fs = root.getFileSystem(spark.sessionState.newHadoopConf())
// valid
fs.mkdirs(new Path(new Path(root, "a=1"), "b=5"))
fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "a.csv")) // file
fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "_SUCCESS")) // file
fs.mkdirs(new Path(new Path(root, "A=2"), "B=6"))
fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "b.csv")) // file
fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "c.csv")) // file
fs.createNewFile(new Path(new Path(root, "A=2/B=6"), ".hiddenFile")) // file
fs.mkdirs(new Path(new Path(root, "A=2/B=6"), "_temporary"))
// invalid
fs.mkdirs(new Path(new Path(root, "a"), "b")) // bad name
fs.mkdirs(new Path(new Path(root, "b=1"), "a=1")) // wrong order
fs.mkdirs(new Path(root, "a=4")) // not enough columns
fs.createNewFile(new Path(new Path(root, "a=1"), "b=4")) // file
fs.createNewFile(new Path(new Path(root, "a=1"), "_SUCCESS")) // _SUCCESS
fs.mkdirs(new Path(new Path(root, "a=1"), "_temporary")) // _temporary
fs.mkdirs(new Path(new Path(root, "a=1"), ".b=4")) // start with .
try {
sql("MSCK REPAIR TABLE tab1")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
Set(part1, part2))
assert(catalog.getPartition(tableIdent, part1).parameters("numFiles") == "1")
assert(catalog.getPartition(tableIdent, part2).parameters("numFiles") == "2")
} finally {
fs.delete(root, true)
}
}
test("drop table using drop view") {
withTable("tab1") {
sql("CREATE TABLE tab1(c1 int)")
val message = intercept[AnalysisException] {
sql("DROP VIEW tab1")
}.getMessage
assert(message.contains("Cannot drop a table with DROP VIEW. Please use DROP TABLE instead"))
}
}
test("drop view using drop table") {
withTable("tab1") {
spark.range(10).write.saveAsTable("tab1")
withView("view1") {
sql("CREATE VIEW view1 AS SELECT * FROM tab1")
val message = intercept[AnalysisException] {
sql("DROP TABLE view1")
}.getMessage
assert(message.contains("Cannot drop a view with DROP TABLE. Please use DROP VIEW instead"))
}
}
}
test("create view with mismatched schema") {
withTable("tab1") {
spark.range(10).write.saveAsTable("tab1")
withView("view1") {
val e = intercept[AnalysisException] {
sql("CREATE VIEW view1 (col1, col3) AS SELECT * FROM tab1")
}.getMessage
assert(e.contains("the SELECT clause (num: `1`) does not match")
&& e.contains("CREATE VIEW (num: `2`)"))
}
}
}
test("create view with specified schema") {
withView("view1") {
sql("CREATE VIEW view1 (col1, col2) AS SELECT 1, 2")
checkAnswer(
sql("SELECT * FROM view1"),
Row(1, 2) :: Nil
)
}
}
test("desc table for Hive table - partitioned table") {
withTable("tbl") {
sql("CREATE TABLE tbl(a int) PARTITIONED BY (b int)")
assert(sql("DESC tbl").collect().containsSlice(
Seq(
Row("a", "int", null),
Row("b", "int", null),
Row("# Partition Information", "", ""),
Row("# col_name", "data_type", "comment"),
Row("b", "int", null)
)
))
}
}
test("desc table for Hive table - bucketed + sorted table") {
withTable("tbl") {
sql(
s"""
|CREATE TABLE tbl (id int, name string)
|CLUSTERED BY(id)
|SORTED BY(id, name) INTO 1024 BUCKETS
|PARTITIONED BY (ds string)
""".stripMargin)
val x = sql("DESC FORMATTED tbl").collect()
assert(x.containsSlice(
Seq(
Row("Num Buckets", "1024", ""),
Row("Bucket Columns", "[`id`]", ""),
Row("Sort Columns", "[`id`, `name`]", "")
)
))
}
}
test("desc table for data source table using Hive Metastore") {
assume(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "hive")
val tabName = "tab1"
withTable(tabName) {
sql(s"CREATE TABLE $tabName(a int comment 'test') USING parquet ")
checkAnswer(
sql(s"DESC $tabName").select("col_name", "data_type", "comment"),
Row("a", "int", "test") :: Nil
)
}
}
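  // Creates a database at a user-supplied location (which may or may not already exist),
  // creates and drops a table inside it, and verifies that dropping the database removes
  // the location directory.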
private def createDatabaseWithLocation(tmpDir: File, dirExists: Boolean): Unit = {
val catalog = spark.sessionState.catalog
val dbName = "db1"
val tabName = "tab1"
val fs = new Path(tmpDir.toString).getFileSystem(spark.sessionState.newHadoopConf())
withTable(tabName) {
if (dirExists) {
assert(tmpDir.listFiles.isEmpty)
} else {
assert(!fs.exists(new Path(tmpDir.toString)))
}
sql(s"CREATE DATABASE $dbName Location '${tmpDir.toURI.getPath.stripSuffix("/")}'")
val db1 = catalog.getDatabaseMetadata(dbName)
val dbPath = new URI(tmpDir.toURI.toString.stripSuffix("/"))
assert(db1.copy(properties = db1.properties -- Seq(PROP_OWNER_NAME, PROP_OWNER_TYPE)) ===
CatalogDatabase(dbName, "", dbPath, Map.empty))
sql("USE db1")
sql(s"CREATE TABLE $tabName as SELECT 1")
assert(tableDirectoryExists(TableIdentifier(tabName), Option(tmpDir.toString)))
assert(tmpDir.listFiles.nonEmpty)
sql(s"DROP TABLE $tabName")
assert(tmpDir.listFiles.isEmpty)
sql("USE default")
sql(s"DROP DATABASE $dbName")
assert(!fs.exists(new Path(tmpDir.toString)))
}
}
test("create/drop database - location without pre-created directory") {
withTempPath { tmpDir =>
createDatabaseWithLocation(tmpDir, dirExists = false)
}
}
test("create/drop database - location with pre-created directory") {
withTempDir { tmpDir =>
createDatabaseWithLocation(tmpDir, dirExists = true)
}
}
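  // Exercises DROP DATABASE with CASCADE or RESTRICT, optionally leaving a table behind,
  // and checks whether the database directory under the warehouse path is removed.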
private def dropDatabase(cascade: Boolean, tableExists: Boolean): Unit = {
val dbName = "db1"
val dbPath = new Path(spark.sessionState.conf.warehousePath)
val fs = dbPath.getFileSystem(spark.sessionState.newHadoopConf())
sql(s"CREATE DATABASE $dbName")
val catalog = spark.sessionState.catalog
val expectedDBLocation = s"file:${dbPath.toUri.getPath.stripSuffix("/")}/$dbName.db"
val expectedDBUri = CatalogUtils.stringToURI(expectedDBLocation)
val db1 = catalog.getDatabaseMetadata(dbName)
assert(db1.copy(properties = db1.properties -- Seq(PROP_OWNER_NAME, PROP_OWNER_TYPE)) ==
CatalogDatabase(
dbName,
"",
expectedDBUri,
Map.empty))
// the database directory was created
assert(fs.exists(dbPath) && fs.isDirectory(dbPath))
sql(s"USE $dbName")
val tabName = "tab1"
assert(!tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
sql(s"CREATE TABLE $tabName as SELECT 1")
assert(tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
if (!tableExists) {
sql(s"DROP TABLE $tabName")
assert(!tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
}
sql(s"USE default")
val sqlDropDatabase = s"DROP DATABASE $dbName ${if (cascade) "CASCADE" else "RESTRICT"}"
if (tableExists && !cascade) {
val message = intercept[AnalysisException] {
sql(sqlDropDatabase)
}.getMessage
assert(message.contains(s"Database $dbName is not empty. One or more tables exist."))
// the database directory was not removed
assert(fs.exists(new Path(expectedDBLocation)))
} else {
sql(sqlDropDatabase)
      // the database directory was removed, along with the table directories inside it
assert(!fs.exists(new Path(expectedDBLocation)))
}
}
test("drop database containing tables - CASCADE") {
dropDatabase(cascade = true, tableExists = true)
}
test("drop an empty database - CASCADE") {
dropDatabase(cascade = true, tableExists = false)
}
test("drop database containing tables - RESTRICT") {
dropDatabase(cascade = false, tableExists = true)
}
test("drop an empty database - RESTRICT") {
dropDatabase(cascade = false, tableExists = false)
}
test("drop default database") {
Seq("true", "false").foreach { caseSensitive =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive) {
var message = intercept[AnalysisException] {
sql("DROP DATABASE default")
}.getMessage
assert(message.contains("Can not drop default database"))
// SQLConf.CASE_SENSITIVE does not affect the result
// because the Hive metastore is not case sensitive.
message = intercept[AnalysisException] {
sql("DROP DATABASE DeFault")
}.getMessage
assert(message.contains("Can not drop default database"))
}
}
}
test("Create Cataloged Table As Select - Drop Table After Runtime Exception") {
withTable("tab") {
intercept[SparkException] {
sql(
"""
|CREATE TABLE tab
|STORED AS TEXTFILE
|SELECT 1 AS a, (SELECT a FROM (SELECT 1 AS a UNION ALL SELECT 2 AS a) t) AS b
""".stripMargin)
}
      // After hitting a runtime exception, we should drop the created table.
assert(!spark.sessionState.catalog.tableExists(TableIdentifier("tab")))
}
}
test("CREATE TABLE LIKE a temporary view") {
Seq(None, Some("parquet"), Some("orc"), Some("hive")) foreach { provider =>
// CREATE TABLE LIKE a temporary view.
withCreateTableLikeTempView(location = None, provider)
// CREATE TABLE LIKE a temporary view location ...
withTempDir { tmpDir =>
withCreateTableLikeTempView(Some(tmpDir.toURI.toString), provider)
}
}
}
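  // Runs CREATE TABLE LIKE against a temporary view, optionally with an explicit LOCATION
  // and USING provider, and validates the result with checkCreateTableLike.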
private def withCreateTableLikeTempView(
location : Option[String], provider: Option[String]): Unit = {
val sourceViewName = "tab1"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTempView(sourceViewName) {
withTable(targetTabName) {
spark.range(10).select($"id" as "a", $"id" as "b", $"id" as "c", $"id" as "d")
.createTempView(sourceViewName)
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
val providerClause = if (provider.nonEmpty) s"USING ${provider.get}" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceViewName $providerClause $locationClause")
val sourceTable = spark.sessionState.catalog.getTempViewOrPermanentTableMetadata(
TableIdentifier(sourceViewName))
val targetTable = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
checkCreateTableLike(sourceTable, targetTable, tableType, provider)
}
}
}
test("CREATE TABLE LIKE a data source table") {
Seq(None, Some("parquet"), Some("orc"), Some("hive")) foreach { provider =>
// CREATE TABLE LIKE a data source table.
withCreateTableLikeDSTable(location = None, provider)
// CREATE TABLE LIKE a data source table location ...
withTempDir { tmpDir =>
withCreateTableLikeDSTable(Some(tmpDir.toURI.toString), provider)
}
}
}
private def withCreateTableLikeDSTable(
location : Option[String], provider: Option[String]): Unit = {
val sourceTabName = "tab1"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTable(sourceTabName, targetTabName) {
spark.range(10).select($"id" as "a", $"id" as "b", $"id" as "c", $"id" as "d")
.write.format("json").saveAsTable(sourceTabName)
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
val providerClause = if (provider.nonEmpty) s"USING ${provider.get}" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName $providerClause $locationClause")
val sourceTable =
spark.sessionState.catalog.getTableMetadata(
TableIdentifier(sourceTabName, Some("default")))
val targetTable =
spark.sessionState.catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
// The table type of the source table should be a Hive-managed data source table
assert(DDLUtils.isDatasourceTable(sourceTable))
assert(sourceTable.tableType == CatalogTableType.MANAGED)
checkCreateTableLike(sourceTable, targetTable, tableType, provider)
}
}
test("CREATE TABLE LIKE an external data source table") {
Seq(None, Some("parquet"), Some("orc"), Some("hive")) foreach { provider =>
// CREATE TABLE LIKE an external data source table.
withCreateTableLikeExtDSTable(location = None, provider)
// CREATE TABLE LIKE an external data source table location ...
withTempDir { tmpDir =>
withCreateTableLikeExtDSTable(Some(tmpDir.toURI.toString), provider)
}
}
}
private def withCreateTableLikeExtDSTable(
location : Option[String], provider: Option[String]): Unit = {
val sourceTabName = "tab1"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTable(sourceTabName, targetTabName) {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(10).select($"id" as "a", $"id" as "b", $"id" as "c", $"id" as "d")
.write.format("parquet").save(path)
sql(s"CREATE TABLE $sourceTabName USING parquet OPTIONS (PATH '${dir.toURI}')")
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
val providerClause = if (provider.nonEmpty) s"USING ${provider.get}" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName $providerClause $locationClause")
// The source table should be an external data source table
val sourceTable = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(sourceTabName, Some("default")))
val targetTable = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
// The table type of the source table should be an external data source table
assert(DDLUtils.isDatasourceTable(sourceTable))
assert(sourceTable.tableType == CatalogTableType.EXTERNAL)
checkCreateTableLike(sourceTable, targetTable, tableType, provider)
}
}
}
test("CREATE TABLE LIKE a managed Hive serde table") {
Seq(None, Some("parquet"), Some("orc"), Some("hive")) foreach { provider =>
// CREATE TABLE LIKE a managed Hive serde table.
withCreateTableLikeManagedHiveTable(location = None, provider)
// CREATE TABLE LIKE a managed Hive serde table location ...
withTempDir { tmpDir =>
withCreateTableLikeManagedHiveTable(Some(tmpDir.toURI.toString), provider)
}
}
}
private def withCreateTableLikeManagedHiveTable(
location : Option[String], provider: Option[String]): Unit = {
val sourceTabName = "tab1"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
val catalog = spark.sessionState.catalog
withTable(sourceTabName, targetTabName) {
sql(s"CREATE TABLE $sourceTabName TBLPROPERTIES('prop1'='value1') AS SELECT 1 key, 'a'")
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
val providerClause = if (provider.nonEmpty) s"USING ${provider.get}" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName $providerClause $locationClause")
val sourceTable = catalog.getTableMetadata(
TableIdentifier(sourceTabName, Some("default")))
assert(sourceTable.tableType == CatalogTableType.MANAGED)
assert(sourceTable.properties.get("prop1").nonEmpty)
val targetTable = catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
checkCreateTableLike(sourceTable, targetTable, tableType, provider)
}
}
test("CREATE TABLE LIKE an external Hive serde table") {
Seq(None, Some("parquet"), Some("orc"), Some("hive")) foreach { provider =>
// CREATE TABLE LIKE an external Hive serde table.
withCreateTableLikeExtHiveTable(location = None, provider)
// CREATE TABLE LIKE an external Hive serde table location ...
withTempDir { tmpDir =>
withCreateTableLikeExtHiveTable(Some(tmpDir.toURI.toString), provider)
}
}
}
private def withCreateTableLikeExtHiveTable(
location : Option[String], provider: Option[String]): Unit = {
val catalog = spark.sessionState.catalog
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTempDir { tmpDir =>
val basePath = tmpDir.toURI
val sourceTabName = "tab1"
val targetTabName = "tab2"
withTable(sourceTabName, targetTabName) {
assert(tmpDir.listFiles.isEmpty)
sql(
s"""
|CREATE EXTERNAL TABLE $sourceTabName (key INT comment 'test', value STRING)
|COMMENT 'Apache Spark'
|PARTITIONED BY (ds STRING, hr STRING)
|LOCATION '$basePath'
""".stripMargin)
for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- Seq("11", "12")) {
sql(
s"""
|INSERT OVERWRITE TABLE $sourceTabName
|partition (ds='$ds',hr='$hr')
|SELECT 1, 'a'
""".stripMargin)
}
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
val providerClause = if (provider.nonEmpty) s"USING ${provider.get}" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName $providerClause $locationClause")
val sourceTable = catalog.getTableMetadata(
TableIdentifier(sourceTabName, Some("default")))
assert(sourceTable.tableType == CatalogTableType.EXTERNAL)
assert(sourceTable.comment == Option("Apache Spark"))
val targetTable = catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
checkCreateTableLike(sourceTable, targetTable, tableType, provider)
}
}
}
test("CREATE TABLE LIKE a view") {
Seq(None, Some("parquet"), Some("orc"), Some("hive")) foreach { provider =>
// CREATE TABLE LIKE a view.
withCreateTableLikeView(location = None, provider)
// CREATE TABLE LIKE a view location ...
withTempDir { tmpDir =>
withCreateTableLikeView(Some(tmpDir.toURI.toString), provider)
}
}
}
private def withCreateTableLikeView(
location : Option[String], provider: Option[String]): Unit = {
val sourceTabName = "tab1"
val sourceViewName = "view"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTable(sourceTabName, targetTabName) {
withView(sourceViewName) {
spark.range(10).select($"id" as "a", $"id" as "b", $"id" as "c", $"id" as "d")
.write.format("json").saveAsTable(sourceTabName)
sql(s"CREATE VIEW $sourceViewName AS SELECT * FROM $sourceTabName")
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
val providerClause = if (provider.nonEmpty) s"USING ${provider.get}" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceViewName $providerClause $locationClause")
val sourceView = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(sourceViewName, Some("default")))
// The original source should be a VIEW with an empty path
assert(sourceView.tableType == CatalogTableType.VIEW)
assert(sourceView.viewText.nonEmpty)
assert(sourceView.viewCatalogAndNamespace ==
Seq(CatalogManager.SESSION_CATALOG_NAME, "default"))
assert(sourceView.viewQueryColumnNames == Seq("a", "b", "c", "d"))
val targetTable = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
checkCreateTableLike(sourceView, targetTable, tableType, provider)
}
}
}
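  // Shared assertions for the CREATE TABLE LIKE tests: checks the target's table type,
  // provider and location, verifies that view metadata, comments and properties are not
  // copied, compares schemas, and confirms data can be inserted from the source table.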
private def checkCreateTableLike(
sourceTable: CatalogTable,
targetTable: CatalogTable,
tableType: CatalogTableType,
provider: Option[String]): Unit = {
// The created table should be a MANAGED table or EXTERNAL table with empty view text
// and original text.
assert(targetTable.tableType == tableType,
s"the created table must be a/an ${tableType.name} table")
assert(targetTable.viewText.isEmpty,
"the view text in the created table must be empty")
assert(targetTable.viewCatalogAndNamespace.isEmpty,
"the view catalog and namespace in the created table must be empty")
assert(targetTable.viewQueryColumnNames.isEmpty,
"the view query output columns in the created table must be empty")
assert(targetTable.comment.isEmpty,
"the comment in the created table must be empty")
assert(targetTable.unsupportedFeatures.isEmpty,
"the unsupportedFeatures in the create table must be empty")
val metastoreGeneratedProperties = Seq(
"CreateTime",
"transient_lastDdlTime",
"grantTime",
"lastUpdateTime",
"last_modified_by",
"last_modified_time",
"Owner:",
"totalNumberFiles",
"maxFileSize",
"minFileSize"
)
assert(targetTable.properties.filterKeys(!metastoreGeneratedProperties.contains(_)).isEmpty,
"the table properties of source tables should not be copied in the created table")
provider match {
case Some(_) =>
assert(targetTable.provider == provider)
if (DDLUtils.isHiveTable(provider)) {
assert(DDLUtils.isHiveTable(targetTable),
"the target table should be a hive table if provider is hive")
}
case None =>
if (sourceTable.tableType == CatalogTableType.VIEW) {
// Source table is a temporary/permanent view, which does not have a provider.
// The created target table uses the default data source format
assert(targetTable.provider == Option(spark.sessionState.conf.defaultDataSourceName))
} else {
assert(targetTable.provider == sourceTable.provider)
}
if (DDLUtils.isDatasourceTable(sourceTable) ||
sourceTable.tableType == CatalogTableType.VIEW) {
assert(DDLUtils.isDatasourceTable(targetTable),
"the target table should be a data source table")
} else {
assert(!DDLUtils.isDatasourceTable(targetTable),
"the target table should be a Hive serde table")
}
}
assert(targetTable.storage.locationUri.nonEmpty, "target table path should not be empty")
    // The user-specified location and the sourceTable's location can be the same or different
    // when we create an external table, so we don't need to do this check.
if (tableType != CatalogTableType.EXTERNAL) {
assert(sourceTable.storage.locationUri != targetTable.storage.locationUri,
"source table/view path should be different from target table path")
}
    // The source table contents should not be seen in the target table.
assert(spark.table(sourceTable.identifier).count() != 0, "the source table should be nonempty")
assert(spark.table(targetTable.identifier).count() == 0, "the target table should be empty")
// Their schema should be identical
checkAnswer(
sql(s"DESC ${sourceTable.identifier}"),
sql(s"DESC ${targetTable.identifier}"))
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
// Check whether the new table can be inserted using the data from the original table
sql(s"INSERT INTO TABLE ${targetTable.identifier} SELECT * FROM ${sourceTable.identifier}")
}
// After insertion, the data should be identical
checkAnswer(
sql(s"SELECT * FROM ${sourceTable.identifier}"),
sql(s"SELECT * FROM ${targetTable.identifier}"))
}
test("create table with the same name as an index table") {
val tabName = "tab1"
val indexName = tabName + "_index"
withTable(tabName) {
      // Spark SQL does not support creating indexes, so we have to use the Hive client.
val client =
spark.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog].client
sql(s"CREATE TABLE $tabName(a int)")
try {
client.runSqlHive(
s"CREATE INDEX $indexName ON TABLE $tabName (a) AS 'COMPACT' WITH DEFERRED REBUILD")
val indexTabName =
spark.sessionState.catalog.listTables("default", s"*$indexName*").head.table
// Even if index tables exist, listTables and getTable APIs should still work
checkAnswer(
spark.catalog.listTables().toDF(),
Row(indexTabName, "default", null, null, false) ::
Row(tabName, "default", null, "MANAGED", false) :: Nil)
assert(spark.catalog.getTable("default", indexTabName).name === indexTabName)
intercept[TableAlreadyExistsException] {
sql(s"CREATE TABLE $indexTabName(b int) USING hive")
}
intercept[TableAlreadyExistsException] {
sql(s"ALTER TABLE $tabName RENAME TO $indexTabName")
}
        // When tableExists is not invoked, we can still get an AnalysisException
val e = intercept[AnalysisException] {
sql(s"DESCRIBE $indexTabName")
}.getMessage
assert(e.contains("Hive index table is not supported."))
} finally {
client.runSqlHive(s"DROP INDEX IF EXISTS $indexName ON $tabName")
}
}
}
test("insert skewed table") {
val tabName = "tab1"
withTable(tabName) {
      // Spark SQL does not support creating skewed tables, so we have to use the Hive client.
val client =
spark.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog].client
client.runSqlHive(
s"""
|CREATE Table $tabName(col1 int, col2 int)
|PARTITIONED BY (part1 string, part2 string)
|SKEWED BY (col1) ON (3, 4) STORED AS DIRECTORIES
""".stripMargin)
val hiveTable =
spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
assert(hiveTable.unsupportedFeatures.contains("skewed columns"))
      // Call loadDynamicPartitions against a skewed table with list bucketing enabled
sql(
s"""
|INSERT OVERWRITE TABLE $tabName
|PARTITION (part1='a', part2)
|SELECT 3, 4, 'b'
""".stripMargin)
      // Call loadPartitions against a skewed table with list bucketing enabled
sql(
s"""
|INSERT INTO TABLE $tabName
|PARTITION (part1='a', part2='b')
|SELECT 1, 2
""".stripMargin)
checkAnswer(
sql(s"SELECT * from $tabName"),
Row(3, 4, "a", "b") :: Row(1, 2, "a", "b") :: Nil)
}
}
test("desc table for data source table - no user-defined schema") {
Seq("parquet", "json", "orc").foreach { fileFormat =>
withTable("t1") {
withTempPath { dir =>
val path = dir.toURI.toString
spark.range(1).write.format(fileFormat).save(path)
sql(s"CREATE TABLE t1 USING $fileFormat OPTIONS (PATH '$path')")
val desc = sql("DESC FORMATTED t1").collect().toSeq
assert(desc.contains(Row("id", "bigint", null)))
}
}
}
}
test("datasource and statistics table property keys are not allowed") {
import org.apache.spark.sql.hive.HiveExternalCatalog.DATASOURCE_PREFIX
import org.apache.spark.sql.hive.HiveExternalCatalog.STATISTICS_PREFIX
withTable("tbl") {
sql("CREATE TABLE tbl(a INT) STORED AS parquet")
Seq(DATASOURCE_PREFIX, STATISTICS_PREFIX).foreach { forbiddenPrefix =>
val e = intercept[AnalysisException] {
sql(s"ALTER TABLE tbl SET TBLPROPERTIES ('${forbiddenPrefix}foo' = 'loser')")
}
assert(e.getMessage.contains(forbiddenPrefix + "foo"))
val e2 = intercept[AnalysisException] {
sql(s"ALTER TABLE tbl UNSET TBLPROPERTIES ('${forbiddenPrefix}foo')")
}
assert(e2.getMessage.contains(forbiddenPrefix + "foo"))
val e3 = intercept[AnalysisException] {
sql(s"CREATE TABLE tbl2 (a INT) TBLPROPERTIES ('${forbiddenPrefix}foo'='anything')")
}
assert(e3.getMessage.contains(forbiddenPrefix + "foo"))
}
}
}
test("truncate table - datasource table") {
import testImplicits._
val data = (1 to 10).map { i => (i, i) }.toDF("width", "length")
// Test both a Hive compatible and incompatible code path.
Seq("json", "parquet").foreach { format =>
withTable("rectangles") {
data.write.format(format).saveAsTable("rectangles")
assume(spark.table("rectangles").collect().nonEmpty,
"bad test; table was empty to begin with")
sql("TRUNCATE TABLE rectangles")
assert(spark.table("rectangles").collect().isEmpty)
// not supported since the table is not partitioned
val e = intercept[AnalysisException] {
sql("TRUNCATE TABLE rectangles PARTITION (width=1)")
}
assert(e.message.contains("Operation not allowed"))
}
}
}
test("truncate partitioned table - datasource table") {
import testImplicits._
val data = (1 to 10).map { i => (i % 3, i % 5, i) }.toDF("width", "length", "height")
withTable("partTable") {
data.write.partitionBy("width", "length").saveAsTable("partTable")
// supported since partitions are stored in the metastore
sql("TRUNCATE TABLE partTable PARTITION (width=1, length=1)")
assert(spark.table("partTable").filter($"width" === 1).collect().nonEmpty)
assert(spark.table("partTable").filter($"width" === 1 && $"length" === 1).collect().isEmpty)
}
withTable("partTable") {
data.write.partitionBy("width", "length").saveAsTable("partTable")
// support partial partition spec
sql("TRUNCATE TABLE partTable PARTITION (width=1)")
assert(spark.table("partTable").collect().nonEmpty)
assert(spark.table("partTable").filter($"width" === 1).collect().isEmpty)
}
withTable("partTable") {
data.write.partitionBy("width", "length").saveAsTable("partTable")
// do nothing if no partition is matched for the given partial partition spec
sql("TRUNCATE TABLE partTable PARTITION (width=100)")
assert(spark.table("partTable").count() == data.count())
// throw exception if no partition is matched for the given non-partial partition spec.
intercept[NoSuchPartitionException] {
sql("TRUNCATE TABLE partTable PARTITION (width=100, length=100)")
}
// throw exception if the column in partition spec is not a partition column.
val e = intercept[AnalysisException] {
sql("TRUNCATE TABLE partTable PARTITION (unknown=1)")
}
assert(e.message.contains("unknown is not a valid partition column"))
}
}
test("create hive serde table with new syntax") {
withTable("t", "t2", "t3") {
withTempPath { path =>
sql(
s"""
|CREATE TABLE t(id int) USING hive
|OPTIONS(fileFormat 'orc', compression 'Zlib')
|LOCATION '${path.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.serde == Some("org.apache.hadoop.hive.ql.io.orc.OrcSerde"))
assert(table.storage.properties.get("compression") == Some("Zlib"))
assert(spark.table("t").collect().isEmpty)
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
// Check if this is compressed as ZLIB.
val maybeOrcFile = path.listFiles().find(_.getName.startsWith("part"))
assertCompression(maybeOrcFile, "orc", "ZLIB")
sql("CREATE TABLE t2 USING HIVE AS SELECT 1 AS c1, 'a' AS c2")
val table2 = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t2"))
assert(DDLUtils.isHiveTable(table2))
assert(table2.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
checkAnswer(spark.table("t2"), Row(1, "a"))
sql("CREATE TABLE t3(a int, p int) USING hive PARTITIONED BY (p)")
sql("INSERT INTO t3 PARTITION(p=1) SELECT 0")
checkAnswer(spark.table("t3"), Row(0, 1))
}
}
}
test("create hive serde table with Catalog") {
withTable("t") {
withTempDir { dir =>
val df = spark.catalog.createTable(
"t",
"hive",
new StructType().add("i", "int"),
Map("path" -> dir.getCanonicalPath, "fileFormat" -> "parquet"))
assert(df.collect().isEmpty)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.inputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"))
assert(table.storage.outputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"))
assert(table.storage.serde ==
Some("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"))
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
}
}
}
test("create hive serde table with DataFrameWriter.saveAsTable") {
withTable("t", "t1") {
Seq(1 -> "a").toDF("i", "j")
.write.format("hive").option("fileFormat", "avro").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a"))
Seq("c" -> 1).toDF("i", "j").write.format("hive")
.mode(SaveMode.Overwrite).option("fileFormat", "parquet").saveAsTable("t")
checkAnswer(spark.table("t"), Row("c", 1))
var table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.inputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"))
assert(table.storage.outputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"))
assert(table.storage.serde ==
Some("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"))
Seq(9 -> "x").toDF("i", "j")
.write.format("hive").mode(SaveMode.Overwrite).option("fileFormat", "avro").saveAsTable("t")
checkAnswer(spark.table("t"), Row(9, "x"))
table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.inputFormat ==
Some("org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat"))
assert(table.storage.outputFormat ==
Some("org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat"))
assert(table.storage.serde ==
Some("org.apache.hadoop.hive.serde2.avro.AvroSerDe"))
val e2 = intercept[AnalysisException] {
Seq(1 -> "a").toDF("i", "j").write.format("hive").bucketBy(4, "i").saveAsTable("t1")
}
assert(e2.message.contains("Creating bucketed Hive serde table is not supported yet"))
val e3 = intercept[AnalysisException] {
spark.table("t").write.format("hive").mode("overwrite").saveAsTable("t")
}
assert(e3.message.contains("Cannot overwrite table default.t that is also being read from"))
}
}
test("append data to hive serde table") {
withTable("t", "t1") {
Seq(1 -> "a").toDF("i", "j")
.write.format("hive").option("fileFormat", "avro").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a"))
sql("INSERT INTO t SELECT 2, 'b'")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Nil)
Seq(3 -> "c").toDF("i", "j")
.write.format("hive").mode("append").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Row(3, "c") :: Nil)
Seq(3.5 -> 3).toDF("i", "j")
.write.format("hive").mode("append").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Row(3, "c")
:: Row(3, "3") :: Nil)
Seq(4 -> "d").toDF("i", "j").write.saveAsTable("t1")
val e = intercept[AnalysisException] {
Seq(5 -> "e").toDF("i", "j")
.write.format("hive").mode("append").saveAsTable("t1")
}
assert(e.message.contains("The format of the existing table default.t1 is "))
assert(e.message.contains("It doesn't match the specified format `HiveFileFormat`."))
}
}
test("create partitioned hive serde table as select") {
withTable("t", "t1") {
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
Seq(10 -> "y").toDF("i", "j").write.format("hive").partitionBy("i").saveAsTable("t")
checkAnswer(spark.table("t"), Row("y", 10) :: Nil)
Seq((1, 2, 3)).toDF("i", "j", "k").write.mode("overwrite").format("hive")
.partitionBy("j", "k").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, 2, 3) :: Nil)
spark.sql("create table t1 using hive partitioned by (i) as select 1 as i, 'a' as j")
checkAnswer(spark.table("t1"), Row("a", 1) :: Nil)
}
}
}
test("read/write files with hive data source is not allowed") {
withTempDir { dir =>
val e = intercept[AnalysisException] {
spark.read.format("hive").load(dir.getAbsolutePath)
}
assert(e.message.contains("Hive data source can only be used with tables"))
val e2 = intercept[AnalysisException] {
Seq(1 -> "a").toDF("i", "j").write.format("hive").save(dir.getAbsolutePath)
}
assert(e2.message.contains("Hive data source can only be used with tables"))
val e3 = intercept[AnalysisException] {
spark.readStream.format("hive").load(dir.getAbsolutePath)
}
assert(e3.message.contains("Hive data source can only be used with tables"))
val e4 = intercept[AnalysisException] {
spark.readStream.schema(new StructType()).parquet(dir.getAbsolutePath)
.writeStream.format("hive").start(dir.getAbsolutePath)
}
assert(e4.message.contains("Hive data source can only be used with tables"))
}
}
test("partitioned table should always put partition columns at the end of table schema") {
def getTableColumns(tblName: String): Seq[String] = {
spark.sessionState.catalog.getTableMetadata(TableIdentifier(tblName)).schema.map(_.name)
}
val provider = spark.sessionState.conf.defaultDataSourceName
withTable("t", "t1", "t2", "t3", "t4", "t5", "t6") {
sql(s"CREATE TABLE t(a int, b int, c int, d int) USING $provider PARTITIONED BY (d, b)")
assert(getTableColumns("t") == Seq("a", "c", "d", "b"))
sql(s"CREATE TABLE t1 USING $provider PARTITIONED BY (d, b) AS SELECT 1 a, 1 b, 1 c, 1 d")
assert(getTableColumns("t1") == Seq("a", "c", "d", "b"))
Seq((1, 1, 1, 1)).toDF("a", "b", "c", "d").write.partitionBy("d", "b").saveAsTable("t2")
assert(getTableColumns("t2") == Seq("a", "c", "d", "b"))
withTempPath { path =>
val dataPath = new File(new File(path, "d=1"), "b=1").getCanonicalPath
Seq(1 -> 1).toDF("a", "c").write.save(dataPath)
sql(s"CREATE TABLE t3 USING $provider LOCATION '${path.toURI}'")
assert(getTableColumns("t3") == Seq("a", "c", "d", "b"))
}
sql("CREATE TABLE t4(a int, b int, c int, d int) USING hive PARTITIONED BY (d, b)")
assert(getTableColumns("t4") == Seq("a", "c", "d", "b"))
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
sql("CREATE TABLE t5 USING hive PARTITIONED BY (d, b) AS SELECT 1 a, 1 b, 1 c, 1 d")
assert(getTableColumns("t5") == Seq("a", "c", "d", "b"))
Seq((1, 1, 1, 1)).toDF("a", "b", "c", "d").write.format("hive")
.partitionBy("d", "b").saveAsTable("t6")
assert(getTableColumns("t6") == Seq("a", "c", "d", "b"))
}
}
}
test("create hive table with a non-existing location") {
withTable("t", "t1") {
withTempPath { dir =>
spark.sql(s"CREATE TABLE t(a int, b int) USING hive LOCATION '${dir.toURI}'")
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
spark.sql("INSERT INTO TABLE t SELECT 1, 2")
assert(dir.exists())
checkAnswer(spark.table("t"), Row(1, 2))
}
// partition table
withTempPath { dir =>
spark.sql(
s"""
|CREATE TABLE t1(a int, b int)
|USING hive
|PARTITIONED BY(a)
|LOCATION '${dir.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
spark.sql("INSERT INTO TABLE t1 PARTITION(a=1) SELECT 2")
val partDir = new File(dir, "a=1")
assert(partDir.exists())
checkAnswer(spark.table("t1"), Row(2, 1))
}
}
}
Seq(true, false).foreach { shouldDelete =>
val tcName = if (shouldDelete) "non-existing" else "existed"
test(s"CTAS for external hive table with a $tcName location") {
withTable("t", "t1") {
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
withTempDir { dir =>
if (shouldDelete) dir.delete()
spark.sql(
s"""
|CREATE TABLE t
|USING hive
|LOCATION '${dir.toURI}'
|AS SELECT 3 as a, 4 as b, 1 as c, 2 as d
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
checkAnswer(spark.table("t"), Row(3, 4, 1, 2))
}
// partition table
withTempDir { dir =>
if (shouldDelete) dir.delete()
spark.sql(
s"""
|CREATE TABLE t1
|USING hive
|PARTITIONED BY(a, b)
|LOCATION '${dir.toURI}'
|AS SELECT 3 as a, 4 as b, 1 as c, 2 as d
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
val partDir = new File(dir, "a=3")
assert(partDir.exists())
checkAnswer(spark.table("t1"), Row(1, 2, 3, 4))
}
}
}
}
}
Seq("parquet", "hive").foreach { datasource =>
Seq("a b", "a:b", "a%b", "a,b").foreach { specialChars =>
test(s"partition column name of $datasource table containing $specialChars") {
withTable("t") {
withTempDir { dir =>
spark.sql(
s"""
|CREATE TABLE t(a string, `$specialChars` string)
|USING $datasource
|PARTITIONED BY(`$specialChars`)
|LOCATION '${dir.toURI}'
""".stripMargin)
assert(dir.listFiles().isEmpty)
spark.sql(s"INSERT INTO TABLE t PARTITION(`$specialChars`=2) SELECT 1")
val partEscaped = s"${ExternalCatalogUtils.escapePathName(specialChars)}=2"
val partFile = new File(dir, partEscaped)
assert(partFile.listFiles().nonEmpty)
checkAnswer(spark.table("t"), Row("1", "2") :: Nil)
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
spark.sql(s"INSERT INTO TABLE t PARTITION(`$specialChars`) SELECT 3, 4")
val partEscaped1 = s"${ExternalCatalogUtils.escapePathName(specialChars)}=4"
val partFile1 = new File(dir, partEscaped1)
assert(partFile1.listFiles().nonEmpty)
checkAnswer(spark.table("t"), Row("1", "2") :: Row("3", "4") :: Nil)
}
}
}
}
}
}
Seq("a b", "a:b", "a%b").foreach { specialChars =>
test(s"hive table: location uri contains $specialChars") {
      // On Windows, it looks like a colon in the file name is illegal by default. See
// https://support.microsoft.com/en-us/help/289627
assume(!Utils.isWindows || specialChars != "a:b")
withTable("t") {
withTempDir { dir =>
val loc = new File(dir, specialChars)
loc.mkdir()
          // The parser does not recognize backslashes on Windows as they are, so they
          // currently need to be escaped.
          val escapedLoc = loc.getAbsolutePath.replace("\\", "\\\\")
spark.sql(
s"""
|CREATE TABLE t(a string)
|USING hive
|LOCATION '$escapedLoc'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(loc.getAbsolutePath))
assert(new Path(table.location).toString.contains(specialChars))
assert(loc.listFiles().isEmpty)
if (specialChars != "a:b") {
spark.sql("INSERT INTO TABLE t SELECT 1")
assert(loc.listFiles().length >= 1)
checkAnswer(spark.table("t"), Row("1") :: Nil)
} else {
val e = intercept[AnalysisException] {
spark.sql("INSERT INTO TABLE t SELECT 1")
}.getMessage
assert(e.contains("java.net.URISyntaxException: Relative path in absolute URI: a:b"))
}
}
withTempDir { dir =>
val loc = new File(dir, specialChars)
loc.mkdir()
          val escapedLoc = loc.getAbsolutePath.replace("\\", "\\\\")
spark.sql(
s"""
|CREATE TABLE t1(a string, b string)
|USING hive
|PARTITIONED BY(b)
|LOCATION '$escapedLoc'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location == makeQualifiedPath(loc.getAbsolutePath))
assert(new Path(table.location).toString.contains(specialChars))
assert(loc.listFiles().isEmpty)
if (specialChars != "a:b") {
spark.sql("INSERT INTO TABLE t1 PARTITION(b=2) SELECT 1")
val partFile = new File(loc, "b=2")
assert(partFile.listFiles().nonEmpty)
checkAnswer(spark.table("t1"), Row("1", "2") :: Nil)
spark.sql("INSERT INTO TABLE t1 PARTITION(b='2017-03-03 12:13%3A14') SELECT 1")
val partFile1 = new File(loc, "b=2017-03-03 12:13%3A14")
assert(!partFile1.exists())
if (!Utils.isWindows) {
// Actual path becomes "b=2017-03-03%2012%3A13%253A14" on Windows.
val partFile2 = new File(loc, "b=2017-03-03 12%3A13%253A14")
assert(partFile2.listFiles().nonEmpty)
checkAnswer(spark.table("t1"),
Row("1", "2") :: Row("1", "2017-03-03 12:13%3A14") :: Nil)
}
} else {
val e = intercept[AnalysisException] {
spark.sql("INSERT INTO TABLE t1 PARTITION(b=2) SELECT 1")
}.getMessage
assert(e.contains("java.net.URISyntaxException: Relative path in absolute URI: a:b"))
val e1 = intercept[AnalysisException] {
spark.sql("INSERT INTO TABLE t1 PARTITION(b='2017-03-03 12:13%3A14') SELECT 1")
}.getMessage
assert(e1.contains("java.net.URISyntaxException: Relative path in absolute URI: a:b"))
}
}
}
}
}
test("SPARK-19905: Hive SerDe table input paths") {
withTable("spark_19905") {
withTempView("spark_19905_view") {
spark.range(10).createOrReplaceTempView("spark_19905_view")
sql("CREATE TABLE spark_19905 STORED AS RCFILE AS SELECT * FROM spark_19905_view")
assert(spark.table("spark_19905").inputFiles.nonEmpty)
assert(sql("SELECT input_file_name() FROM spark_19905").count() > 0)
}
}
}
hiveFormats.foreach { tableType =>
test(s"alter hive serde table add columns -- partitioned - $tableType") {
withTable("tab") {
sql(
s"""
|CREATE TABLE tab (c1 int, c2 int)
|PARTITIONED BY (c3 int) STORED AS $tableType
""".stripMargin)
sql("INSERT INTO tab PARTITION (c3=1) VALUES (1, 2)")
sql("ALTER TABLE tab ADD COLUMNS (c4 int)")
checkAnswer(
sql("SELECT * FROM tab WHERE c3 = 1"),
Seq(Row(1, 2, null, 1))
)
assert(spark.table("tab").schema
.contains(StructField("c4", IntegerType)))
sql("INSERT INTO tab PARTITION (c3=2) VALUES (2, 3, 4)")
checkAnswer(
spark.table("tab"),
Seq(Row(1, 2, null, 1), Row(2, 3, 4, 2))
)
checkAnswer(
sql("SELECT * FROM tab WHERE c3 = 2 AND c4 IS NOT NULL"),
Seq(Row(2, 3, 4, 2))
)
sql("ALTER TABLE tab ADD COLUMNS (c5 char(10))")
assert(spark.table("tab").schema.find(_.name == "c5")
.get.metadata.getString("HIVE_TYPE_STRING") == "char(10)")
}
}
}
hiveFormats.foreach { tableType =>
test(s"alter hive serde table add columns -- with predicate - $tableType ") {
withTable("tab") {
sql(s"CREATE TABLE tab (c1 int, c2 int) STORED AS $tableType")
sql("INSERT INTO tab VALUES (1, 2)")
sql("ALTER TABLE tab ADD COLUMNS (c4 int)")
checkAnswer(
sql("SELECT * FROM tab WHERE c4 IS NULL"),
Seq(Row(1, 2, null))
)
assert(spark.table("tab").schema
.contains(StructField("c4", IntegerType)))
sql("INSERT INTO tab VALUES (2, 3, 4)")
checkAnswer(
sql("SELECT * FROM tab WHERE c4 = 4 "),
Seq(Row(2, 3, 4))
)
checkAnswer(
spark.table("tab"),
Seq(Row(1, 2, null), Row(2, 3, 4))
)
}
}
}
Seq(true, false).foreach { caseSensitive =>
test(s"alter add columns with existing column name - caseSensitive $caseSensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> s"$caseSensitive") {
withTable("tab") {
sql("CREATE TABLE tab (c1 int) PARTITIONED BY (c2 int) STORED AS PARQUET")
if (!caseSensitive) {
// duplicating partitioning column name
val e1 = intercept[AnalysisException] {
sql("ALTER TABLE tab ADD COLUMNS (C2 string)")
}.getMessage
assert(e1.contains("Found duplicate column(s)"))
// duplicating data column name
val e2 = intercept[AnalysisException] {
sql("ALTER TABLE tab ADD COLUMNS (C1 string)")
}.getMessage
assert(e2.contains("Found duplicate column(s)"))
} else {
            // The Hive catalog still complains that C2 is a duplicate column name because Hive
            // identifiers are case insensitive.
val e1 = intercept[AnalysisException] {
sql("ALTER TABLE tab ADD COLUMNS (C2 string)")
}.getMessage
assert(e1.contains("HiveException"))
            // The Hive catalog still complains that C1 is a duplicate column name because Hive
            // identifiers are case insensitive.
val e2 = intercept[AnalysisException] {
sql("ALTER TABLE tab ADD COLUMNS (C1 string)")
}.getMessage
assert(e2.contains("HiveException"))
}
}
}
}
}
test("SPARK-21216: join with a streaming DataFrame") {
import org.apache.spark.sql.execution.streaming.MemoryStream
import testImplicits._
implicit val _sqlContext = spark.sqlContext
Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word").createOrReplaceTempView("t1")
// Make a table and ensure it will be broadcast.
sql("""CREATE TABLE smallTable(word string, number int)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|STORED AS TEXTFILE
""".stripMargin)
sql(
"""INSERT INTO smallTable
|SELECT word, number from t1
""".stripMargin)
val inputData = MemoryStream[Int]
val joined = inputData.toDS().toDF()
.join(spark.table("smallTable"), $"value" === $"number")
val sq = joined.writeStream
.format("memory")
.queryName("t2")
.start()
try {
inputData.addData(1, 2)
sq.processAllAvailable()
checkAnswer(
spark.table("t2"),
Seq(Row(1, "one", 1), Row(2, "two", 2))
)
} finally {
sq.stop()
}
}
test("table name with schema") {
// regression test for SPARK-11778
withDatabase("usrdb") {
spark.sql("create schema usrdb")
withTable("usrdb.test") {
spark.sql("create table usrdb.test(c int)")
spark.read.table("usrdb.test")
}
}
}
private def assertCompression(maybeFile: Option[File], format: String, compression: String) = {
assert(maybeFile.isDefined)
val actualCompression = format match {
case "orc" =>
OrcFileOperator.getFileReader(maybeFile.get.toPath.toString).get.getCompression.name
case "parquet" =>
val footer = ParquetFileReader.readFooter(
sparkContext.hadoopConfiguration, new Path(maybeFile.get.getPath), NO_FILTER)
footer.getBlocks.get(0).getColumns.get(0).getCodec.toString
}
assert(compression === actualCompression)
}
Seq(("orc", "ZLIB"), ("parquet", "GZIP")).foreach { case (fileFormat, compression) =>
test(s"SPARK-22158 convertMetastore should not ignore table property - $fileFormat") {
withSQLConf(CONVERT_METASTORE_ORC.key -> "true", CONVERT_METASTORE_PARQUET.key -> "true") {
withTable("t") {
withTempPath { path =>
sql(
s"""
|CREATE TABLE t(id int) USING hive
|OPTIONS(fileFormat '$fileFormat', compression '$compression')
|LOCATION '${path.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.serde.get.contains(fileFormat))
assert(table.storage.properties.get("compression") == Some(compression))
assert(spark.table("t").collect().isEmpty)
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
val maybeFile = path.listFiles().find(_.getName.startsWith("part"))
assertCompression(maybeFile, fileFormat, compression)
}
}
}
}
}
private def getReader(path: String): org.apache.orc.Reader = {
val conf = spark.sessionState.newHadoopConf()
val files = org.apache.spark.sql.execution.datasources.orc.OrcUtils.listOrcFiles(path, conf)
assert(files.length == 1)
val file = files.head
val fs = file.getFileSystem(conf)
val readerOptions = org.apache.orc.OrcFile.readerOptions(conf).filesystem(fs)
org.apache.orc.OrcFile.createReader(file, readerOptions)
}
test("SPARK-23355 convertMetastoreOrc should not ignore table properties - STORED AS") {
Seq("native", "hive").foreach { orcImpl =>
withSQLConf(ORC_IMPLEMENTATION.key -> orcImpl, CONVERT_METASTORE_ORC.key -> "true") {
withTable("t") {
withTempPath { path =>
sql(
s"""
|CREATE TABLE t(id int) STORED AS ORC
|TBLPROPERTIES (
| orc.compress 'ZLIB',
| orc.compress.size '1001',
| orc.row.index.stride '2002',
| hive.exec.orc.default.block.size '3003',
| hive.exec.orc.compression.strategy 'COMPRESSION')
|LOCATION '${path.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.serde.get.contains("orc"))
val properties = table.properties
assert(properties.get("orc.compress") == Some("ZLIB"))
assert(properties.get("orc.compress.size") == Some("1001"))
assert(properties.get("orc.row.index.stride") == Some("2002"))
assert(properties.get("hive.exec.orc.default.block.size") == Some("3003"))
assert(properties.get("hive.exec.orc.compression.strategy") == Some("COMPRESSION"))
assert(spark.table("t").collect().isEmpty)
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
val maybeFile = path.listFiles().find(_.getName.startsWith("part"))
Utils.tryWithResource(getReader(maybeFile.head.getCanonicalPath)) { reader =>
assert(reader.getCompressionKind.name === "ZLIB")
assert(reader.getCompressionSize == 1001)
assert(reader.getRowIndexStride == 2002)
}
}
}
}
}
}
test("SPARK-23355 convertMetastoreParquet should not ignore table properties - STORED AS") {
withSQLConf(CONVERT_METASTORE_PARQUET.key -> "true") {
withTable("t") {
withTempPath { path =>
sql(
s"""
|CREATE TABLE t(id int) STORED AS PARQUET
|TBLPROPERTIES (
| parquet.compression 'GZIP'
|)
|LOCATION '${path.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.serde.get.contains("parquet"))
val properties = table.properties
assert(properties.get("parquet.compression") == Some("GZIP"))
assert(spark.table("t").collect().isEmpty)
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
val maybeFile = path.listFiles().find(_.getName.startsWith("part"))
assertCompression(maybeFile, "parquet", "GZIP")
}
}
}
}
test("load command for non local invalid path validation") {
withTable("tbl") {
sql("CREATE TABLE tbl(i INT, j STRING) USING hive")
val e = intercept[AnalysisException](
sql("load data inpath '/doesnotexist.csv' into table tbl"))
assert(e.message.contains("LOAD DATA input path does not exist"))
}
}
test("SPARK-22252: FileFormatWriter should respect the input query schema in HIVE") {
withTable("t1", "t2", "t3", "t4") {
spark.range(1).select($"id" as "col1", $"id" as "col2").write.saveAsTable("t1")
spark.sql("select COL1, COL2 from t1").write.format("hive").saveAsTable("t2")
checkAnswer(spark.table("t2"), Row(0, 0))
// Test picking part of the columns when writing.
spark.range(1).select($"id", $"id" as "col1", $"id" as "col2").write.saveAsTable("t3")
spark.sql("select COL1, COL2 from t3").write.format("hive").saveAsTable("t4")
checkAnswer(spark.table("t4"), Row(0, 0))
}
}
test("SPARK-24812: desc formatted table for last access verification") {
withTable("t1") {
sql(
"CREATE TABLE IF NOT EXISTS t1 (c1_int INT, c2_string STRING, c3_float FLOAT)")
val desc = sql("DESC FORMATTED t1").filter($"col_name".startsWith("Last Access"))
.select("data_type")
      // Check that the last access time is not reported with the default epoch date (1970),
      // as that would be a wrong access time.
assert((desc.first.toString.contains("UNKNOWN")))
}
}
test("SPARK-24681 checks if nested column names do not include ',', ':', and ';'") {
val expectedMsg = "Cannot create a table having a nested column whose name contains invalid " +
"characters (',', ':', ';') in Hive metastore."
Seq("nested,column", "nested:column", "nested;column").foreach { nestedColumnName =>
withTable("t") {
val e = intercept[AnalysisException] {
spark.range(1)
.select(struct(lit(0).as(nestedColumnName)).as("toplevel"))
.write
.format("hive")
.saveAsTable("t")
}.getMessage
assert(e.contains(expectedMsg))
}
}
}
test("desc formatted table should also show viewOriginalText for views") {
withView("v1", "v2") {
sql("CREATE VIEW v1 AS SELECT 1 AS value")
assert(sql("DESC FORMATTED v1").collect().containsSlice(
Seq(
Row("Type", "VIEW", ""),
Row("View Text", "SELECT 1 AS value", ""),
Row("View Original Text", "SELECT 1 AS value", "")
)
))
hiveClient.runSqlHive("CREATE VIEW v2 AS SELECT * FROM (SELECT 1) T")
assert(sql("DESC FORMATTED v2").collect().containsSlice(
Seq(
Row("Type", "VIEW", ""),
Row("View Text", "SELECT `t`.`_c0` FROM (SELECT 1) `T`", ""),
Row("View Original Text", "SELECT * FROM (SELECT 1) T", "")
)
))
}
}
test("Hive CTAS can't create partitioned table by specifying schema") {
val err1 = intercept[ParseException] {
spark.sql(
s"""
|CREATE TABLE t (a int)
|PARTITIONED BY (b string)
|STORED AS parquet
|AS SELECT 1 as a, "a" as b
""".stripMargin)
}.getMessage
assert(err1.contains("Schema may not be specified in a Create Table As Select " +
"(CTAS) statement"))
val err2 = intercept[ParseException] {
spark.sql(
s"""
|CREATE TABLE t
|PARTITIONED BY (b string)
|STORED AS parquet
|AS SELECT 1 as a, "a" as b
""".stripMargin)
}.getMessage
assert(err2.contains("Create Partitioned Table As Select cannot specify data type for " +
"the partition columns of the target table"))
}
test("Hive CTAS with dynamic partition") {
Seq("orc", "parquet").foreach { format =>
withTable("t") {
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
spark.sql(
s"""
|CREATE TABLE t
|PARTITIONED BY (b)
|STORED AS $format
|AS SELECT 1 as a, "a" as b
""".stripMargin)
checkAnswer(spark.table("t"), Row(1, "a"))
assert(spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
.partitionColumnNames === Seq("b"))
}
}
}
}
test("Create Table LIKE STORED AS Hive Format") {
val catalog = spark.sessionState.catalog
withTable("s") {
sql("CREATE TABLE s(a INT, b INT) STORED AS ORC")
hiveFormats.foreach { tableType =>
val expectedSerde = HiveSerDe.sourceToSerDe(tableType)
withTable("t") {
sql(s"CREATE TABLE t LIKE s STORED AS $tableType")
val table = catalog.getTableMetadata(TableIdentifier("t"))
assert(table.provider == Some("hive"))
assert(table.storage.serde == expectedSerde.get.serde)
assert(table.storage.inputFormat == expectedSerde.get.inputFormat)
assert(table.storage.outputFormat == expectedSerde.get.outputFormat)
}
}
}
}
test("Create Table LIKE with specified TBLPROPERTIES") {
val catalog = spark.sessionState.catalog
withTable("s", "t") {
sql("CREATE TABLE s(a INT, b INT) USING hive TBLPROPERTIES('a'='apple')")
val source = catalog.getTableMetadata(TableIdentifier("s"))
assert(source.properties("a") == "apple")
sql("CREATE TABLE t LIKE s STORED AS parquet TBLPROPERTIES('f'='foo', 'b'='bar')")
val table = catalog.getTableMetadata(TableIdentifier("t"))
assert(table.properties.get("a") === None)
assert(table.properties("f") == "foo")
assert(table.properties("b") == "bar")
}
}
test("Create Table LIKE with row format") {
val catalog = spark.sessionState.catalog
withTable("sourceHiveTable", "sourceDsTable", "targetHiveTable1", "targetHiveTable2") {
sql("CREATE TABLE sourceHiveTable(a INT, b INT) STORED AS PARQUET")
sql("CREATE TABLE sourceDsTable(a INT, b INT) USING PARQUET")
      // row format doesn't work when creating targetDsTable
var e = intercept[AnalysisException] {
spark.sql(
"""
|CREATE TABLE targetDsTable LIKE sourceHiveTable USING PARQUET
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
""".stripMargin)
}.getMessage
assert(e.contains("'ROW FORMAT' must be used with 'STORED AS'"))
// row format doesn't work with provider hive
e = intercept[AnalysisException] {
spark.sql(
"""
|CREATE TABLE targetHiveTable LIKE sourceHiveTable USING hive
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES ('test' = 'test')
""".stripMargin)
}.getMessage
assert(e.contains("'ROW FORMAT' must be used with 'STORED AS'"))
// row format doesn't work without 'STORED AS'
e = intercept[AnalysisException] {
spark.sql(
"""
|CREATE TABLE targetDsTable LIKE sourceDsTable
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES ('test' = 'test')
""".stripMargin)
}.getMessage
assert(e.contains("'ROW FORMAT' must be used with 'STORED AS'"))
// row format works with STORED AS hive format (from hive table)
spark.sql(
"""
|CREATE TABLE targetHiveTable1 LIKE sourceHiveTable STORED AS PARQUET
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES ('test' = 'test')
""".stripMargin)
var table = catalog.getTableMetadata(TableIdentifier("targetHiveTable1"))
assert(table.provider === Some("hive"))
assert(table.storage.inputFormat ===
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"))
assert(table.storage.serde === Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
assert(table.storage.properties("test") == "test")
// row format works with STORED AS hive format (from datasource table)
spark.sql(
"""
|CREATE TABLE targetHiveTable2 LIKE sourceDsTable STORED AS PARQUET
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES ('test' = 'test')
""".stripMargin)
table = catalog.getTableMetadata(TableIdentifier("targetHiveTable2"))
assert(table.provider === Some("hive"))
assert(table.storage.inputFormat ===
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"))
assert(table.storage.serde === Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
assert(table.storage.properties("test") == "test")
}
}
test("SPARK-30098: create table without provider should " +
"use default data source under non-legacy mode") {
val catalog = spark.sessionState.catalog
withSQLConf(
SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED.key -> "false") {
withTable("s") {
val defaultProvider = conf.defaultDataSourceName
sql("CREATE TABLE s(a INT, b INT)")
val table = catalog.getTableMetadata(TableIdentifier("s"))
assert(table.provider === Some(defaultProvider))
}
}
}
test("SPARK-30098: create table without provider should " +
"use hive under legacy mode") {
val catalog = spark.sessionState.catalog
withSQLConf(
SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED.key -> "true") {
withTable("s") {
sql("CREATE TABLE s(a INT, b INT)")
val table = catalog.getTableMetadata(TableIdentifier("s"))
assert(table.provider === Some("hive"))
}
}
}
}
| jkbradley/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala | Scala | apache-2.0 | 108,983 |
package com.wavesplatform.it.asset
import com.typesafe.config.Config
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.features.BlockchainFeatures
import com.wavesplatform.it.NodeConfigs
import com.wavesplatform.it.NodeConfigs.Default
import com.wavesplatform.it.api.LeaseInfo
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.sync._
import com.wavesplatform.it.transactions.BaseTransactionSuite
import com.wavesplatform.lang.v1.compiler.Terms.CONST_BYTESTR
import com.wavesplatform.lang.v1.estimator.v3.ScriptEstimatorV3
import com.wavesplatform.lang.v1.traits.domain.{Lease, Recipient}
import com.wavesplatform.transaction.TxVersion
import com.wavesplatform.transaction.smart.script.ScriptCompiler
class LeaseActionSuite extends BaseTransactionSuite {
override protected def nodeConfigs: Seq[Config] =
NodeConfigs
.Builder(Default, 2, Seq.empty)
.overrideBase(_.preactivatedFeatures((BlockchainFeatures.SynchronousCalls.id, 1)))
.buildNonConflicting()
private def compile(script: String): String =
ScriptCompiler.compile(script, ScriptEstimatorV3(fixOverflow = true)).explicitGet()._1.bytes().base64
private val dAppLeaseAmount = 123
private val txLeaseAmount = 456
private lazy val dAppAcc = firstKeyPair
private lazy val dAppAddress = firstAddress
private lazy val invoker = secondKeyPair
private lazy val invokerAddress = secondAddress
test("set script") {
val dApp = compile(
s"""
| {-# STDLIB_VERSION 5 #-}
| {-# CONTENT_TYPE DAPP #-}
| {-# SCRIPT_TYPE ACCOUNT #-}
|
| @Callable(i)
| func lease() = {
| [
| Lease(i.caller, $dAppLeaseAmount)
| ]
| }
|
| @Callable(i)
| func leaseCancel(leaseId: ByteVector) = {
| [
| LeaseCancel(leaseId)
| ]
| }
""".stripMargin
)
sender.setScript(dAppAcc, Some(dApp), waitForTx = true)
}
test("active leases") {
val leaseTxId = sender.lease(dAppAcc, invokerAddress, txLeaseAmount, smartMinFee, TxVersion.V2, waitForTx = true).id
val leaseTxHeight = sender.transactionStatus(leaseTxId).height.get
val invokeId = sender.invokeScript(invoker, dAppAddress, Some("lease"), Nil, fee = invokeFee, waitForTx = true)._1.id
val invokeHeight = sender.transactionStatus(invokeId).height.get
val recipient = Recipient.Address(ByteStr.decodeBase58(invokerAddress).get)
val leaseActionId = Lease.calculateId(Lease(recipient, dAppLeaseAmount, 0), ByteStr.decodeBase58(invokeId).get).toString
sender.activeLeases(dAppAddress) should contain theSameElementsAs Seq(
LeaseInfo(leaseTxId, leaseTxId, dAppAddress, invokerAddress, txLeaseAmount, leaseTxHeight),
LeaseInfo(leaseActionId, invokeId, dAppAddress, invokerAddress, dAppLeaseAmount, invokeHeight)
)
val leaseTxIdParam = List(CONST_BYTESTR(ByteStr.decodeBase58(leaseTxId).get).explicitGet())
sender.invokeScript(dAppAcc, dAppAddress, Some("leaseCancel"), leaseTxIdParam, fee = invokeFee, waitForTx = true)
sender.activeLeases(dAppAddress) shouldBe Seq(
LeaseInfo(leaseActionId, invokeId, dAppAddress, invokerAddress, dAppLeaseAmount, invokeHeight)
)
}
}
| wavesplatform/Waves | node-it/src/test/scala/com/wavesplatform/it/asset/LeaseActionSuite.scala | Scala | mit | 3,362 |
package com.sfxcode.sapphire.extension.concurrent.akka
import java.util.concurrent.{ ExecutorService, ThreadFactory }
import akka.dispatch.{ DispatcherPrerequisites, ExecutorServiceConfigurator, ExecutorServiceFactory }
import com.typesafe.config.Config
class JavaFXEventThreadExecutorServiceConfigurator(config: Config, prerequisites: DispatcherPrerequisites)
extends ExecutorServiceConfigurator(config, prerequisites) {
private val f = new ExecutorServiceFactory {
def createExecutorService: ExecutorService = JavaFXExecutorService
}
def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory = f
}
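// A minimal configuration sketch (an assumption for illustration, not taken from this
// repository): an Akka dispatcher typically references such a configurator by its fully
// qualified class name in application.conf, so that actors using the dispatcher handle
// their messages on the JavaFX event thread:
//
//   javafx-dispatcher {
//     type = "Dispatcher"
//     executor = "com.sfxcode.sapphire.extension.concurrent.akka.JavaFXEventThreadExecutorServiceConfigurator"
//     throughput = 1
//   }
//
// An actor could then be created with Props(new MyActor).withDispatcher("javafx-dispatcher"),
// where MyActor and the dispatcher name are hypothetical.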
| sfxcode/sapphire-extension | src/main/scala/com/sfxcode/sapphire/extension/concurrent/akka/JavaFXEventThreadExecutorServiceConfigurator.scala | Scala | apache-2.0 | 657 |
package com.redis.serialization
object Format {
def apply(f: PartialFunction[Any, Any]): Format = new Format(f)
implicit val default: Format = new Format(Map.empty)
def formatDouble(d: Double, inclusive: Boolean = true) =
(if (inclusive) ("") else ("(")) + {
if (d.isInfinity) {
if (d > 0.0) "+inf" else "-inf"
} else {
d.toString
}
}
}
class Format(val format: PartialFunction[Any, Any]) {
def apply(in: Any): Array[Byte] =
(if (format.isDefinedAt(in)) (format(in)) else (in)) match {
case b: Array[Byte] => b
case d: Double => Format.formatDouble(d, true).getBytes("UTF-8")
case x => x.toString.getBytes("UTF-8")
}
def orElse(that: Format): Format = Format(format orElse that.format)
def orElse(that: PartialFunction[Any, Any]): Format = Format(format orElse that)
}
object Parse {
def apply[T](f: (Array[Byte]) => T) = new Parse[T](f)
object Implicits {
implicit val parseString = Parse[String](new String(_, "UTF-8"))
implicit val parseByteArray = Parse[Array[Byte]](x => x)
implicit val parseInt = Parse[Int](new String(_, "UTF-8").toInt)
implicit val parseLong = Parse[Long](new String(_, "UTF-8").toLong)
implicit val parseDouble = Parse[Double](new String(_, "UTF-8").toDouble)
}
implicit val parseDefault = Parse[String](new String(_, "UTF-8"))
val parseStringSafe = Parse[String](xs => new String(xs.iterator.flatMap{
case x if x > 31 && x < 127 => Iterator.single(x.toChar)
    case 10 => "\\n".iterator
    case 13 => "\\r".iterator
    case x => "\\x%02x".format(x).iterator
}.toArray))
}
class Parse[A](val f: (Array[Byte]) => A) extends Function1[Array[Byte], A] {
def apply(in: Array[Byte]): A = f(in)
}
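// Illustrative usage sketch (added for clarity, not part of the original file; the
// java.util.Date mapping below is a made-up example of a custom Format):
//
//   import com.redis.serialization.Parse.Implicits.parseInt
//
//   val fmt = Format { case d: java.util.Date => d.getTime }
//   fmt(new java.util.Date(0L))             // => "0".getBytes("UTF-8")
//   fmt(1.5)                                // => "1.5".getBytes("UTF-8")
//   implicitly[Parse[Int]]("42".getBytes)   // => 42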
| Tjoene/thesis | Case_Programs/scala-redis-2.9-pre-scala-2.10/src/main/scala/com/redis/Serialization.scala | Scala | gpl-2.0 | 1,750 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.schema
import scala.collection.JavaConverters._
import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
import org.apache.spark.sql.execution.command.{AlterTableDataTypeChangeModel, RunnableCommand}
import org.apache.spark.sql.hive.{CarbonRelation, CarbonSessionState}
import org.apache.spark.util.AlterTableUtil
import org.apache.carbondata.common.logging.{LogService, LogServiceFactory}
import org.apache.carbondata.core.locks.{ICarbonLock, LockUsage}
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.util.path.CarbonStorePath
import org.apache.carbondata.events.{AlterTableAddColumnPreEvent, AlterTableDataTypeChangePreEvent, OperationListenerBus}
import org.apache.carbondata.format.{ColumnSchema, SchemaEvolutionEntry, TableInfo}
import org.apache.carbondata.spark.util.{CarbonScalaUtil, DataTypeConverterUtil}
private[sql] case class CarbonAlterTableDataTypeChangeCommand(
alterTableDataTypeChangeModel: AlterTableDataTypeChangeModel)
extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
val LOGGER: LogService = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
val tableName = alterTableDataTypeChangeModel.tableName
val dbName = alterTableDataTypeChangeModel.databaseName
.getOrElse(sparkSession.catalog.currentDatabase)
LOGGER.audit(s"Alter table change data type request has been received for $dbName.$tableName")
val locksToBeAcquired = List(LockUsage.METADATA_LOCK, LockUsage.COMPACTION_LOCK)
var locks = List.empty[ICarbonLock]
// get the latest carbon table and check for column existence
var carbonTable: CarbonTable = null
var timeStamp = 0L
try {
locks = AlterTableUtil
.validateTableAndAcquireLock(dbName, tableName, locksToBeAcquired)(sparkSession)
val metastore = CarbonEnv.getInstance(sparkSession).carbonMetastore
carbonTable = metastore
.lookupRelation(Some(dbName), tableName)(sparkSession).asInstanceOf[CarbonRelation]
.tableMeta.carbonTable
val alterTableDataTypeChangeListener = AlterTableDataTypeChangePreEvent(carbonTable,
alterTableDataTypeChangeModel)
OperationListenerBus.getInstance().fireEvent(alterTableDataTypeChangeListener)
val columnName = alterTableDataTypeChangeModel.columnName
val carbonColumns = carbonTable.getCreateOrderColumn(tableName).asScala.filter(!_.isInvisible)
if (!carbonColumns.exists(_.getColName.equalsIgnoreCase(columnName))) {
LOGGER.audit(s"Alter table change data type request has failed. " +
s"Column $columnName does not exist")
sys.error(s"Column does not exist: $columnName")
}
val carbonColumn = carbonColumns.filter(_.getColName.equalsIgnoreCase(columnName))
if (carbonColumn.size == 1) {
CarbonScalaUtil
.validateColumnDataType(alterTableDataTypeChangeModel.dataTypeInfo, carbonColumn.head)
} else {
LOGGER.audit(s"Alter table change data type request has failed. " +
s"Column $columnName is invalid")
sys.error(s"Invalid Column: $columnName")
}
// read the latest schema file
val carbonTablePath = CarbonStorePath.getCarbonTablePath(carbonTable.getStorePath,
carbonTable.getCarbonTableIdentifier)
val tableInfo: TableInfo = metastore.getThriftTableInfo(carbonTablePath)(sparkSession)
// maintain the added column for schema evolution history
var addColumnSchema: ColumnSchema = null
var deletedColumnSchema: ColumnSchema = null
val columnSchemaList = tableInfo.fact_table.table_columns.asScala.filter(!_.isInvisible)
columnSchemaList.foreach { columnSchema =>
if (columnSchema.column_name.equalsIgnoreCase(columnName)) {
deletedColumnSchema = columnSchema.deepCopy
columnSchema.setData_type(DataTypeConverterUtil
.convertToThriftDataType(alterTableDataTypeChangeModel.dataTypeInfo.dataType))
columnSchema.setPrecision(alterTableDataTypeChangeModel.dataTypeInfo.precision)
columnSchema.setScale(alterTableDataTypeChangeModel.dataTypeInfo.scale)
addColumnSchema = columnSchema
}
}
timeStamp = System.currentTimeMillis
val schemaEvolutionEntry = new SchemaEvolutionEntry(timeStamp)
schemaEvolutionEntry.setAdded(List(addColumnSchema).asJava)
schemaEvolutionEntry.setRemoved(List(deletedColumnSchema).asJava)
tableInfo.getFact_table.getSchema_evolution.getSchema_evolution_history.get(0)
.setTime_stamp(System.currentTimeMillis)
AlterTableUtil
.updateSchemaInfo(carbonTable,
schemaEvolutionEntry,
tableInfo)(sparkSession,
sparkSession.sessionState.asInstanceOf[CarbonSessionState])
LOGGER.info(s"Alter table for data type change is successful for table $dbName.$tableName")
LOGGER.audit(s"Alter table for data type change is successful for table $dbName.$tableName")
} catch {
case e: Exception => LOGGER
.error("Alter table change datatype failed : " + e.getMessage)
if (carbonTable != null) {
AlterTableUtil.revertDataTypeChanges(dbName, tableName, timeStamp)(sparkSession)
}
sys.error(s"Alter table data type change operation failed: ${e.getMessage}")
} finally {
      // release locks after command execution completes
AlterTableUtil.releaseLocks(locks)
}
Seq.empty
}
}
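// Illustrative example (an assumption based on the model fields, not verified against the
// CarbonData parser): a statement of the following shape is expected to be translated into
// an AlterTableDataTypeChangeModel and routed to this command,
//
//   ALTER TABLE default.sales CHANGE quantity quantity BIGINT
//
// which rewrites the column's thrift ColumnSchema data type and records a
// SchemaEvolutionEntry containing the added and removed column versions.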
| HuaweiBigData/carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableDataTypeChangeCommand.scala | Scala | apache-2.0 | 6,360 |
package mr.merc.ai
import mr.merc.map.hex.TerrainHex
import mr.merc.map.terrain.TerrainType._
import mr.merc.unit.Soldier
import mr.merc.players.Player
import org.scalatest._
import mr.merc.unit.AttackAttribute._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class AttackResultPredictorTest extends AnyFunSuite with Matchers {
import mr.merc.unit.TestUtil._
val attackerHex = new TerrainHex(0, 0, DesertSand)
val defenderHex = new TerrainHex(0, 1, DesertSand)
test("simple prediction") {
val attacker = new Soldier("1", soldierType(10, 60, 10, 1), Player("1"))
val defender = new Soldier("2", soldierType(10, 40, 10, 1), Player("2"))
val pred = AttackResultPredictor.predictResult(attacker, defender, attackerHex, defenderHex, attacker.soldierType.attacks(0), Some(defender.soldierType.attacks(0)))
assert(pred.defenderDies === 0.6)
assert(pred.attackerDies === 0.4 * 0.4)
assert(pred.expectedDamageToAttacker === 0.4 * 0.4 * 10)
assert(pred.expectedDamageToDefender === 0.6 * 10)
}
test("complex prediction") {
val attacker = new Soldier("1", soldierType(10, 60, 5, 2), Player("1"))
val defender = new Soldier("2", soldierType(10, 40, 5, 2), Player("2"))
val pred = AttackResultPredictor.predictResult(attacker, defender, attackerHex, defenderHex, attacker.soldierType.attacks(0), Some(defender.soldierType.attacks(0)))
assert(pred.defenderDies === 0.6 * 0.6)
assert(pred.attackerDies === 0.4 * 0.4 * (1 - 0.6 * 0.6))
pred.expectedDamageToAttacker should be(((1 - 0.6 * 0.6) * 10 * 0.4 + 0.6 * 0.6 * 5 * 0.4) +- 0.01)
pred.expectedDamageToDefender should be(6d +- 0.01)
}
test("prediction with defender without attack") {
val attacker = new Soldier("1", soldierType(10, 40, 5, 2), Player("1"))
val defender = new Soldier("2", soldierType(10, 60, 10, 1), Player("2"))
val pred = AttackResultPredictor.predictResult(attacker, defender, attackerHex, defenderHex, attacker.soldierType.attacks(0), None)
assert(pred.attackerDies === 0)
assert(pred.defenderDies === 0.4 * 0.4)
assert(pred.expectedDamageToAttacker === 0)
assert(pred.expectedDamageToDefender === 4)
}
test("prediction with defender first strike") {
val attacker = new Soldier("1", soldierType(10, 40, 10, 1), Player("1"))
val defender = new Soldier("2", soldierType(10, 60, 10, 1, Set(Firststrike)), Player("2"))
val pred = AttackResultPredictor.predictResult(attacker, defender, attackerHex, defenderHex, attacker.soldierType.attacks(0), Some(defender.soldierType.attacks(0)))
assert(pred.attackerDies === 0.6)
assert(pred.defenderDies === 0.4 * 0.4)
assert(pred.expectedDamageToDefender === 0.4 * 0.4 * 10)
assert(pred.expectedDamageToAttacker === 0.6 * 10)
}
test("prediction with attacker first strike") {
val attacker = new Soldier("1", soldierType(10, 60, 10, 1, Set(Firststrike)), Player("1"))
val defender = new Soldier("2", soldierType(10, 40, 10, 1), Player("2"))
val pred = AttackResultPredictor.predictResult(attacker, defender, attackerHex, defenderHex, attacker.soldierType.attacks(0), Some(defender.soldierType.attacks(0)))
assert(pred.defenderDies === 0.6)
assert(pred.attackerDies === 0.4 * 0.4)
assert(pred.expectedDamageToAttacker === 0.4 * 0.4 * 10)
assert(pred.expectedDamageToDefender === 0.6 * 10)
}
test("prediction with attacker berserk") {
val attacker = new Soldier("1", soldierType(10, 60, 10, 1, Set(Berserk)), Player("1"))
val defender = new Soldier("2", soldierType(10, 40, 0, 1), Player("2"))
val pred = AttackResultPredictor.predictResult(attacker, defender, attackerHex, defenderHex, attacker.soldierType.attacks(0), Some(defender.soldierType.attacks(0)))
assert(pred.defenderDies > 0.9)
assert(pred.attackerDies === 0)
assert(pred.expectedDamageToAttacker === 0)
assert(pred.expectedDamageToDefender > 9)
}
test("prediction with defender berserk") {
val attacker = new Soldier("1", soldierType(10, 60, 10, 1), Player("1"))
val defender = new Soldier("2", soldierType(10, 40, 0, 1, Set(Berserk)), Player("2"))
val pred = AttackResultPredictor.predictResult(attacker, defender, attackerHex, defenderHex, attacker.soldierType.attacks(0), Some(defender.soldierType.attacks(0)))
assert(pred.defenderDies > 0.9)
assert(pred.attackerDies === 0)
assert(pred.expectedDamageToAttacker === 0)
assert(pred.expectedDamageToDefender > 9)
}
test("prediction with attacker who drains") {
val attacker = new Soldier("1", soldierType(10, 50, 10, 1, Set(Drain)), Player("1"))
val defender = new Soldier("2", soldierType(10, 50, 10, 1), Player("2"))
attacker.hp = 5
val pred = AttackResultPredictor.predictResult(attacker, defender, attackerHex, defenderHex, attacker.soldierType.attacks(0), Some(defender.soldierType.attacks(0)))
assert(pred.defenderDies === 0.5)
assert(pred.attackerDies === 0.5 * 0.5)
assert(pred.expectedDamageToAttacker === 0.5 * 0.5 * 5 - 0.5 * 5)
assert(pred.expectedDamageToDefender === 10 / 2)
}
test("prediction with defender who drains") {
val attacker = new Soldier("1", soldierType(10, 50, 5, 1), Player("1"))
val defender = new Soldier("2", soldierType(10, 50, 10, 1, Set(Drain)), Player("2"))
val pred = AttackResultPredictor.predictResult(attacker, defender, attackerHex, defenderHex, attacker.soldierType.attacks(0), Some(defender.soldierType.attacks(0)))
assert(pred.defenderDies === 0)
assert(pred.attackerDies === 0.5)
assert(pred.expectedDamageToAttacker === 5)
assert(pred.expectedDamageToDefender === (0.5 * 5) - (0.5 * 0.5 * 10 / 2))
}
} | RenualdMarch/merc | src/test/scala/mr/merc/ai/AttackResultPredictorTest.scala | Scala | gpl-3.0 | 5,713 |
package com.twitter.finagle.stats
import com.twitter.ostrich.Stats
class OstrichStatsReceiver extends StatsReceiverWithCumulativeGauges {
val repr: AnyRef = Stats
protected[this] def registerGauge(name: Seq[String], f: => Float) {
Stats.makeGauge(variableName(name)) { f }
}
protected[this] def deregisterGauge(name: Seq[String]) {
Stats.clearGauge(variableName(name))
}
def counter(name: String*) = new Counter {
private[this] val name_ = variableName(name)
def incr(delta: Int) { Stats.incr(name_, delta) }
}
def stat(name: String*) = new Stat {
private[this] val name_ = variableName(name)
def add(value: Float) {
Stats.addTiming(name_, value.toInt)
}
}
private[this] def variableName(name: Seq[String]) = name mkString "/"
}
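// Usage sketch (illustrative only; the metric names are made up): counter and stat names
// are joined with "/" before being forwarded to Ostrich, so
//
//   val receiver = new OstrichStatsReceiver
//   receiver.counter("http", "requests").incr(1)   // increments the Ostrich counter "http/requests"
//   receiver.stat("http", "latency_ms").add(12f)   // adds a timing to "http/latency_ms"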
| enachb/finagle_2.9_durgh | finagle-ostrich/src/main/scala/com/twitter/finagle/stats/OstrichStatsReceiver.scala | Scala | apache-2.0 | 792 |
package me.lachlanap.toc4pdf.proc
import org.apache.pdfbox.pdmodel.PDDocument
import org.apache.pdfbox.pdmodel.interactive.action.`type`._
import org.apache.pdfbox.pdmodel.interactive.documentnavigation.destination._
import org.apache.pdfbox.pdmodel.interactive.documentnavigation.outline.PDOutlineItem
import me.lachlanap.toc4pdf.model._
class BookmarkParser(document: PDDocument) {
def bookmarks: Outline = {
val outline = document.getDocumentCatalog().getDocumentOutline()
Outline(outlineItemsAsStream(outline.getFirstChild).map { item => processItem(item) })
}
private def outlineItemsAsStream(item: PDOutlineItem): Stream[PDOutlineItem] = {
if (item == null)
Stream.empty
else
item #:: outlineItemsAsStream(item.getNextSibling)
}
private def processItem(item: PDOutlineItem, depth: String = ""): Bookmark = {
val children = outlineItemsAsStream(item.getFirstChild).map(processItem(_))
item.getAction match {
case actionLaunch: PDActionLaunch =>
ExternalBookmark(item.getTitle, children, actionLaunch.getFile.getFile)
case actionGoto: PDActionGoTo =>
InternalBookmark(item.getTitle, children, pageNumber(actionGoto), zoom(actionGoto))
}
}
private def pageNumber(actionGoto: PDActionGoTo): Int = {
actionGoto.getDestination match {
case pageDestination: PDPageDestination => pageNumber(pageDestination)
case _ => 0
}
}
private def pageNumber(pageDestination: PDPageDestination) =
if (pageDestination.getPageNumber < 0)
pageDestination.findPageNumber
else
pageDestination.getPageNumber
private def zoom(actionGoto: PDActionGoTo): Zoom = {
actionGoto.getDestination match {
case pageDestination: PDPageDestination => zoom(pageDestination)
case _ => ZoomFitWidth
}
}
private def zoom(destination: PDPageDestination) = destination match {
case _: PDPageFitWidthDestination => ZoomFitWidth
case _: PDPageFitHeightDestination => ZoomFitHeight
case _: PDPageFitRectangleDestination => ZoomFitRectangle
case _: PDPageFitDestination => ZoomFitPage
case _: PDPageXYZDestination => ZoomAbsolute
case _ => ZoomOther
}
}
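// Illustrative usage sketch (not part of the original source; the exact Outline/Bookmark
// accessors are defined in the model package and are assumed here):
//
//   val document = PDDocument.load(new java.io.File("book.pdf"))
//   try {
//     val outline = new BookmarkParser(document).bookmarks
//     // walk the parsed bookmark tree, e.g. printing titles and destination pages
//   } finally {
//     document.close()
//   }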
| thorinii/toc4pdf | src/main/scala/me/lachlanap/toc4pdf/proc/BookmarkParser.scala | Scala | mit | 2,321 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
* This aims to handle a nested column aliasing pattern inside the `ColumnPruning` optimizer rule.
 * If a project or its child references nested fields, and not all the fields
 * in a nested attribute are used, we can substitute them with alias attributes; a project
 * of the nested fields as aliases on the children of the child will then be created.
*/
object NestedColumnAliasing {
def unapply(plan: LogicalPlan): Option[LogicalPlan] = plan match {
/**
* This pattern is needed to support [[Filter]] plan cases like
* [[Project]]->[[Filter]]->listed plan in `canProjectPushThrough` (e.g., [[Window]]).
* The reason why we don't simply add [[Filter]] in `canProjectPushThrough` is that
* the optimizer can hit an infinite loop during the [[PushDownPredicates]] rule.
*/
case Project(projectList, Filter(condition, child))
if SQLConf.get.nestedSchemaPruningEnabled && canProjectPushThrough(child) =>
val exprCandidatesToPrune = projectList ++ Seq(condition) ++ child.expressions
getAliasSubMap(exprCandidatesToPrune, child.producedAttributes.toSeq).map {
case (nestedFieldToAlias, attrToAliases) =>
NestedColumnAliasing.replaceToAliases(plan, nestedFieldToAlias, attrToAliases)
}
case Project(projectList, child)
if SQLConf.get.nestedSchemaPruningEnabled && canProjectPushThrough(child) =>
val exprCandidatesToPrune = projectList ++ child.expressions
getAliasSubMap(exprCandidatesToPrune, child.producedAttributes.toSeq).map {
case (nestedFieldToAlias, attrToAliases) =>
NestedColumnAliasing.replaceToAliases(plan, nestedFieldToAlias, attrToAliases)
}
case p if SQLConf.get.nestedSchemaPruningEnabled && canPruneOn(p) =>
val exprCandidatesToPrune = p.expressions
getAliasSubMap(exprCandidatesToPrune, p.producedAttributes.toSeq).map {
case (nestedFieldToAlias, attrToAliases) =>
NestedColumnAliasing.replaceToAliases(p, nestedFieldToAlias, attrToAliases)
}
case _ => None
}
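  // Illustrative example (added for clarity, not from the original source): for a plan such as
  //   Project(item.name) <- Relation(item: struct<name: string, price: double>)
  // this rule conceptually rewrites it into
  //   Project(_gen_alias_0) <- Project(item.name AS _gen_alias_0) <- Relation(...)
  // so only the accessed nested field has to be produced by the child, which later lets
  // schema pruning drop the unused `item.price`.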
/**
* Replace nested columns to prune unused nested columns later.
*/
private def replaceToAliases(
plan: LogicalPlan,
nestedFieldToAlias: Map[ExtractValue, Alias],
attrToAliases: Map[ExprId, Seq[Alias]]): LogicalPlan = plan match {
case Project(projectList, child) =>
Project(
getNewProjectList(projectList, nestedFieldToAlias),
replaceWithAliases(child, nestedFieldToAlias, attrToAliases))
// The operators reaching here was already guarded by `canPruneOn`.
case other =>
replaceWithAliases(other, nestedFieldToAlias, attrToAliases)
}
/**
* Return a replaced project list.
*/
def getNewProjectList(
projectList: Seq[NamedExpression],
nestedFieldToAlias: Map[ExtractValue, Alias]): Seq[NamedExpression] = {
projectList.map(_.transform {
case f: ExtractValue if nestedFieldToAlias.contains(f) =>
nestedFieldToAlias(f).toAttribute
}.asInstanceOf[NamedExpression])
}
/**
* Return a plan with new children replaced with aliases, and expressions replaced with
* aliased attributes.
*/
def replaceWithAliases(
plan: LogicalPlan,
nestedFieldToAlias: Map[ExtractValue, Alias],
attrToAliases: Map[ExprId, Seq[Alias]]): LogicalPlan = {
plan.withNewChildren(plan.children.map { plan =>
Project(plan.output.flatMap(a => attrToAliases.getOrElse(a.exprId, Seq(a))), plan)
}).transformExpressions {
case f: ExtractValue if nestedFieldToAlias.contains(f) =>
nestedFieldToAlias(f).toAttribute
}
}
/**
   * Returns true for operators on which we can prune nested columns.
*/
private def canPruneOn(plan: LogicalPlan) = plan match {
case _: Aggregate => true
case _: Expand => true
case _ => false
}
/**
   * Returns true for operators that a project can be pushed through.
*/
private def canProjectPushThrough(plan: LogicalPlan) = plan match {
case _: GlobalLimit => true
case _: LocalLimit => true
case _: Repartition => true
case _: Sample => true
case _: RepartitionByExpression => true
case _: Join => true
case _: Window => true
case _: Sort => true
case _ => false
}
/**
   * Returns root references that are individually accessed as a whole, and `GetStructField`s
   * or `GetArrayStructFields` that are on top of other `ExtractValue`s or special expressions.
   * Check `SelectedField` to see which expressions should be listed here.
*/
private def collectRootReferenceAndExtractValue(e: Expression): Seq[Expression] = e match {
case _: AttributeReference => Seq(e)
case GetStructField(_: ExtractValue | _: AttributeReference, _, _) => Seq(e)
case GetArrayStructFields(_: MapValues |
_: MapKeys |
_: ExtractValue |
_: AttributeReference, _, _, _, _) => Seq(e)
case es if es.children.nonEmpty => es.children.flatMap(collectRootReferenceAndExtractValue)
case _ => Seq.empty
}
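  // For instance (illustrative): for an expression such as `a.b.c`, where `a` is an
  // attribute of struct type, the whole `GetStructField` chain `a.b.c` is returned as a
  // single candidate, while for `a + 1` only the root reference `a` is collected.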
/**
   * Returns two maps used to replace nested fields with aliases.
   *
   * If `exclusiveAttrs` is given, any nested field accessors of these attributes
   * won't be considered for nested field aliasing.
   *
   * 1. ExtractValue -> Alias: A new alias is created for each nested field.
   * 2. ExprId -> Seq[Alias]: A reference attribute can have multiple aliases pointing to it.
*/
def getAliasSubMap(exprList: Seq[Expression], exclusiveAttrs: Seq[Attribute] = Seq.empty)
: Option[(Map[ExtractValue, Alias], Map[ExprId, Seq[Alias]])] = {
val (nestedFieldReferences, otherRootReferences) =
exprList.flatMap(collectRootReferenceAndExtractValue).partition {
case _: ExtractValue => true
case _ => false
}
// Note that when we group by extractors with their references, we should remove
// cosmetic variations.
val exclusiveAttrSet = AttributeSet(exclusiveAttrs ++ otherRootReferences)
val aliasSub = nestedFieldReferences.asInstanceOf[Seq[ExtractValue]]
.filter(!_.references.subsetOf(exclusiveAttrSet))
.groupBy(_.references.head.canonicalized.asInstanceOf[Attribute])
.flatMap { case (attr, nestedFields: Seq[ExtractValue]) =>
        // Remove redundant `ExtractValue`s if they share the same parent nested field.
// For example, when `a.b` and `a.b.c` are in project list, we only need to alias `a.b`.
// We only need to deal with two `ExtractValue`: `GetArrayStructFields` and
// `GetStructField`. Please refer to the method `collectRootReferenceAndExtractValue`.
val dedupNestedFields = nestedFields.filter {
case e @ (_: GetStructField | _: GetArrayStructFields) =>
val child = e.children.head
nestedFields.forall(f => child.find(_.semanticEquals(f)).isEmpty)
case _ => true
}
// Each expression can contain multiple nested fields.
// Note that we keep the original names to deliver to parquet in a case-sensitive way.
val nestedFieldToAlias = dedupNestedFields.distinct.map { f =>
val exprId = NamedExpression.newExprId
(f, Alias(f, s"_gen_alias_${exprId.id}")(exprId, Seq.empty, None))
}
// If all nested fields of `attr` are used, we don't need to introduce new aliases.
// By default, ColumnPruning rule uses `attr` already.
// Note that we need to remove cosmetic variations first, so we only count a
// nested field once.
if (nestedFieldToAlias.nonEmpty &&
dedupNestedFields.map(_.canonicalized)
.distinct
.map { nestedField => totalFieldNum(nestedField.dataType) }
.sum < totalFieldNum(attr.dataType)) {
Some(attr.exprId -> nestedFieldToAlias)
} else {
None
}
}
if (aliasSub.isEmpty) {
None
} else {
Some((aliasSub.values.flatten.toMap, aliasSub.map(x => (x._1, x._2.map(_._2)))))
}
}
/**
   * Returns the total number of fields of this type. This is used as a threshold for nested
   * column pruning. It is okay to underestimate: if the number of references is bigger than
   * this, the parent reference is used instead of the nested field references.
*/
private def totalFieldNum(dataType: DataType): Int = dataType match {
case _: AtomicType => 1
case StructType(fields) => fields.map(f => totalFieldNum(f.dataType)).sum
case ArrayType(elementType, _) => totalFieldNum(elementType)
case MapType(keyType, valueType, _) => totalFieldNum(keyType) + totalFieldNum(valueType)
case _ => 1 // UDT and others
}
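  // Worked example (hypothetical schema): for StructType(a: Int, b: StructType(c: Int, d: String))
  // totalFieldNum returns 1 + (1 + 1) = 3, so aliasing a single access of `b.c`
  // (1 leaf field < 3) is considered worthwhile by `getAliasSubMap`.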
}
/**
 * This prunes unnecessary nested columns from `Generate` and the optional `Project` on top
 * of it.
*/
object GeneratorNestedColumnAliasing {
def unapply(plan: LogicalPlan): Option[LogicalPlan] = plan match {
    // If either `nestedPruningOnExpressions` or `nestedSchemaPruningEnabled` is enabled, we
    // need to prune nested columns through Project and under Generate. The difference is
    // that when `nestedSchemaPruningEnabled` is on, nested columns will be pruned further
    // at the file format readers, if supported.
case Project(projectList, g: Generate) if (SQLConf.get.nestedPruningOnExpressions ||
SQLConf.get.nestedSchemaPruningEnabled) && canPruneGenerator(g.generator) =>
      // On top of `Generate`, there is a `Project` that might have nested column accessors.
// We try to get alias maps for both project list and generator's children expressions.
val exprsToPrune = projectList ++ g.generator.children
NestedColumnAliasing.getAliasSubMap(exprsToPrune, g.qualifiedGeneratorOutput).map {
case (nestedFieldToAlias, attrToAliases) =>
// Defer updating `Generate.unrequiredChildIndex` to next round of `ColumnPruning`.
val newChild =
NestedColumnAliasing.replaceWithAliases(g, nestedFieldToAlias, attrToAliases)
Project(NestedColumnAliasing.getNewProjectList(projectList, nestedFieldToAlias), newChild)
}
case g: Generate if SQLConf.get.nestedSchemaPruningEnabled &&
canPruneGenerator(g.generator) =>
      // If any child output is required by a higher projection, we cannot prune it, even if
      // we only use part of its nested columns. A required child output means it is referred
      // to, as a whole or partially, by a higher projection; pruning it here would produce
      // an unresolved query plan.
NestedColumnAliasing.getAliasSubMap(
g.generator.children, g.requiredChildOutput).map {
case (nestedFieldToAlias, attrToAliases) =>
// Defer updating `Generate.unrequiredChildIndex` to next round of `ColumnPruning`.
NestedColumnAliasing.replaceWithAliases(g, nestedFieldToAlias, attrToAliases)
}
case _ =>
None
}
/**
   * This is a whitelist of `Generator`s on which nested fields can be pruned.
*/
def canPruneGenerator(g: Generator): Boolean = g match {
case _: Explode => true
case _: Stack => true
case _: PosExplode => true
case _: Inline => true
case _ => false
}
}
| witgo/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/NestedColumnAliasing.scala | Scala | apache-2.0 | 12,226 |
package main.scala
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import org.apache.spark.streaming.twitter._
import org.apache.spark.streaming._
import org.apache.spark.SparkContext._
import twitter4j.Status
object TweetMain {
def main(args: Array[String]) = {
System.setProperty("twitter4j.oauth.consumerKey", "")
System.setProperty("twitter4j.oauth.consumerSecret", "")
System.setProperty("twitter4j.oauth.accessToken", "")
System.setProperty("twitter4j.oauth.accessTokenSecret", "")
val conf = new SparkConf().setAppName("Hash Tags")
val sc = new SparkContext(conf)
val stopWords = sc.textFile("stopWords")
val stopWordSet = stopWords.collect.toSet
val ssc = new StreamingContext(sc, Seconds(5))
ssc.checkpoint("StreamingCheckpoint")
val twitterStream = TwitterUtils.createStream(ssc, None)
val topHashTags = twitterStream.flatMap(x => getHashTags(x))
val topUsername = twitterStream.flatMap(x => getUserName(x))
    val popularText = twitterStream.flatMap(tweet => tweet.getText.split("\\W+"))
.filter(x => x.length() > 1)
.filter(word => !stopWordSet.contains(word))
.map(_.replaceAll("#|@", ""))
.map(x => x.toLowerCase())
topHashTags.saveAsTextFiles("twitterData/hash", "a")
topHashTags.print
topUsername.saveAsTextFiles("twitterData/user", "a")
topUsername.print
popularText.print
popularText.saveAsTextFiles("twitterData/text", "a")
ssc.start
ssc.awaitTermination
}
def getHashTags(tweet: Status) = {
raw"(?:(?<=\\s)|^)#(\\w*[A-Za-z_]+\\w*)".r
.findAllIn(tweet.getText).matchData.toList
}
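  // Illustrative behaviour (with the regexes as single-backslash character classes): for a
  // tweet whose text is "Loving #Spark and #BigData with @someone", getHashTags yields the
  // matches for "#Spark" and "#BigData" (capture group 1 being "Spark" and "BigData"),
  // while getUserName below yields the match for "@someone".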
def getUserName(tweet: Status) = {
raw"(?:(?<=\\s)|^)@(\\w*[A-Za-z_]+\\w*)".r
.findAllIn(tweet.getText).matchData.toList
}
}
| CoE4BD/TwitterTextAnalytics-SparkStreaming | TweetMain.scala | Scala | apache-2.0 | 1,793 |
package ict.spark.mllib.optimization
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.linalg.{ Vectors, Vector }
import ict.spark.mllib.linalg.MyBLAS._
import scala.collection.mutable.ArrayBuffer
trait MSVMOptimizer extends Serializable {
def optimize(data: RDD[(Double, Vector)], initialWeights: Array[Vector]): Array[Vector]
}
/**
 * MSVMGGradientDescent
 * Class used to solve an optimization problem using gradient descent.
* @param gradient Gradient function to be used.
* @param updater Updater to be used to update weights after every iteration.
*/
class MSVMGGradientDescent private[mllib] (
private var gradient: MSVMGradient,
private var updater: MSVMUpdater)
extends MSVMOptimizer with Logging {
private var numClass: Int = 3
private var stepSize: Double = 1.0
private var numIterations: Int = 100
private var regParam: Double = 0.0
private var miniBatchFraction: Double = 1.0
def setNumClass(numClass: Int): this.type = {
this.numClass = numClass
this
}
/**
* Set the initial step size of SGD for the first step. Default 1.0.
* In subsequent steps, the step size will decrease with stepSize/sqrt(t)
*/
def setStepSize(step: Double): this.type = {
this.stepSize = step
this
}
/**
* Set fraction of data to be used for each SGD iteration.
* Default 1.0 (corresponding to deterministic/classical gradient descent)
*/
def setMiniBatchFraction(fraction: Double): this.type = {
this.miniBatchFraction = fraction
this
}
/**
* Set the number of iterations for SGD. Default 100.
*/
def setNumIterations(iters: Int): this.type = {
this.numIterations = iters
this
}
/**
* Set the regularization parameter. Default 0.0.
*/
def setRegParam(regParam: Double): this.type = {
this.regParam = regParam
this
}
/**
* Set the gradient function (of the loss function of one single data example)
* to be used for SGD.
*/
def setGradient(gradient: MSVMGradient): this.type = {
this.gradient = gradient
this
}
/**
* Set the updater function to actually perform a gradient step in a given direction.
* The updater is responsible to perform the update from the regularization term as well,
* and therefore determines what kind or regularization is used, if any.
*/
def setUpdater(updater: MSVMUpdater): this.type = {
this.updater = updater
this
}
/**
* Runs gradient descent on the given training data.
* @param data training data
* @param initialWeights initial weights
* @return solution vector
*/
def optimize(data: RDD[(Double, Vector)], initialWeights: Array[Vector]): Array[Vector] = {
val weights = MSVMGGradientDescent.runMiniBatchSGD(
data,
gradient,
updater,
numClass,
stepSize,
numIterations,
regParam,
miniBatchFraction,
initialWeights)
weights
}
}
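// Usage sketch (illustrative; `SquaredHingeGradient` and `L2MSVMUpdater` are hypothetical
// implementations of MSVMGradient and MSVMUpdater, and `trainingData`/`initialWeights` are
// assumed to be prepared elsewhere):
//
//   val optimizer = new MSVMGGradientDescent(new SquaredHingeGradient, new L2MSVMUpdater)
//     .setNumClass(10)
//     .setStepSize(0.1)
//     .setNumIterations(200)
//     .setRegParam(0.01)
//     .setMiniBatchFraction(0.5)
//   val weights: Array[Vector] = optimizer.optimize(trainingData, initialWeights)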
/**
* Top-level method to run gradient descent.
*/
object MSVMGGradientDescent extends Logging {
/**
* Run stochastic gradient descent (SGD) in parallel using mini batches.
* In each iteration, we sample a subset (fraction miniBatchFraction) of the total data
* in order to compute a gradient estimate.
   * Sampling and averaging the subgradients over this subset are performed using one standard
   * Spark map-reduce operation in each iteration.
*
* @param data - Input data for SGD. RDD of the set of data examples, each of
* the form (label, [feature values]).
* @param gradient - Gradient object (used to compute the gradient of the loss function of
* one single data example)
* @param updater - Updater function to actually perform a gradient step in a given direction.
* @param stepSize - initial step size for the first step
* @param numIterations - number of iterations that SGD should be run.
* @param regParam - regularization parameter
* @param miniBatchFraction - fraction of the input data set that should be used for
* one iteration of SGD. Default value 1.0.
*
   * @return An array of weight vectors (one per class) obtained after running the
   *         specified number of mini-batch SGD iterations.
*/
def runMiniBatchSGD(
data: RDD[(Double, Vector)],
gradient: MSVMGradient,
updater: MSVMUpdater,
numClass: Int,
stepSize: Double,
numIterations: Int,
regParam: Double,
miniBatchFraction: Double,
initialWeights: Array[Vector]): Array[Vector] = {
val stochasticLossHistory = new ArrayBuffer[Double](numIterations)
val numExamples = data.count()
val miniBatchSize = numExamples * miniBatchFraction
// if no data, return initial weights to avoid NaNs
if (numExamples == 0) {
logInfo("ImprovedGradientDescent.runMiniBatchSGD returning initial weights, no data found")
return initialWeights
}
// Initialize weights as a column vector
var weights = initialWeights
val numFeatures = weights(0).size
/**
     * For the first iteration, the regularization value will be initialized as the sum of
     * the weight squares if it's an L2 updater; for an L1 updater, the same logic is followed.
*/
for (i <- 1 to numIterations) {
val bcWeights = data.context.broadcast(weights)
// val bcWeights = weights
// Sample a subset (fraction miniBatchFraction) of the total data
// compute and sum up the subgradients on this subset (this is one map-reduce)
val sampledData: RDD[(Double, Vector)] = data.sample(false, miniBatchFraction, 42 + i)
val subGradRDD: RDD[Array[Vector]] = sampledData
.map(sData => {
val grad = gradient.compute(sData._2, sData._1.toInt, bcWeights.value)
grad
})
var gradientSum: Array[Vector] = subGradRDD.reduce(
(subGrad1: Array[Vector], subGrad2: Array[Vector]) => {
// axpy(1, subGrad1._1, subGrad2._1)
var retGrad = subGrad1
          for (j <- subGrad1.indices) {
            retGrad(j) = add(subGrad1(j), subGrad2(j))
          }
retGrad
})
      // Scale the summed gradients by the mini-batch size (in place).
      gradientSum.foreach(elem => scal(1 / miniBatchSize, elem))
val update = updater.compute(weights, gradientSum, stepSize, i, regParam)
weights = update
}
weights
}
} | opinion-extraction-propagation/TASC-PTASC | src/main/scala/ict/spark/mllib/optimization/Optimizer.scala | Scala | apache-2.0 | 6,466 |
package de.frosner.broccoli.controllers
import de.frosner.broccoli.models._
import de.frosner.broccoli.services.WebSocketService.Msg
import de.frosner.broccoli.services._
import de.frosner.broccoli.RemoveSecrets.ToRemoveSecretsOps
import de.frosner.broccoli.auth.{Account, Role}
import de.frosner.broccoli.instances.NomadInstances
import de.frosner.broccoli.nomad
import de.frosner.broccoli.websocket.{BroccoliMessageHandler, IncomingMessage, OutgoingMessage}
import jp.t2v.lab.play2.auth.test.Helpers._
import org.mockito.Matchers
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.specs2.mock.Mockito
import play.api.libs.iteratee.Enumerator
import play.api.libs.json._
import play.api.test._
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import scala.util.Success
class WebSocketControllerSpec
extends PlaySpecification
with AuthUtils
with ModelArbitraries
with nomad.ModelArbitraries
with Mockito
with ToRemoveSecretsOps {
val instanceWithStatus = InstanceWithStatus(
instance = Instance(
id = "i",
template = Template(
id = "t",
template = "{{id}} {{secret}}",
description = "d",
parameterInfos = Map(
"id" -> ParameterInfo("id", None, None, None, ParameterType.Raw, None),
"secret" -> ParameterInfo(
id = "secret",
name = None,
default = Some(StringParameterValue("value")),
secret = Some(true),
`type` = ParameterType.String,
orderIndex = None
)
)
),
parameterValues = Map(
"id" -> StringParameterValue("i"),
"secret" -> StringParameterValue("thisshouldnotappearanywhere")
)
),
status = JobStatus.Unknown,
services = List(
Service(
name = "n",
protocol = "http",
address = "localhost",
port = 8888,
status = ServiceStatus.Unknown
)
),
periodicRuns = Seq.empty
)
private def wrap(messageType: IncomingMessage.Type, payload: JsValue): JsValue =
JsObject(
Map(
"messageType" -> Json.toJson(messageType),
"payload" -> payload
))
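  // For instance (illustrative; the exact message type string depends on the JSON writes for
  // IncomingMessage.Type): wrap(someType, Json.obj("id" -> "i")) produces JSON shaped like
  //   { "messageType": <serialized type>, "payload": { "id": "i" } }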
private def testWs(controllerSetup: SecurityService => WebSocketController,
inMsg: IncomingMessage,
expectations: Map[Option[(String, Role)], OutgoingMessage]) =
expectations.foreach {
case (maybeInstanceRegexAndRole, outMsg) =>
val maybeAccount = maybeInstanceRegexAndRole.map {
case (instanceRegex, role) => Account("user", instanceRegex, role)
}
val securityService = maybeAccount
.map { account =>
withAuthConf(mock[SecurityService], List(account))
}
.getOrElse {
withAuthNone(mock[SecurityService])
}
val controller = controllerSetup(securityService)
when(controller.webSocketService.newConnection(Matchers.anyString(), any[Account]))
.thenReturn(Enumerator.empty[Msg])
when(controller.webSocketService.newConnection(any[Account]))
.thenReturn(("session_id", Enumerator.empty[Msg]))
val result = maybeAccount
.map { account =>
controller.requestToSocket(FakeRequest().withLoggedIn(controller)(account.name))
}
.getOrElse {
controller.requestToSocket(FakeRequest())
}
WsTestUtil.wrapConnection(result) match {
case Right((incoming, outgoing)) =>
incoming.feed(Json.toJson(inMsg)).end
verify(controller.webSocketService)
.send(Matchers.anyString(), Matchers.eq(Json.toJson(outMsg)))
case Left(_) => throw new IllegalStateException()
}
}
sequential // http://stackoverflow.com/questions/31041842/error-with-play-2-4-tests-the-cachemanager-has-been-shut-down-it-can-no-longe
"WebSocketController" should {
"establish a websocket connection correctly (with authentication)" in new WithApplication {
val account = Account("user", ".*", Role.Administrator)
val instanceService = withInstances(mock[InstanceService], Seq.empty)
val controller = WebSocketController(
webSocketService = mock[WebSocketService],
templateService = withTemplates(mock[TemplateService], Seq.empty),
instanceService = instanceService,
aboutService = withDummyValues(mock[AboutInfoService]),
securityService = withAuthConf(mock[SecurityService], List(account)),
messageHandler = new BroccoliMessageHandler(mock[NomadInstances], instanceService),
playEnv = playEnv,
cacheApi = cacheApi
)
val result = controller.requestToSocket(FakeRequest().withLoggedIn(controller)(account.name))
val maybeConnection = WsTestUtil.wrapConnection(result)
maybeConnection should beRight
}
"establish a websocket connection correctly (without authentication)" in new WithApplication {
val account = Account("user", ".*", Role.Administrator)
val instanceService = withInstances(mock[InstanceService], Seq.empty)
val controller = WebSocketController(
webSocketService = mock[WebSocketService],
templateService = withTemplates(mock[TemplateService], Seq.empty),
instanceService = instanceService,
aboutService = withDummyValues(mock[AboutInfoService]),
securityService = withAuthNone(mock[SecurityService]),
messageHandler = new BroccoliMessageHandler(mock[NomadInstances], instanceService),
playEnv = playEnv,
cacheApi = cacheApi
)
when(controller.webSocketService.newConnection(any[Account])).thenReturn(("id", null))
val result = controller.requestToSocket(FakeRequest())
val maybeConnection = WsTestUtil.wrapConnection(result)
maybeConnection should beRight
}
"decline the websocket connection if not authenticated" in new WithApplication {
val account = Account("user", ".*", Role.Administrator)
val instanceService = withInstances(mock[InstanceService], Seq.empty)
val controller = WebSocketController(
webSocketService = mock[WebSocketService],
templateService = withTemplates(mock[TemplateService], Seq.empty),
instanceService = instanceService,
aboutService = withDummyValues(mock[AboutInfoService]),
securityService = withAuthConf(mock[SecurityService], List(account)),
messageHandler = new BroccoliMessageHandler(mock[NomadInstances], instanceService),
playEnv = playEnv,
cacheApi = cacheApi
)
val result = controller.requestToSocket(FakeRequest())
val maybeConnection = WsTestUtil.wrapConnection(result)
maybeConnection should beLeft.like {
case d => d.header.status === 403
}
}
"send about info, template and instance list after establishing the connection" in new WithApplication {
val account = Account("user", ".*", Role.Administrator)
val instances = Seq(
instanceWithStatus
)
val templates = Seq.empty[Template]
private val instanceService = withInstances(mock[InstanceService], instances)
val controller = WebSocketController(
webSocketService = mock[WebSocketService],
templateService = withTemplates(mock[TemplateService], templates),
instanceService = instanceService,
aboutService = withDummyValues(mock[AboutInfoService]),
securityService = withAuthNone(mock[SecurityService]),
messageHandler = new BroccoliMessageHandler(mock[NomadInstances], instanceService),
playEnv = playEnv,
cacheApi = cacheApi
)
when(controller.webSocketService.newConnection(any[Account])).thenReturn(("id", Enumerator.empty[Msg]))
val result = controller.requestToSocket(FakeRequest())
val maybeConnection = WsTestUtil.wrapConnection(result)
maybeConnection should beRight.like {
case (incoming, outgoing) =>
val messages = outgoing.get
(messages should haveSize(3)) and
(messages should contain(
Json.toJson(OutgoingMessage.ListTemplates(templates)),
Json.toJson(OutgoingMessage.ListInstances(instances)),
Json.toJson(OutgoingMessage.AboutInfoMsg(controller.aboutService.aboutInfo(null)))
))
}
}
"process instance addition requests if no auth is enabled" in new WithApplication {
val id = "id"
val instanceService = withInstances(mock[InstanceService], Seq.empty)
val controller = WebSocketController(
webSocketService = mock[WebSocketService],
templateService = withTemplates(mock[TemplateService], Seq.empty),
instanceService = instanceService,
aboutService = withDummyValues(mock[AboutInfoService]),
securityService = withAuthNone(mock[SecurityService]),
messageHandler = new BroccoliMessageHandler(mock[NomadInstances], instanceService),
playEnv = playEnv,
cacheApi = cacheApi
)
when(controller.webSocketService.newConnection(any[Account])).thenReturn((id, Enumerator.empty[Msg]))
val instanceCreation = InstanceCreation(
"template",
Map(
"id" -> JsString("blib")
)
)
when(controller.instanceService.addInstance(instanceCreation)).thenReturn(Success(instanceWithStatus))
val result = controller.requestToSocket(FakeRequest())
val maybeConnection = WsTestUtil.wrapConnection(result)
maybeConnection match {
case Right((incoming, outgoing)) =>
val resultMsg = OutgoingMessage.AddInstanceSuccess(
InstanceCreated(
instanceCreation,
instanceWithStatus
)
)
incoming.feed(Json.toJson(IncomingMessage.AddInstance(instanceCreation))).end
verify(controller.webSocketService).send(id, Json.toJson(resultMsg))
case Left(_) => throw new IllegalStateException()
}
}
"process instance addition correctly" in new WithApplication {
val instanceCreation = InstanceCreation(
"template",
Map(
"id" -> JsString("blib")
)
)
val success = OutgoingMessage.AddInstanceSuccess(
InstanceCreated(
instanceCreation,
instanceWithStatus
)
)
val roleFailure = OutgoingMessage.AddInstanceError(InstanceError.RolesRequired(Role.Administrator))
val regexFailure =
OutgoingMessage.AddInstanceError(InstanceError.UserRegexDenied("blib", "bla"))
testWs(
controllerSetup = { securityService =>
val instanceService = withInstances(mock[InstanceService], Seq.empty)
val controller = WebSocketController(
webSocketService = mock[WebSocketService],
templateService = withTemplates(mock[TemplateService], Seq.empty),
instanceService = instanceService,
aboutService = withDummyValues(mock[AboutInfoService]),
securityService = securityService,
messageHandler = new BroccoliMessageHandler(mock[NomadInstances], instanceService),
playEnv = playEnv,
cacheApi = cacheApi
)
when(controller.instanceService.addInstance(instanceCreation)).thenReturn(Success(instanceWithStatus))
controller
},
inMsg = IncomingMessage.AddInstance(instanceCreation),
expectations = Map(
None -> success,
Some((".*", Role.Administrator)) -> success,
Some(("bla", Role.Administrator)) -> regexFailure,
Some((".*", Role.Operator)) -> roleFailure,
Some((".*", Role.User)) -> roleFailure
)
)
}
"process instance deletion correctly" in new WithApplication {
val instanceDeletion = "id"
val success = OutgoingMessage.DeleteInstanceSuccess(
InstanceDeleted(
instanceDeletion,
instanceWithStatus
)
)
val roleFailure = OutgoingMessage.DeleteInstanceError(InstanceError.RolesRequired(Role.Administrator))
val regexFailure = OutgoingMessage.DeleteInstanceError(InstanceError.UserRegexDenied(instanceDeletion, "bla"))
testWs(
controllerSetup = { securityService =>
val instanceService = withInstances(mock[InstanceService], Seq.empty)
val controller = WebSocketController(
webSocketService = mock[WebSocketService],
templateService = withTemplates(mock[TemplateService], Seq.empty),
instanceService = instanceService,
aboutService = withDummyValues(mock[AboutInfoService]),
securityService = securityService,
messageHandler = new BroccoliMessageHandler(mock[NomadInstances], instanceService),
playEnv = playEnv,
cacheApi = cacheApi
)
when(controller.instanceService.deleteInstance(instanceDeletion)).thenReturn(Success(instanceWithStatus))
controller
},
inMsg = IncomingMessage.DeleteInstance(instanceDeletion),
expectations = Map(
None -> success,
Some((".*", Role.Administrator)) -> success,
Some(("bla", Role.Administrator)) -> regexFailure,
Some((".*", Role.Operator)) -> roleFailure,
Some((".*", Role.User)) -> roleFailure
)
)
}
"process instance parameter updates correctly" in new WithApplication {
val instanceUpdate = InstanceUpdate(
instanceId = Some("id"),
status = None,
parameterValues = Some(
Map(
"id" -> JsString("blib")
)
),
periodicJobsToStop = None,
selectedTemplate = None
)
val success = OutgoingMessage.UpdateInstanceSuccess(
InstanceUpdated(
instanceUpdate,
instanceWithStatus
)
)
testWs(
controllerSetup = { securityService =>
val instanceService = withInstances(mock[InstanceService], Seq.empty)
val controller = WebSocketController(
webSocketService = mock[WebSocketService],
templateService = withTemplates(mock[TemplateService], Seq.empty),
instanceService = instanceService,
aboutService = withDummyValues(mock[AboutInfoService]),
securityService = securityService,
messageHandler = new BroccoliMessageHandler(mock[NomadInstances], instanceService),
playEnv = playEnv,
cacheApi = cacheApi
)
when(
controller.instanceService.updateInstance(
id = instanceUpdate.instanceId.get,
statusUpdater = instanceUpdate.status,
parameterValuesUpdater = instanceUpdate.parameterValues,
templateSelector = instanceUpdate.selectedTemplate,
periodicJobsToStop = instanceUpdate.periodicJobsToStop
)).thenReturn(Success(instanceWithStatus))
controller
},
inMsg = IncomingMessage.UpdateInstance(instanceUpdate),
expectations = Map(
None -> success,
Some((".*", Role.Administrator)) -> success,
Some(("bla", Role.Administrator)) -> OutgoingMessage.UpdateInstanceError(
InstanceError.UserRegexDenied(instanceUpdate.instanceId.get, "bla")),
Some((".*", Role.Operator)) -> OutgoingMessage.UpdateInstanceError(
InstanceError.RolesRequired(Role.Administrator)
),
Some((".*", Role.User)) -> OutgoingMessage.UpdateInstanceError(
InstanceError.RolesRequired(Role.Administrator, Role.Operator)
)
)
)
}
"process instance status updates correctly" in new WithApplication {
val instanceUpdate = InstanceUpdate(
instanceId = Some("id"),
status = Some(JobStatus.Running),
parameterValues = None,
periodicJobsToStop = None,
selectedTemplate = None
)
val success = OutgoingMessage.UpdateInstanceSuccess(
InstanceUpdated(
instanceUpdate,
instanceWithStatus
)
)
val secretSuccess = OutgoingMessage.UpdateInstanceSuccess(
InstanceUpdated(
instanceUpdate,
instanceWithStatus.removeSecrets
)
)
val instanceService = withInstances(mock[InstanceService], Seq.empty)
testWs(
controllerSetup = { securityService =>
val controller = WebSocketController(
webSocketService = mock[WebSocketService],
templateService = withTemplates(mock[TemplateService], Seq.empty),
instanceService = instanceService,
aboutService = withDummyValues(mock[AboutInfoService]),
securityService = securityService,
messageHandler = new BroccoliMessageHandler(mock[NomadInstances], instanceService),
playEnv = playEnv,
cacheApi = cacheApi
)
when(
controller.instanceService.updateInstance(
id = instanceUpdate.instanceId.get,
statusUpdater = instanceUpdate.status,
parameterValuesUpdater = instanceUpdate.parameterValues,
templateSelector = instanceUpdate.selectedTemplate,
periodicJobsToStop = instanceUpdate.periodicJobsToStop
)).thenReturn(Success(instanceWithStatus))
controller
},
inMsg = IncomingMessage.UpdateInstance(instanceUpdate),
expectations = Map(
None -> success,
Some((".*", Role.Administrator)) -> success,
Some(("bla", Role.Administrator)) -> OutgoingMessage.UpdateInstanceError(
InstanceError.UserRegexDenied(instanceUpdate.instanceId.get, "bla")
),
Some((".*", Role.Operator)) -> secretSuccess,
Some((".*", Role.User)) -> OutgoingMessage.UpdateInstanceError(
InstanceError.RolesRequired(Role.Administrator, Role.Operator)
)
)
)
}
"process instance template updates correctly" in new WithApplication {
val instanceUpdate = InstanceUpdate(
instanceId = Some("id"),
status = None,
parameterValues = None,
periodicJobsToStop = None,
selectedTemplate = Some("templateId")
)
val success = OutgoingMessage.UpdateInstanceSuccess(
InstanceUpdated(
instanceUpdate,
instanceWithStatus
)
)
val instanceService = withInstances(mock[InstanceService], Seq.empty)
testWs(
controllerSetup = { securityService =>
val controller = WebSocketController(
webSocketService = mock[WebSocketService],
templateService = withTemplates(mock[TemplateService], Seq.empty),
instanceService = instanceService,
aboutService = withDummyValues(mock[AboutInfoService]),
securityService = securityService,
messageHandler = new BroccoliMessageHandler(mock[NomadInstances], instanceService),
playEnv = playEnv,
cacheApi = cacheApi
)
when(
controller.instanceService.updateInstance(
id = instanceUpdate.instanceId.get,
statusUpdater = instanceUpdate.status,
parameterValuesUpdater = instanceUpdate.parameterValues,
templateSelector = instanceUpdate.selectedTemplate,
periodicJobsToStop = instanceUpdate.periodicJobsToStop
)).thenReturn(Success(instanceWithStatus))
controller
},
inMsg = IncomingMessage.UpdateInstance(instanceUpdate),
expectations = Map(
None -> success,
Some((".*", Role.Administrator)) -> success,
Some(("bla", Role.Administrator)) -> OutgoingMessage.UpdateInstanceError(
InstanceError.UserRegexDenied(instanceUpdate.instanceId.get, "bla")
),
Some((".*", Role.Operator)) -> OutgoingMessage.UpdateInstanceError(
InstanceError.RolesRequired(Role.Administrator)
),
Some((".*", Role.User)) -> OutgoingMessage.UpdateInstanceError(
InstanceError.RolesRequired(Role.Administrator, Role.Operator)
)
)
)
}
"process instance periodic run stops correctly" in new WithApplication {
val instanceUpdate = InstanceUpdate(
instanceId = Some("id"),
status = None,
parameterValues = None,
periodicJobsToStop = Some(List("id/periodic-1518101460")),
selectedTemplate = None
)
val success = OutgoingMessage.UpdateInstanceSuccess(
InstanceUpdated(
instanceUpdate,
instanceWithStatus
)
)
val secretSuccess = OutgoingMessage.UpdateInstanceSuccess(
InstanceUpdated(
instanceUpdate,
instanceWithStatus.removeSecrets
)
)
val instanceService = withInstances(mock[InstanceService], Seq.empty)
testWs(
controllerSetup = { securityService =>
val controller = WebSocketController(
webSocketService = mock[WebSocketService],
templateService = withTemplates(mock[TemplateService], Seq.empty),
instanceService = instanceService,
aboutService = withDummyValues(mock[AboutInfoService]),
securityService = securityService,
messageHandler = new BroccoliMessageHandler(mock[NomadInstances], instanceService),
playEnv = playEnv,
cacheApi = cacheApi
)
when(
controller.instanceService.updateInstance(
id = instanceUpdate.instanceId.get,
statusUpdater = instanceUpdate.status,
parameterValuesUpdater = instanceUpdate.parameterValues,
templateSelector = instanceUpdate.selectedTemplate,
periodicJobsToStop = instanceUpdate.periodicJobsToStop
)).thenReturn(Success(instanceWithStatus))
controller
},
inMsg = IncomingMessage.UpdateInstance(instanceUpdate),
expectations = Map(
None -> success,
Some((".*", Role.Administrator)) -> success,
Some(("bla", Role.Administrator)) -> OutgoingMessage.UpdateInstanceError(
InstanceError.UserRegexDenied(instanceUpdate.instanceId.get, "bla")
),
Some((".*", Role.Operator)) -> secretSuccess,
Some((".*", Role.User)) -> OutgoingMessage.UpdateInstanceError(
InstanceError.RolesRequired(Role.Administrator, Role.Operator)
)
)
)
}
}
}
| FRosner/cluster-broccoli | server/src/test/scala/de/frosner/broccoli/controllers/WebSocketControllerSpec.scala | Scala | apache-2.0 | 22,748 |
package dotty.tools
package dotc
import core.Contexts._
import reporting.Reporter
import io.AbstractFile
import scala.annotation.internal.sharable
/** A main class for running compiler benchmarks. Can instantiate a given
* number of compilers and run each (sequentially) a given number of times
* on the same sources.
*/
object Bench extends Driver:
@sharable private var numRuns = 1
private def ntimes(n: Int)(op: => Reporter): Reporter =
(0 until n).foldLeft(emptyReporter)((_, _) => op)
@sharable private var times: Array[Int] = _
override def doCompile(compiler: Compiler, files: List[AbstractFile])(using Context): Reporter =
times = new Array[Int](numRuns)
var reporter: Reporter = emptyReporter
for i <- 0 until numRuns do
val start = System.nanoTime()
reporter = super.doCompile(compiler, files)
times(i) = ((System.nanoTime - start) / 1000000).toInt
println(s"time elapsed: ${times(i)}ms")
if ctx.settings.Xprompt.value then
print("hit <return> to continue >")
System.in.nn.read()
println()
reporter
def extractNumArg(args: Array[String], name: String, default: Int = 1): (Int, Array[String]) = {
val pos = args indexOf name
if (pos < 0) (default, args)
else (args(pos + 1).toInt, (args take pos) ++ (args drop (pos + 2)))
}
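  // Worked example: extractNumArg(Array("#runs", "3", "Foo.scala"), "#runs") returns
  // (3, Array("Foo.scala")); if the name is absent, (default, args) is returned unchanged.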
def reportTimes() =
val best = times.sorted
val measured = numRuns / 3
val avgBest = best.take(measured).sum / measured
val avgLast = times.reverse.take(measured).sum / measured
println(s"best out of $numRuns runs: ${best(0)}")
println(s"average out of best $measured: $avgBest")
println(s"average out of last $measured: $avgLast")
override def process(args: Array[String], rootCtx: Context): Reporter =
val (numCompilers, args1) = extractNumArg(args, "#compilers")
val (numRuns, args2) = extractNumArg(args1, "#runs")
this.numRuns = numRuns
var reporter: Reporter = emptyReporter
for i <- 0 until numCompilers do
reporter = super.process(args2, rootCtx)
reportTimes()
reporter
end Bench
| dotty-staging/dotty | compiler/src/dotty/tools/dotc/Bench.scala | Scala | apache-2.0 | 2,104 |
/**
* Created by Romain Reuillon on 02/11/16.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package fr.iscpif.doors.server
import better.files.File
import cats.{Applicative, Monad}
import cats.data.{Ior, Kleisli}
import fr.iscpif.doors.ext.Data._
import fr.iscpif.doors.server.DSL.Executable
import slick.dbio.DBIOAction
import slick.driver.H2Driver
import slick.driver.H2Driver.api._
import slick.lifted.{Query, QueryBase, TableQuery}
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.util._
import squants.time._
import scala.Either
package object db {
private[server] lazy val dbScheme = new db.DBScheme {
lazy val users = TableQuery[db.Users]
lazy val locks = TableQuery[db.Locks]
lazy val userLocks = TableQuery[db.UserLocks]
lazy val emails = TableQuery[db.Emails]
lazy val versions = TableQuery[db.Versions]
lazy val secrets = TableQuery[db.Secrets]
}
type Database = slick.driver.H2Driver.api.Database
trait DBScheme {
def users: TableQuery[Users]
def locks: TableQuery[Locks]
def userLocks: TableQuery[UserLocks]
def emails: TableQuery[Emails]
def versions: TableQuery[Versions]
def secrets: TableQuery[Secrets]
}
object DB {
object ConvertToDB {
implicit def action[U] = new ConvertToDB[DBIOAction[U, NoStream, Effect.All], U] {
def toDB(t: DBIOAction[U, NoStream, Effect.All]) = t
}
implicit def query[U] = new ConvertToDB[QueryBase[U], U] {
def toDB(t: QueryBase[U]): DBIOAction[U, NoStream, Effect.All] = t.result
}
implicit def rep[U] = new ConvertToDB[Rep[U], U] {
def toDB(t: Rep[U]): DBIOAction[U, NoStream, Effect.All] = t.result
}
}
trait ConvertToDB[-T, U] {
def toDB(t: T): DBIOAction[U, NoStream, Effect.All]
}
def pure[T](t: T): DB[T] = Kleisli[DBIOAction[?, NoStream, Effect.All], DBScheme, T] { _ => DBIOAction.successful(t) }
def apply[T, D](dbEffect: fr.iscpif.doors.server.db.DBScheme => D)(implicit toDB: ConvertToDB[D, T]): DB[T] =
Kleisli[DBIOAction[?, NoStream, Effect.All], fr.iscpif.doors.server.db.DBScheme, T] { (s: fr.iscpif.doors.server.db.DBScheme) =>
toDB.toDB(dbEffect(s))
}
}
implicit def dbIOActionIsMonad = new Monad[DBIOAction[?, NoStream, Effect.All]] {
override def pure[A](x: A): DBIOAction[A, NoStream, Effect.All] = DBIOAction.successful(x)
override def flatMap[A, B](fa: DBIOAction[A, NoStream, Effect.All])(f: (A) => DBIOAction[B, NoStream, Effect.All]): DBIOAction[B, NoStream, Effect.All] =
for {
a <- fa
b <- f(a)
} yield b
override def tailRecM[A, B](a: A)(f: (A) => DBIOAction[Either[A, B], NoStream, Effect.All]): DBIOAction[B, NoStream, Effect.All] =
flatMap(f(a)) {
case Right(b) => pure(b)
case Left(nextA) => tailRecM(nextA)(f)
}
}
type DB[T] = Kleisli[DBIOAction[?, NoStream, Effect.All], fr.iscpif.doors.server.db.DBScheme, T]
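  // Composition sketch (illustrative; `countUsers` is a hypothetical query): a DB[T] is a
  // Kleisli arrow from the schema to a DBIOAction, so actions compose monadically and are
  // then executed in a single transaction, e.g.:
  //
  //   val countUsers: DB[Int] = DB(scheme => scheme.users.length)
  //   val program: DB[Int] = for {
  //     n <- countUsers
  //     _ <- DB.pure(())
  //   } yield n
  //   // doRunTransaction(program, db)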
def runTransaction[T, M[_]](f: DB[T], db: Database)(implicit io: freedsl.io.IO[M]) =
io(doRunTransaction(f, db))
def doRunTransaction[T](f: DB[T], db: Database) =
Await.result(db.run(f(dbScheme).transactionally), Duration.Inf)
lazy val dbVersion = 1
case class User(id: UserID, name: String, password: Password, hashAlgorithm: HashingAlgorithm)
case class Lock(id: LockID, state: StateID, time: Time, increment: Option[Long])
sealed trait EmailStatus
object EmailStatus {
case object Contact extends EmailStatus
case object Other extends EmailStatus
case object Deprecated extends EmailStatus
}
case class Email(lockID: LockID, address: EmailAddress, status: EmailStatus)
case class UserLock(userID: UserID, lock: LockID)
case class Version(id: Int)
case class Secret(lockID: LockID, secret: String, deadline: Long)
lazy val dbName = "h2"
def saltConfig = "salt"
def adminLogin = "adminLogin"
def adminPass = "adminPass"
def smtpHostName = "smtpHostName"
def smtpPort = "smtpPort"
def updateDB(db: Database) = {
def max(s: Seq[Int]): Option[Int] = if (s.isEmpty) None else Some(s.max)
doRunTransaction(
DB { scheme =>
for {
v <- scheme.versions.map{_.id}.max.filter{_ < dbVersion}.result
//FIXME: Versions table to be updated
// _ <- scheme.versions += Version(f)
} yield ()
        //TODO: UPDATE DB
}
, db
)
}
def initDB(location: File) = {
location.parent.toJava.mkdirs()
lazy val db: Database = Database.forDriver(
driver = new org.h2.Driver,
url = s"jdbc:h2:/${location}"
)
def dbWorks =
Try {
Await.result(db.run(dbScheme.versions.length.result), Duration.Inf)
} match {
        case Failure(_) ⇒ false
        case Success(_) ⇒ true
}
if (!dbWorks)
doRunTransaction(DB(
scheme =>
(scheme.users.schema ++
scheme.locks.schema ++
scheme.userLocks.schema ++
scheme.emails.schema ++
scheme.versions.schema ++
scheme.secrets.schema).create
), db)
db
}
}
| ISCPIF/doors | server/src/main/scala/fr/iscpif/doors/server/db/package.scala | Scala | agpl-3.0 | 5,874 |
package com.nutomic.ensichat.fragments
import android.content.SharedPreferences.OnSharedPreferenceChangeListener
import android.content.{Intent, SharedPreferences}
import android.os.Bundle
import android.preference.{PreferenceFragment, PreferenceManager}
import com.nutomic.ensichat.R
import com.nutomic.ensichat.activities.EnsichatActivity
import com.nutomic.ensichat.core.interfaces.SettingsInterface._
import com.nutomic.ensichat.core.messages.body.UserInfo
import com.nutomic.ensichat.fragments.SettingsFragment._
import com.nutomic.ensichat.service.ChatService
object SettingsFragment {
val Version = "version"
}
/**
* Settings screen.
*/
class SettingsFragment extends PreferenceFragment with OnSharedPreferenceChangeListener {
private lazy val activity = getActivity.asInstanceOf[EnsichatActivity]
private lazy val version = findPreference(Version)
private lazy val prefs = PreferenceManager.getDefaultSharedPreferences(getActivity)
override def onCreate(savedInstanceState: Bundle): Unit = {
super.onCreate(savedInstanceState)
addPreferencesFromResource(R.xml.settings)
val packageInfo = getActivity.getPackageManager.getPackageInfo(getActivity.getPackageName, 0)
version.setSummary(packageInfo.versionName)
prefs.registerOnSharedPreferenceChangeListener(this)
}
override def onDestroy(): Unit = {
super.onDestroy()
prefs.unregisterOnSharedPreferenceChangeListener(this)
}
/**
* Sends the updated username or status to all contacts.
*/
override def onSharedPreferenceChanged(sharedPreferences: SharedPreferences, key: String) {
key match {
case KeyUserName | KeyUserStatus =>
val ui = new UserInfo(prefs.getString(KeyUserName, ""), prefs.getString(KeyUserStatus, ""))
activity.database.get.getContacts.foreach(c => activity.service.get.sendTo(c.address, ui))
case KeyAddresses =>
val intent = new Intent(getActivity, classOf[ChatService])
intent.setAction(ChatService.ActionNetworkChanged)
getActivity.startService(intent)
case _ =>
}
}
}
| Nutomic/ensichat | android/src/main/scala/com/nutomic/ensichat/fragments/SettingsFragment.scala | Scala | mpl-2.0 | 2,085 |
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runutils
import java.io.PrintWriter
import logic.Modes.ModeAtom
import logic.{AtomSignature, Clause, Literal, Theory, Variable}
import utils.lookaheads._
import utils.parsers.ModesParser
import BKHandling._
import com.typesafe.scalalogging.LazyLogging
import woled.State
import scala.io.Source
import scala.util.matching.Regex
/**
* Created by nkatz on 9/13/16.
*/
object Globals {
def apply(): Unit = {
new Globals("")
}
//var hedgePredictionThreshold = 0.0 // Quick & dirty, for experiments
var sleepingExpertsLearningRate = 0.0 // Quick & dirty, for experiments
var sleepingExpertsFeedBackBias = 0.0
var hedgeInertia = false
var timeDebug = List[Double]()
var scoringFunction = "default" // precision for initiation, recall for termination
var totalPos = 0
var totalNegs = 0
// This may be set to a different value (e.g. passed from cmd) during the construction of the Globals instance
var MAX_CLAUSE_LENGTH = 15
var LEARNING_WHOLE_THEORIES = false // not really used anywhere
val cwd: String = System.getProperty("user.dir") // Current working dir
val ASPHandler = s"$cwd/asp/ASPHandler.py"
/* Global names */
val FIND_ALL_REFMS = "findAllRefs"
val ABDUCTION = "abduction"
val DEDUCTION = "deduction"
val GET_QUERIES = "getQueries"
val GET_GROUNDINGS = "getGroundings"
val XHAIL = "xhail"
val CHECKSAT = "checksat"
val ILED = "iled"
val INFERENCE = "inference"
val SEARCH_MODELS = "search_models" //used to search alternative abductive explanations with iterative abductive search
val SCORE_RULES = "score_rules"
val GROW_NEW_RULE_TEST = "grow_new_rule_test"
// These values may be set during the construction of the Globals instance
var glvalues =
scala.collection.mutable.Map[String, String](
"cwa" -> "true",
"iter-deepening" -> "false",
"mode" -> "incremental",
"perfect-fit" -> "true",
"iterations" -> "1", //"1000",
"variableDepth" -> "1",
"withWeaks" -> "false",
"withBacktracking" -> "true",
"refinementSearch" -> "setCover", // either setCover or fullSearch
// specializeOnly does not generate new kernel sets on new examples,
// it only tries to refine an initial hypothesis based
// on an initial kernel set, acquired from the first window
"specializeOnly" -> "false",
"compressKernels" -> "true",
"ruleEvaluationFunction" -> "precision", //"mestimate"//"precision"
// specializationDepth is used by OLED only. It specifies how "deep" in the
// specialization lattice of a bottom clause we want to search. For instance
// with specializationDepth=2, OLED generates candidate refinements of a clause
// by using the 1-subsets and the 2-subsets of the corresponding bottom clause.
// with specializationDepth=2 it uses 1-subsets, 2-subsets and 3-subsets and so on
"specializationDepth" -> "1",
// if OLEDdownscoreBySimilarity is true then OLED penalizes candidate clauses
// that are too similar to existing clauses in the current hypothesis,
      // to allow for exploring the quality of different clauses (which might never be
      // selected because of a tie in score with other clauses).
"OLEDdownscoreBySimilarity" -> "true",
"distributed" -> "false",
"with-jep" -> "false",
"domain" -> "any",
// Use this to get non-empty revisions at any point. This is necessary
// because there are cases where the model corresponding to an empty
// theory may have lower cost (in the optimization) than a model that
// corresponds to a theory (e.g. when including any initiation rule in the theory yields
// many fps). In such cases the solver will opt for an empty theory, which is not
// always desirable. This parameter is used by the MCTS version of OLED.
"smallest-nonempty" -> "false",
// Weights on examples
"tp-weight" -> "1",
"fp-weight" -> "1",
"fn-weight" -> "1",
"with-inertia" -> "false",
"weight-learning" -> "false",
"with-ec" -> "true"
)
// if jep is used "UNSAT" else "UNSATISFIABLE"
def UNSAT = if (glvalues("with-jep").toBoolean) "UNSAT" else "UNSATISFIABLE"
  // This stores the current initiation/termination
// parts of the theory. These fields are used by the monolithic version
// of OLED only, when learning with inertia (from edge interval points)
// to get the joint theory and see if it satisfies each new example. Abduction
// and new clauses are generated if not.
//--------------------------------------------------------------------------------------
// UPDATE: Testing for new clause generation using the satisfiability of the
  // current joint theory works for strongly-initiated fluents with little or no
  // noise, but it takes a lot of time in large learning tasks.
  // The reason is that the joint theory is unsatisfiable in most cases, since it
  // contains over-general rules that erroneously re-initiate or terminate a target
  // fluent. This means that abduction and new kernel set generation take place
  // almost always, in every new mini-batch, which causes great delays in the execution.
  // For this to work we'd need a more ILED-style approach, where clauses are not scored,
  // but corrected at every new mistake. In the absence of noise this makes the joint
  // theory quickly converge to the correct one. On the other hand, if there is a
  // substantial amount of noise in the data, so that the edge interval points are
  // frequently corrupted, there is no hope of learning strongly-initiated fluents, so there
// is no point discussing it or trying to fix it with simple modifications in the BK.
//--------------------------------------------------------------------------------------
var CURRENT_THEORY_INITIATED: Vector[Clause] = Vector[Clause]()
var CURRENT_THEORY_TERMINATED: Vector[Clause] = Vector[Clause]()
def getCurrentJointTheory() = {
Theory((CURRENT_THEORY_INITIATED ++ CURRENT_THEORY_TERMINATED).toList)
}
//var errorProb = Vector.empty[Int]
}
class Globals(val entryPath: String) extends LazyLogging {
/*
* Global values and utils.
*/
val state = new State
val cwd: String = System.getProperty("user.dir") // Current working dir
val inputPath: String = entryPath // Path to bk and modes files
val modesFile: String = s"$inputPath/modes" // Mode Declarations file
//val AUXILIARY_PREDS = "auxiliaryPredicates"
val BK_INITIATED_ONLY = s"$inputPath/bk-initiated-only.lp"
val BK_TERMINATED_ONLY = s"$inputPath/bk-terminated-only.lp"
val ABDUCE_WITH_INERTIA = s"$inputPath/abduce-with-inertia.lp"
val INITIATED_ONLY_INERTIA = s"$inputPath/initiated-only-with-inertia.lp"
val BK_INITIATED_ONLY_MARKDED = s"$inputPath/bk-score-initiated.lp" // BK for scoring initiation rules
val BK_TERMINATED_ONLY_MARKDED = s"$inputPath/bk-score-terminated.lp" // BK for scoring termination rules
val BK_RULE_SCORING_MARKDED = s"$inputPath/bk-score.lp" // BK for rule scoring when learning without the EC.
val USER_BK = s"$inputPath/bk"
val BK_WHOLE_EC = s"$inputPath/bk.lp"
val BK_WHOLE = s"$inputPath/bk.lp" // for learning without the EC, no practical difference
val BK_CROSSVAL = s"$inputPath/bk-for-crossval.lp"
val ILED_NO_INERTIA: String = inputPath + "/bk-no-inertia.lp"
def matches(p: Regex, str: String) = p.pattern.matcher(str).matches
val modesParser = new ModesParser
val MODES: List[String] = Source.fromFile(modesFile).getLines.toList.filter(line => !matches("""""".r, line) && !line.startsWith("%"))
val MODEHS: List[ModeAtom] = MODES.filter(m => m.contains("modeh") && !m.startsWith("%")).map(x => x).
map(x => modesParser.getParseResult(modesParser.parseModes(modesParser.modeh, x)))
if (MODEHS.isEmpty) logger.error("No head mode declarations found.")
val MODEBS: List[ModeAtom] = MODES.filter(m => m.contains("modeb") && !m.startsWith("%")).
map(x => modesParser.getParseResult(modesParser.parseModes(modesParser.modeb, x)))
if (MODEBS.isEmpty) logger.error("No body mode declarations found.")
/* The input to this method is a Literal representation of mode atoms and example pattern atoms (variabilized). */
def getTypeAxioms(m: Literal): Set[String] = {
val plmrkTerms = m.placeMarkers
val (posPlmrkTerms, negPlrmTerms, grndPlmrkTerms) = (plmrkTerms._1, plmrkTerms._2, plmrkTerms._3)
val allPlmrks = (posPlmrkTerms ++ negPlrmTerms ++ grndPlmrkTerms).map(x => x.asInstanceOf[Variable]).toSet
allPlmrks.foldLeft(Set[String]()) { (accum, y) =>
val allOtherPlmrks = allPlmrks diff Set(y)
if (y.inOrOutVar == "+" || y.inOrOutVar == "-") {
val result_ = s"${y._type}(${{ y.name }}) :- ${m.tostring}."
// the regex below matches variable symbols which do not appear in predicate of function
// names. So it will match X0 in p(X0) but not in pX0(X0), pxX0(X0), pX_0(X0), p_2X0(X0) and so on
val result = allOtherPlmrks.foldLeft(result_) { (x1, y1) => x1.replaceAll(s"(?<![a-zA-Z0-9_]+)${y1.name}", "_") }
accum + result
} else {
accum
}
}
}
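  // Illustrative example (hypothetical mode declaration): for the variabilized input atom
  // happensAt(walking(X0),X1), where X0 is a +person and X1 is a +time placemarker,
  // getTypeAxioms produces:
  //   person(X0) :- happensAt(walking(X0),_).
  //   time(X1) :- happensAt(walking(_),X1).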
// Example patterns as a list[ModeAtom] (helper)
val eps1: List[ModeAtom] =
MODES.filter(m => m.contains("examplePattern") && !m.startsWith("%")).
map(x => modesParser.getParseResult(modesParser.parseModes(modesParser.exmplPattern, x)))
val eps2: List[ModeAtom] = eps1 match {
case List() => MODEHS // if no example patterns are found, use the head mode declarations for them
case _ => eps1
}
// Auxiliary predicates. These are input predicates which are not part of the target language
// but are necessary for extracting the types of entities in the domain (e.g. think of coords/4 in CAVIAR).
private val inputPreds: List[ModeAtom] = {
MODES.filter(m => m.contains("inputPredicate") && !m.startsWith("%")).
map(x => modesParser.getParseResult(modesParser.parseModes(modesParser.inputPred, x)))
}
if (inputPreds.exists(p => p.isNAF)) {
logger.error(s"NAF is not allowed in input predicates.")
System.exit(-1)
}
  // This method generates type axioms for the mode declarations,
// i.e. rules of the form: time(X1) :- happensAt(active(_),X1).
private val typeAxioms = {
val m = inputPreds.filter(x => !x.isNAF).map(x => x.varbed)
val x = m.flatMap(getTypeAxioms).toSet
//x foreach println
x
}
/*
* Comparison predicates compare numerical values to a threshold, e.g:
*
* close(p1, p2, 30, 10)
*
* meaning that the Euclidean distance of p1, p2 at time 10 is less than 30.
*
* Comparison predicates may be declared in the modes file like this:
*
* comparisonPredicate(close(+person,+person,#numvalue,+time), lessThan, comparison_term_position(3))
*
* The #numvalue placemarker indicates the position of the actual numerical threshold
* while the 'lessThan' term (can also be 'greaterThan') declares the intended "semantics"
* of the predicate. Note that numvalue has to be the type of this term in the corresponding body declaration. The
* comparison_term_position(3) indicates the position of the comparison term in the atom. In folded atoms the whole
* "path" to this term needs to be specified e.g.
*
* comparisonPredicate(far(+person,+person,test(+person, p(#threshold_value)),+time), greaterThan, comparison_term_position(3,2,1))
*
* Here to find the comparison term take atom.terms(3).terms(2).terms(1). See also the method getComparisonTerm
* in the Modes class and the getComparisonTerm in the Literal class.
*
* Comparison predicate declarations are used internally to allow for two tasks that simplify the learning process:
*
* 1. Reduce clauses: When a comparison predicate in the lessThan semantics and with numvalue1 is added to a rule,
* then any other similar predicate with numvalue2 such that numvalue2 > numvalue1 is removed from the rule.
* Rules with comparison predicate in the greaterThan semantics are reduced accordingly.
* 2. When generating candidate specializations, rules that consist of comparison predicates only (e.g. close/4
* predicates only) are omitted.
* */
val comparisonPredicates: List[ModeAtom] = {
MODES.filter(m => m.contains("comparisonPredicate") && !m.startsWith("%")).
map(x => modesParser.getParseResult(modesParser.parseModes(modesParser.compPred, x)))
}
MODEBS foreach { m =>
val x = comparisonPredicates.find(z => z == m).getOrElse(ModeAtom())
if (x != ModeAtom()) {
m.compRelation = x.compRelation
m.comparisonTermPosition = x.comparisonTermPosition
}
}
val headAtomSignatures: List[AtomSignature] = {
MODEHS.map(x => new AtomSignature(x.functor, x.arity))
}
val bodyAtomSignatures: List[AtomSignature] = {
MODEBS.map(x => new AtomSignature(x.functor, x.arity))
}
  /* Reads the user-provided background knowledge from $inputPath/bk and produces helper files
     (e.g. for rule evaluation, bottom clause generation etc.) */
def generateBKFiles_Event_Calculus() = {
//private val PY_LESSTHAN =
// "#script (python)\nfrom gringo import Fun\nimport math\n\ndef less_than(x,y):\n return float(x) < float(y)\n\n#end."
val EC_AXIOM_1 = "holdsAt(F,Te) :- fluent(F), not sdFluent(F), initiatedAt(F,Ts), next(Ts, Te)."
val EC_AXIOM_2 = "holdsAt(F,Te) :- fluent(F), not sdFluent(F), holdsAt(F,Ts), " +
"not terminatedAt(F,Ts), next(Ts, Te)."
//private val RIGHT_BEFORE_DEF = "right_before(X,Z) :- time(X), time(Z), Z = X+40."
///*
/*val RIGHT_BEFORE_DEF ="\n#script (python)\ntimes = []\ndef collect_all(a):\n times.append(a)\n " +
"return 1\ndef sorted():\n times.sort()\n return zip(range(len(times)), times)\n#end.\ncollect_all." +
"\ncollect_all :- time(X), @collect_all(X) == 0.\nsorted_pair(X,N) :- collect_all, " +
"(X,N) = @sorted().\nnext(X, Y) :- sorted_pair(A,X), sorted_pair(A+1,Y).\n"*/
val RIGHT_BEFORE_DEF =
"""
|#script (python)
|times = []
|def collect_all(a):
| times.append(a)
| return 1
|def sorted():
| times.sort()
| return zip(range(len(times)), times)
|def end_time():
| times.sort()
| return times[-1]
|def start_time():
| times.sort()
| return times[0]
|#end.
|collect_all.
|collect_all :- time(X), @collect_all(X) == 0.
|sorted_pair(X,N) :- collect_all, (X,N) = @sorted().
|next(X, Y) :- sorted_pair(A,X), sorted_pair(A+1,Y).
|start_end :- collect_all.
|start_end(X,Y) :- start_end, X = @start_time(), Y = @end_time().
|%endTime(X) :- X = @end_time().
|startTime(X) :- X = @start_time().
|""".stripMargin
//*/
val INIT_TIME_DEF = "initialTime(X) :- time(X), #false : X > Y, time(Y)."
val INIT_HOLDS_DEF = "%THIS SHOULD NOT BE HERE!\nholdsAt(F,T) :- initialTime(T), example(holdsAt(F,T))."
val CORE_EVENT_CALCULUS_BK = List(EC_AXIOM_1, EC_AXIOM_2, RIGHT_BEFORE_DEF, INIT_TIME_DEF, INIT_HOLDS_DEF)
val CROSSVAL_EVENT_CALCULUS_BK = List(EC_AXIOM_1, EC_AXIOM_2, RIGHT_BEFORE_DEF)
val INITIATED_ONLY_EVENT_CALCULUS_BK = List(EC_AXIOM_1, RIGHT_BEFORE_DEF, INIT_TIME_DEF, INIT_HOLDS_DEF)
val TERMINATED_ONLY_EVENT_CALCULUS_BK =
List(EC_AXIOM_1, EC_AXIOM_2, RIGHT_BEFORE_DEF, INIT_TIME_DEF, INIT_HOLDS_DEF,
"holdsAt(F,T) :- fluent(F), not sdFluent(F), examplesInitialTime(T), example(holdsAt(F,T)).",
"examplesInitialTime(X) :- example(holdsAt(_,X)), #false : X > Y, example(holdsAt(_,Y)).")
// Read the user-input BK
val userBK = Source.fromFile(USER_BK).getLines.toList.mkString("\n")
// Generate the ASP scoring rules:
val scoringRules = generateScoringBK(MODEHS)
// Type axioms:
val tas = this.typeAxioms.mkString("\n")
// Generate bk.lp file (it will be used for reasoning)
val bkFile = new java.io.File(BK_WHOLE_EC)
val pw1 = new PrintWriter(bkFile)
pw1.write(userBK + "\n")
pw1.write(CORE_EVENT_CALCULUS_BK.mkString("\n"))
pw1.write("\n" + tas)
pw1.close()
bkFile.deleteOnExit()
// Generate initiation-only BK file
val initOnlyBKFile = new java.io.File(BK_INITIATED_ONLY)
val pw2 = new PrintWriter(initOnlyBKFile)
pw2.write(userBK + "\n")
pw2.write(INITIATED_ONLY_EVENT_CALCULUS_BK.mkString("\n"))
pw2.write("\n" + tas)
pw2.close()
initOnlyBKFile.deleteOnExit()
// Generate termination-only BK file
val termOnlyBKFile = new java.io.File(BK_TERMINATED_ONLY)
val pw3 = new PrintWriter(termOnlyBKFile)
pw3.write(userBK + "\n")
pw3.write(TERMINATED_ONLY_EVENT_CALCULUS_BK.mkString("\n"))
pw3.write("\n" + tas)
pw3.close()
termOnlyBKFile.deleteOnExit()
// Generate initiation-scoring rules
val scoreInitFile = new java.io.File(BK_INITIATED_ONLY_MARKDED)
val pw4 = new PrintWriter(scoreInitFile)
pw4.write(userBK + "\n")
pw4.write("\n" + scoringRules._1 + "\n" + RIGHT_BEFORE_DEF + "\n")
pw4.write("\n" + tas)
pw4.close()
scoreInitFile.deleteOnExit()
// Generate termination-scoring rules
val scoreTermFile = new java.io.File(BK_TERMINATED_ONLY_MARKDED)
val pw5 = new PrintWriter(scoreTermFile)
pw5.write(userBK + "\n")
pw5.write("\n" + scoringRules._2 + "\n" + RIGHT_BEFORE_DEF + "\n")
pw5.write("\n" + tas)
pw5.close()
scoreTermFile.deleteOnExit()
// Generate cross-validation file
val crossValFile = new java.io.File(BK_CROSSVAL)
val pw6 = new PrintWriter(crossValFile)
pw6.write(userBK + "\n")
pw6.write(CROSSVAL_EVENT_CALCULUS_BK.mkString("\n"))
pw6.write("\n" + tas)
pw6.close()
crossValFile.deleteOnExit()
}
def generateBKFiles_No_Event_Calculus() = {
// Read the user-input BK
val userBK = Source.fromFile(USER_BK).getLines.toList.mkString("\n")
// Generate the ASP scoring rules:
val scoringRules = generateScoringBK(MODEHS)
// Type axioms:
val tas = this.typeAxioms.mkString("\n")
// Generate bk.lp file (it will be used for reasoning)
val bkFile = new java.io.File(BK_WHOLE)
val pw1 = new PrintWriter(bkFile)
pw1.write(userBK + "\n")
pw1.write("\n" + tas)
pw1.close()
bkFile.deleteOnExit()
// Generate BK file for rule scoring
val scoreTermFile = new java.io.File(BK_RULE_SCORING_MARKDED)
val pw5 = new PrintWriter(scoreTermFile)
pw5.write(userBK + "\n")
pw5.write("\n" + scoringRules._2 + "\n")
pw5.write("\n" + tas)
pw5.close()
scoreTermFile.deleteOnExit()
// Generate cross-validation file
val crossValFile = new java.io.File(BK_CROSSVAL)
val pw6 = new PrintWriter(crossValFile)
pw6.write(userBK + "\n")
pw6.write("\n" + tas)
pw6.close()
crossValFile.deleteOnExit()
}
if (Globals.glvalues("with-ec").toBoolean) {
generateBKFiles_Event_Calculus()
} else {
generateBKFiles_No_Event_Calculus()
}
val EXAMPLE_PATTERNS: List[Literal] = eps2 map (p => p.varbed)
val EXAMPLE_PATTERNS_AS_STRINGS: List[String] = EXAMPLE_PATTERNS map (_.tostring)
private val coverageDirectives = getCoverageDirectives(EXAMPLE_PATTERNS_AS_STRINGS)
val TPS_RULES: String = coverageDirectives._1
val FPS_RULES: String = coverageDirectives._2
val FNS_RULES: String = coverageDirectives._3
val TPS_RULES_MARKED: String = coverageDirectives._4
val FPS_RULES_MARKED: String = coverageDirectives._5
val FNS_RULES_MARKED: String = coverageDirectives._6
val CONSTRAINT_COVER_ALL_POSITIVES: String = coverageDirectives._7
val CONSTRAINT_EXCLUDE_ALL_NEGATIVES: String = coverageDirectives._8
val SHOW_TPS_ARITY_1 = "\n#show tps/1."
val SHOW_TPS_ARITY_2 = "\n#show tps/2."
val SHOW_FPS_ARITY_1 = "\n#show fps/1."
val SHOW_FPS_ARITY_2 = "\n#show fps/2."
val SHOW_FNS_ARITY_1 = "\n#show fns/1."
val SHOW_FNS_ARITY_2 = "\n#show fns/2."
val SHOW_TIME = "\n#show times/1."
val SHOW_INTERPRETATIONS_COUNT = "\n#show countGroundings/1."
val INCLUDE_BK: String => String = (file: String) => s"\n\n#include " + "\"" + file + "\".\n"
val HIDE = "\n#show.\n"
// if jep is used "UNSAT" else "UNSATISFIABLE"
def UNSAT = if (Globals.glvalues("with-jep").toBoolean) "UNSAT" else "UNSATISFIABLE"
val SAT = "SAT"
val TPS_COUNT_RULE: String = tpsCountRules(EXAMPLE_PATTERNS)
val FPS_COUNT_RULE: String = fpsCountRules(EXAMPLE_PATTERNS)
val FNS_COUNT_RULE: String = fnsCountRules(EXAMPLE_PATTERNS)
// FNs for terminated: not marked(I,exmpl), example(exmpl) (same as FNs for initiated)
// TPs for terminated: marked(I, exmpl), example(exmpl) (same as TPs for initiated)
val TIMES_COUNT_RULE = "\ntimes(X) :- X = #count { Z: time(Z) }.\n"
/*
def EXAMPLE_COUNT_RULE =
this.EXAMPLE_PATTERNS.map{ x =>
s"exampleGrounding(${x.tostring}):-${x.getTypePredicates(this).mkString(",")}.\n"+
s"countGroundings(X) :- X = #count { ${x.getVars.toList.map(_.tostring).mkString(",")}: " +
s"exampleGrounding(${x.tostring}),${x.getTypePredicates(this).mkString(",")} }."
}.mkString("\n")+"\n"
*/
  /* I NEED TO FIND A WAY TO MAKE THIS GENERIC (NON-EVENT-CALCULUS-SPECIFIC).
* FOR EXAMPLE, THE USER COULD SPECIFY IT IN THE MODES FILE. */
/*
def EXAMPLE_COUNT_RULE = "exampleGrounding(holdsAt(F,T)):-fluent(F),time(T).\n"+
"countGroundings(X) :- X = #count { F,T: exampleGrounding(holdsAt(F,T)),fluent(F),time(T) }.\n"
*/
def EXAMPLE_COUNT_RULE = {
val targetPred = EXAMPLE_PATTERNS.head
val tpstr = targetPred.tostring
val vars = targetPred.getVars.map(x => x.name).mkString(",")
val typePreds = targetPred.getTypePredicates(this).mkString(",")
s"exampleGrounding($tpstr) :- $typePreds.\ncountGroundings(X) :- X = #count { $vars: exampleGrounding($tpstr),$typePreds }.\n"
}
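  // For illustration: with a single example pattern holdsAt(F,T), typed by fluent(F) and time(T), the generated
  // string is essentially the Event-Calculus-specific version shown in the commented-out block above, i.e.
  // exampleGrounding(holdsAt(F,T)) :- fluent(F),time(T). followed by the corresponding countGroundings #count rule.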
/*
val LOOK_AHEADS = {
val f = Source.fromFile(modesFile).getLines.toList.filter(line => line.startsWith("lookahead"))
if (f.nonEmpty) f.map( x => new LookAheadSpecification(x) ) else Nil
}
*/
private val LOOK_AHEADS_TEST = {
val f = Source.fromFile(modesFile).getLines.toList.filter(line => line.startsWith("lookahead"))
if (f.nonEmpty) f.map(x => new LookAheadUtils.LookAheadSpecification(x)) else Nil
}
/*
def getAdditionalLanguageBias(predicateName: String) = {
val f = Source.fromFile(modesFile).getLines.toList.filter(line => line.startsWith(s"$predicateName"))
f.map(x => x.split(s"$predicateName\\(")(1).split("\\)")(0)).filter(p => (MODEHS++MODEBS).exists(q => q.functor == p))
}
val FORCE_PREDS = getAdditionalLanguageBias("force")
val BASIC_PREDS = getAdditionalLanguageBias("basic")
val AUXILIARY_PREDS = getAdditionalLanguageBias("auxiliary")
*/
var EVALUATION_FUNCTION = "precision_recall" // alternative is foil_gain
var MAX_CLAUSE_LENGTH = 15
var LEARNING_WHOLE_THEORIES = false
var TOP_THEORY_SCORE = 0.0
var TOP_THEORY_SCORE_COUNT = 0
/*
* The following are not used anywhere, they are for debugging
*/
/*
private val initHead = "initiatedAt(meeting(X0,X1),X2)"
private val initHead1 = "initiatedAt(meeting(X1,X0),X2)"
private val termHead = "terminatedAt(meeting(X0,X1),X2)"
private val termHead1 = "terminatedAt(meeting(X1,X0),X2)"
private val BCBodyLits =
List("happensAt(inactive(X1),X2)","happensAt(inactive(X0),X2)",
"happensAt(active(X1),X2)","happensAt(active(X0),X2)",
"happensAt(walking(X1),X2)","happensAt(walking(X0),X2)",
"happensAt(running(X1),X2)","happensAt(running(X0),X2)",
"happensAt(appear(X1),X2)","happensAt(appear(X0),X2)",
"happensAt(disappear(X1),X2)","happensAt(disappear(X0),X2)",
"not happensAt(disappear(X1),X2)","not happensAt(disappear(X0),X2)",
"close(X0,X1,24,X2)","close(X1,X0,24,X2)","close(X0,X1,25,X2)","close(X1,X0,25,X2)",
"close(X0,X1,30,X2)","close(X1,X0,30,X2)","close(X0,X1,34,X2)","close(X1,X0,34,X2)",
"far(X0,X1,24,X2)","far(X1,X0,24,X2)","far(X0,X1,25,X2)","far(X1,X0,25,X2)",
"far(X0,X1,30,X2)","far(X1,X0,30,X2)","far(X0,X1,34,X2)","far(X1,X0,34,X2)")
val initBC1 = {
val h = Literal.toLiteral(initHead)
val b = BCBodyLits map (x => Literal.toLiteral(x))
Clause(List(h) ++ b)
}
val initBC2 = {
val h = Literal.toLiteral(initHead1)
val b = BCBodyLits map (x => Literal.toLiteral(x))
Clause(List(h) ++ b)
}
val termBC1 = {
val h = Literal.toLiteral(termHead)
val b = BCBodyLits map (x => Literal.toLiteral(x))
Clause(List(h) ++ b)
}
val termBC2 = {
val h = Literal.toLiteral(termHead1)
val b = BCBodyLits map (x => Literal.toLiteral(x))
Clause(List(h) ++ b)
}
*/
}
| nkatzz/OLED | src/main/scala/app/runutils/Globals.scala | Scala | gpl-3.0 | 25,519 |
/*
* Copyright 2011-2014 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.ebpi.yaidom.convert
import scala.collection.JavaConverters.bufferAsJavaListConverter
import org.xml.sax.Attributes
import org.xml.sax.ContentHandler
import org.xml.sax.ext.LexicalHandler
import org.xml.sax.helpers.AttributesImpl
import nl.ebpi.yaidom.core.Declarations
import nl.ebpi.yaidom.core.Scope
import nl.ebpi.yaidom.simple.Comment
import nl.ebpi.yaidom.simple.Document
import nl.ebpi.yaidom.simple.DocumentConverter
import nl.ebpi.yaidom.simple.Elem
import nl.ebpi.yaidom.simple.ElemConverter
import nl.ebpi.yaidom.simple.EntityRef
import nl.ebpi.yaidom.simple.Node
import nl.ebpi.yaidom.simple.ProcessingInstruction
import nl.ebpi.yaidom.simple.Text
import YaidomToSaxEventsConversions.SaxEventsProducer
/**
* Converter from yaidom nodes to SAX event producers, in particular from [[nl.ebpi.yaidom.simple.Elem]] to `SaxEventsProducer`,
* and from [[nl.ebpi.yaidom.simple.Document]] to `SaxEventsProducer`.
*
* @author Chris de Vreeze
*/
trait YaidomToSaxEventsConversions extends ElemConverter[SaxEventsProducer] with DocumentConverter[SaxEventsProducer] {
/** Converts a yaidom `Document` to a `SaxEventsProducer` */
final def convertDocument(doc: Document): SaxEventsProducer = {
{ (handler: ContentHandler) =>
handler.startDocument()
doc.children foreach { ch => convertNode(ch, Scope.Empty)(handler) }
handler.endDocument()
}
}
/**
* Converts a yaidom `Elem` to a `SaxEventsProducer`.
* The assumed parent scope is the empty scope.
*/
final def convertElem(elm: Elem): SaxEventsProducer = {
convertElem(elm, Scope.Empty)
}
/**
* Converts a yaidom node to a `SaxEventsProducer`.
* The given parent scope is used, in case the node is an `Elem`.
*/
final def convertNode(node: Node, parentScope: Scope): SaxEventsProducer = {
node match {
case e: Elem => convertElem(e, parentScope)
case t: Text => convertText(t)
case pi: ProcessingInstruction => convertProcessingInstruction(pi)
// Difficult to convert yaidom EntityRef to SAX event producer, because of missing declaration
case er: EntityRef => (handler => ())
case c: Comment => convertComment(c)
}
}
/**
* Converts a yaidom `Elem` to a `SaxEventsProducer`.
* The given parent scope is used, that is, the prefix mappings before the outer "start element event" correspond to
* `parentScope.relativize(elm.scope)`.
*/
final def convertElem(elm: Elem, parentScope: Scope): SaxEventsProducer = {
{ (handler: ContentHandler) =>
// Not tail-recursive, but the recursion depth should be limited
val namespaces: Declarations = parentScope.relativize(elm.scope)
val namespacesMap = namespaces.prefixNamespaceMap
for ((prefix, nsUri) <- namespacesMap) handler.startPrefixMapping(prefix, nsUri)
generateStartElementEvent(elm, handler, parentScope)
// Recursive calls. Not tail-recursive, but recursion depth should be limited.
for (node <- elm.children) {
convertNode(node, elm.scope)(handler)
}
generateEndElementEvent(elm, handler, parentScope)
for ((prefix, nsUri) <- namespacesMap) handler.endPrefixMapping(prefix)
}
}
/**
* Converts a yaidom `Text` to a `SaxEventsProducer`.
*/
final def convertText(text: Text): SaxEventsProducer = {
{ (handler: ContentHandler) =>
handler match {
case handler: ContentHandler with LexicalHandler =>
if (text.isCData) handler.startCDATA()
handler.characters(text.text.toCharArray, 0, text.text.length)
if (text.isCData) handler.endCDATA()
case _ =>
handler.characters(text.text.toCharArray, 0, text.text.length)
}
}
}
/**
* Converts a yaidom `ProcessingInstruction` to a `SaxEventsProducer`.
*/
final def convertProcessingInstruction(
processingInstruction: ProcessingInstruction): SaxEventsProducer = {
{ (handler: ContentHandler) =>
handler.processingInstruction(processingInstruction.target, processingInstruction.data)
}
}
/**
* Converts a yaidom `Comment` to a `SaxEventsProducer`.
*/
final def convertComment(comment: Comment): SaxEventsProducer = {
{ (handler: ContentHandler) =>
handler match {
case handler: ContentHandler with LexicalHandler =>
handler.comment(comment.text.toCharArray, 0, comment.text.length)
case _ => ()
}
}
}
private def generateStartElementEvent(elm: Elem, handler: ContentHandler, parentScope: Scope): Unit = {
val uri = elm.resolvedName.namespaceUriOption.getOrElse("")
val attrs: Attributes = getAttributes(elm)
handler.startElement(uri, elm.localName, elm.qname.toString, attrs)
}
private def generateEndElementEvent(elm: Elem, handler: ContentHandler, parentScope: Scope): Unit = {
val uri = elm.resolvedName.namespaceUriOption.getOrElse("")
handler.endElement(uri, elm.localName, elm.qname.toString)
}
private def getAttributes(elm: Elem): Attributes = {
val attrs = new AttributesImpl
addNormalAttributes(elm, attrs)
attrs
}
/**
* Gets the normal (non-namespace-declaration) attributes, and adds them to the passed Attributes object.
* This method is called internally, providing the attributes that are passed to the startElement call.
*/
final def addNormalAttributes(elm: Elem, attrs: AttributesImpl): Attributes = {
val attrScope = elm.attributeScope
for ((attrQName, attrValue) <- elm.attributes) {
val attrEName = attrScope.resolveQNameOption(attrQName).getOrElse(sys.error(s"Corrupt non-resolvable attribute: $attrQName"))
val uri = attrEName.namespaceUriOption.getOrElse("")
val tpe = "CDATA"
attrs.addAttribute(uri, attrQName.localPart, attrQName.toString, tpe, attrValue)
}
attrs
}
/**
* Gets the namespace-declaration attributes, and adds them to the passed Attributes object.
* This method is not called internally.
*/
final def addNamespaceDeclarationAttributes(elm: Elem, parentScope: Scope, attrs: AttributesImpl): Attributes = {
val namespaces: Declarations = parentScope.relativize(elm.scope)
val namespacesMap = namespaces.prefixNamespaceMap
val tpe = "CDATA"
val xmlNs = "http://www.w3.org/XML/1998/namespace"
for ((prefix, nsUri) <- namespacesMap) {
if (prefix == "") {
attrs.addAttribute(xmlNs, "xmlns", "xmlns", tpe, nsUri)
} else {
val qname = s"xmlns:$prefix"
attrs.addAttribute(xmlNs, prefix, qname, tpe, nsUri)
}
}
attrs
}
}
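// Usage sketch (illustrative only; `doc` and `handler` stand for a yaidom Document and any SAX ContentHandler):
//
//   object Conversions extends YaidomToSaxEventsConversions
//   val produceEvents = Conversions.convertDocument(doc)
//   produceEvents(handler) // replays the document as startDocument/startElement/... calls on the handler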
object YaidomToSaxEventsConversions {
/** Producer of SAX events, given a `ContentHandler` on which the SAX event handlers are invoked */
type SaxEventsProducer = (ContentHandler => Unit)
}
| EBPI/yaidom | src/main/scala/nl/ebpi/yaidom/convert/YaidomToSaxEventsConversions.scala | Scala | apache-2.0 | 7,458 |
package org.pgscala.embedded
import java.io.File
import java.net.{HttpURLConnection, URL}
import java.security.MessageDigest
import java.text.NumberFormat
import java.util.regex.Pattern
import com.typesafe.scalalogging.StrictLogging
import org.apache.commons.io.FileUtils
import org.pgscala.embedded.Util._
import scala.concurrent.Future
import scala.concurrent.Await
import scala.concurrent.duration.Duration.Inf
import scala.util.Try
object PostgresSizeUpdater extends StrictLogging {
case class DownloadAttributes(version: PostgresVersion, variant: Int, os: OS, size: Long, sha256: Array[Byte])
private[this] val resolver = new PostgresVersionSpec()
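  // First try the statically recorded download metadata; if that fails, fall back to resolving the actual
  // download URL via PostgresVersionSpec and computing the archive's size and SHA-256 by streaming it once.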
private[this] def makePostgresDownload(_ver: PostgresVersion, _os: OS): PostgresDownloadBase = try {
PostgresDownload(_ver, _os)
} catch {
case _: Throwable => new {
val version = _ver
val os = _os
val url = {
val resolved = resolver.resolveActual(_ver, _os)
logger.info(s"Could not retrieve (${_ver}, ${_os}) from metadata, downloading: $resolved ...")
resolved
}
val variant = {(
Pattern.quote(s"postgresql-${_ver}-")
        + "(\\d+)"
+ Pattern.quote(s"-${os.name.classifier}${os.architecture.classifier}-binaries.${os.name.archiveMode}"))
.r.findFirstMatchIn(url)
.getOrElse(sys.error(s"Could not decode variant from url: $url"))
.group(1).toInt
}
val (size, sha256) = {
val buffer = new Array[Byte](65536)
val is = new URL(url).openStream()
try {
val md = MessageDigest.getInstance("SHA-256")
var length = 0L
while ({
val read = is.read(buffer)
if (read != -1) {
md.update(buffer, 0, read)
length += read
true
} else {
false
}
}) {}
(length, md.digest())
} finally {
is.close()
}
}
} with PostgresDownloadBase
}
implicit val executionContext = EmbeddedSpec.executionContext
lazy val downloadAttributes = Await.result(Future.sequence(for {
ver <- PostgresVersion.values.take(7)
os <- OS.values
} yield Future {
val download = makePostgresDownload(ver, os)
val url = download.downloadUrl
val conn = new java.net.URL(url).openConnection().asInstanceOf[HttpURLConnection]
conn.setRequestMethod("HEAD")
try {
val is = conn.getInputStream()
try {
val size = conn.getHeaderField("Content-Length").toLong
val sha256 = download.sha256
val nf = NumberFormat.getInstance()
logger.debug(s"Retrieved attributes for $ver-${download.variant} on OS $os - size: ${nf.format(size)} - sha256: ${bin2Hex(sha256)}")
DownloadAttributes(ver, download.variant, os, size, sha256)
} finally {
is.close()
}
} finally {
conn.disconnect()
}
}), Inf)
def main(args: Array[String]): Unit = {
val file = new File(EmbeddedSpec.projectRoot, "src/main/resources/org/pgscala/embedded/version-metadata.txt")
val oldBody = Try { FileUtils.readFileToString(file, "UTF-8") } getOrElse ""
val sb = new StringBuilder
for (DownloadAttributes(ver, variant, os, size, sha256) <- downloadAttributes) {
      ((sb ++= ver.toString += '-') append variant += ';' ++= os.toString += ';') append size += ';' ++= bin2Hex(sha256) += '\n'
}
val newBody = sb.toString
if (newBody != oldBody) {
logger.info("Updated version-metadata.txt in {}", file)
FileUtils.writeStringToFile(file, newBody, "UTF-8")
} else {
logger.debug("No need to update version-metadata.txt, as it contains the correct sizes")
}
EmbeddedSpec.shutdown()
}
}
| melezov/pgscala-embedded | src/test/scala/org/pgscala/embedded/PostgresSizeUpdater.scala | Scala | mit | 3,778 |
package io.github.binaryfoo.lagotto
import org.joda.time.DateTime
import scala.collection.AbstractIterator
case class PivotedLogEntry(row: Map[String, String]) extends LogEntry {
override def timestamp: DateTime = null
override def source: SourceRef = null
override def exportAsSeq: Seq[(String, String)] = row.toSeq
override def lines: String = ""
override def apply(id: String): String = row(id)
}
/**
* A single value for one of the N rows included in a single PivotedLogEntry.
*/
case class PivotedValue(field: String, value: String) extends LogEntry {
override def timestamp: DateTime = null
override def source: SourceRef = null
override def exportAsSeq: Seq[(String, String)] = null
override def lines: String = ""
override def apply(id: String): String = {
if (field != id) {
throw new IllegalArgumentException(s"Wrong value being queried $id != $field")
}
value
}
}
/**
* Supports a use case like:
*
* time(HH:mm),pivot(mti),count
*
* Instead of getting each time,mti,count pair in the vertical you get something like:
*
* time(HH:mm),0200 - count,0210 - count,0400 - count, 0410 - count
* 13:59,10,9,1,1
*
* Without the pivot you'd have:
*
* time(HH:mm),mti,count
* 13:59,0200,10
* 13:59,0210,9
* 13:59,0400,1
* 13:59,0410,1
*/
class PivotedIterator(val rotateOn: DirectExpr, val pivot: PivotExpr, val pivoted: Seq[FieldExpr], val entries: Iterator[LogEntry]) extends AbstractIterator[PivotedLogEntry] {
private val (aggregateOfPivot, toPivot) = pivoted.partition {
case a@AggregateExpr(_, _) => a.expr.exists(_.isInstanceOf[PivotResultExpr])
case _ => false
}
private val aggregateOps: Seq[PivotAggregate] = extractAggregateOps
private val (pivotedFields, pivotedLookup) = pivotResultFields
val fields: Seq[String] = Seq(rotateOn.field) ++ pivotedFields ++ aggregateOfPivot.map(_.field)
  // current value of the rotation expression (rotateOn)
private var currentKey: String = null
// pivot expr -> values of toPivot
// will be flattened into a single row
private var current: Map[String, Seq[String]] = Map.empty
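  // Consume entries while the rotation key stays the same, accumulating the pivoted values per pivot key;
  // when the key changes (or the input ends) the accumulated values are flushed as one output row.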
def readNext(): PivotedLogEntry = {
for (e <- entries) {
val thisKey = rotateOn(e)
val row = if (thisKey != currentKey) outputRow() else null
currentKey = thisKey
current = current.updated(pivot(e), toPivot.map(_(e)))
if (row != null)
return row
}
outputRow()
}
def outputRow() = {
if (currentKey != null) {
val pivotedRow = fields.zip(Seq(currentKey) ++ pivot.distinctValues().flatMap { v =>
current.getOrElse(v, toPivot.map(_ => "0"))
}).toMap
val rowAggregates = aggregateOps.map { case PivotAggregate(resultName, field, op) =>
val a = op.copy()
pivotedLookup(field).foreach(name => a += PivotedValue(field, pivotedRow(name)))
(resultName, a.result())
}
current = Map.empty
new PivotedLogEntry(pivotedRow ++ rowAggregates)
} else {
null
}
}
override def hasNext: Boolean = entries.hasNext || current.nonEmpty
override def next(): PivotedLogEntry = readNext()
private def extractAggregateOps: Seq[PivotAggregate] = {
aggregateOfPivot.map {
case AggregateExpr(resultName, op) =>
op match {
case o: FieldBasedAggregateOp =>
val pivotedField = o.expr.asInstanceOf[PivotResultExpr].pivotedField
if (!toPivot.exists(_.field == pivotedField))
throw new IAmSorryDave(s"$pivotedField must be in the field list to calculate ${o.field}")
PivotAggregate(resultName, pivotedField, op)
case CountIfBuilder(condition@FieldFilterOn(expr)) =>
val pivotedField = expr.asInstanceOf[PivotResultExpr].pivotedField
if (!toPivot.exists(_.field == pivotedField))
throw new IAmSorryDave(s"$pivotedField must be in the field list to calculate count(if($condition))")
PivotAggregate(resultName, pivotedField, op)
}
case x => throw new IAmSorryDave(s"Can't process $x")
}
}
private def pivotResultFields: (Seq[String], Map[String, Seq[String]]) = {
val pivoted: Seq[(String, String)] = pivot.distinctValues().flatMap(v => toPivot.map(p => p.field -> (v + " - " + p.field)))
val pivotResultFields = pivoted.map(_._2)
val pivotResultLookup = pivoted.groupBy(_._1).mapValues(_.map(_._2))
(pivotResultFields, pivotResultLookup)
}
}
case class PivotAggregate(resultName: String, pivotField: String, op: AggregateOp)
| binaryfoo/lagotto | src/main/scala/io/github/binaryfoo/lagotto/PivotedLogEntry.scala | Scala | mit | 4,524 |
package io.iohk.ethereum.utils
import java.math.BigInteger
import java.nio.{ByteBuffer, ByteOrder}
import akka.util.ByteString
import scala.util.Random
object ByteUtils {
/**
* Calculates number of matching bytes from the beginning of both arrays.
* Due to performance reasons needs to be as fast as possible which means usage of while loops and var's.
*
* @param a - first array of bytes to check
* @param b - second array to bytes to check
* @return Length of common prefix shared by both arrays
*/
def matchingLength(a: Array[Byte], b: Array[Byte]): Int = {
var prefixLen = 0
while (prefixLen < a.length && prefixLen < b.length && a(prefixLen) == b(prefixLen)) {
prefixLen = prefixLen + 1
}
prefixLen
}
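  // For example, matchingLength(Array[Byte](1, 2, 3), Array[Byte](1, 2, 9)) == 2, since only the first two bytes match.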
def bigIntegerToBytes(b: BigInteger, numBytes: Int): Array[Byte] = {
val bytes = new Array[Byte](numBytes)
val biBytes = b.toByteArray
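    // BigInteger.toByteArray may prepend an extra sign byte; skip it so the value still fits in numBytes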
val start = if (biBytes.length == numBytes + 1) 1 else 0
val length = Math.min(biBytes.length, numBytes)
System.arraycopy(biBytes, start, bytes, numBytes - length, length)
bytes
}
def bigIntToBytes(b: BigInt, numBytes: Int): Array[Byte] =
bigIntegerToBytes(b.bigInteger, numBytes)
def toBigInt(bytes: ByteString): BigInt =
bytes.foldLeft(BigInt(0)) { (n, b) => (n << 8) + (b & 0xff) }
/**
* Calculates xor distance between two byte arrays. Due to performance reasons needs to be as fast as possible
* which means usage of while loops and var's.
*
* @param a - array of bytes to xor
* @param b - array of bytes to xor
* @return Array[Byte] - each element of array is equal to `(a(i) ^ b(i))`
*/
def xor(a: Array[Byte], b: Array[Byte]): Array[Byte] = {
val ret = new Array[Byte](a.length)
var i = 0
while (i < a.length) {
ret(i) = (a(i) ^ b(i)).toByte
i += 1
}
ret
}
def or(arrays: Array[Byte]*): Array[Byte] = {
require(arrays.map(_.length).distinct.length <= 1, "All the arrays should have the same length")
require(arrays.nonEmpty, "There should be one or more arrays")
val zeroes = Array.fill(arrays.head.length)(0.toByte)
arrays.foldLeft[Array[Byte]](zeroes) { case (prevOr, array) =>
prevOr.zip(array).map { case (b1, b2) => (b1 | b2).toByte }
}
}
def and(arrays: Array[Byte]*): Array[Byte] = {
require(arrays.map(_.length).distinct.length <= 1, "All the arrays should have the same length")
require(arrays.nonEmpty, "There should be one or more arrays")
val ones = Array.fill(arrays.head.length)(0xff.toByte)
arrays.foldLeft[Array[Byte]](ones) { case (prevOr, array) =>
prevOr.zip(array).map { case (b1, b2) => (b1 & b2).toByte }
}
}
def randomBytes(len: Int): Array[Byte] = {
val arr = new Array[Byte](len)
new Random().nextBytes(arr)
arr
}
def bigEndianToShort(bs: Array[Byte]): Short = {
val n = bs(0) << 8
(n | bs(1) & 0xff).toShort
}
def padLeft(bytes: ByteString, length: Int, byte: Byte = 0): ByteString = {
val l = math.max(0, length - bytes.length)
val fill = Seq.fill[Byte](l)(byte)
fill ++: bytes
}
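  // For example, padLeft(ByteString(1, 2), 4) yields ByteString(0, 0, 1, 2); input that is already long enough is returned unchanged.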
def compactPickledBytesToArray(buffer: ByteBuffer): Array[Byte] = {
val data = Array.ofDim[Byte](buffer.limit())
buffer.rewind()
buffer.get(data)
data
}
def compactPickledBytes(buffer: ByteBuffer): ByteString = {
ByteString(compactPickledBytesToArray(buffer))
}
def byteSequenceToBuffer(bytes: IndexedSeq[Byte]): ByteBuffer =
ByteBuffer.wrap(bytes.toArray)
def bytesToInts(bytes: Array[Byte], bigEndian: Boolean): Array[Int] = {
val ret = new Array[Int](bytes.length / 4)
bytesToIntsMut(bytes, ret, bigEndian)
ret
}
def intsToBytes(ints: Array[Int], bigEndian: Boolean): Array[Byte] = {
val ret = new Array[Byte](ints.length * 4)
intsToBytesMut(ints, ret, bigEndian)
ret
}
def getIntFromWord(arr: Array[Byte]): Int = {
ByteBuffer.wrap(arr, 0, 4).order(ByteOrder.LITTLE_ENDIAN).getInt
}
/**
* Converts array of Int to corresponding array of bytes. Due to performance reasons needs to be as fast as possible
* which means usage of while loops and var's.
*
   * @param arr - array of Ints to convert
   * @param b - array for the resulting byte conversion. It will be mutated in place, and its length needs to be equal to
* `(arr.length * 4)`
* @param bigEndian - param specifying which int representation should be used.
* @return Unit
*/
def intsToBytesMut(arr: Array[Int], b: Array[Byte], bigEndian: Boolean) {
if (!bigEndian) {
var off = 0
var i = 0
while (i < arr.length) {
val ii = arr(i)
b(off) = (ii & 0xff).toByte
off += 1
b(off) = ((ii >> 8) & 0xff).toByte
off += 1
b(off) = ((ii >> 16) & 0xff).toByte
off += 1
b(off) = ((ii >> 24) & 0xff).toByte
off += 1
i = i + 1
}
} else {
var off = 0
var i = 0
while (i < arr.length) {
val ii = arr(i)
b(off) = ((ii >> 24) & 0xff).toByte
off += 1
b(off) = ((ii >> 16) & 0xff).toByte
off += 1
b(off) = ((ii >> 8) & 0xff).toByte
off += 1
b(off) = (ii & 0xff).toByte
off += 1
i = i + 1
}
}
}
/**
* Converts array of bytes to corresponding array of ints. Due to performance reasons needs to be as fast as possible
* which means usage of while loops and var's.
*
* @param b - array of bytes to convert
   * @param arr - array for the resulting int conversion. It will be mutated in place, and its length needs to be equal to
* `(b.length / 4)`
* @param bigEndian - param specifying which int representation should be used.
* @return Unit
*/
def bytesToIntsMut(b: Array[Byte], arr: Array[Int], bigEndian: Boolean) {
if (!bigEndian) {
var off = 0
var i = 0
while (i < arr.length) {
var ii: Int = b(off) & 0x000000ff
off += 1
ii |= (b(off) << 8) & 0x0000ff00
off += 1
ii |= (b(off) << 16) & 0x00ff0000
off += 1
ii |= (b(off) << 24)
off += 1
arr(i) = ii
i = i + 1
}
} else {
var off = 0
var i = 0
while (i < arr.length) {
var ii: Int = b(off) << 24
off += 1
ii |= (b(off) << 16) & 0x00ff0000
off += 1
ii |= (b(off) << 8) & 0x0000ff00
off += 1
ii |= b(off) & 0x000000ff
off += 1
arr(i) = ii
i = i + 1
}
}
}
}
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/utils/ByteUtils.scala | Scala | mit | 6,606 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.concurrent._
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import scala.util.DynamicVariable
import org.apache.spark.SparkContext
import org.apache.spark.internal.config._
import org.apache.spark.util.Utils
/**
* Asynchronously passes SparkListenerEvents to registered SparkListeners.
*
* Until `start()` is called, all posted events are only buffered. Only after this listener bus
* has started will events be actually propagated to all attached listeners. This listener bus
* is stopped when `stop()` is called, and it will drop further events after stopping.
*/
private[spark] class LiveListenerBus(val sparkContext: SparkContext) extends SparkListenerBus {
self =>
import LiveListenerBus._
// Cap the capacity of the event queue so we get an explicit error (rather than
// an OOM exception) if it's perpetually being added to more quickly than it's being drained.
private lazy val eventQueue = new LinkedBlockingQueue[SparkListenerEvent](
sparkContext.conf.get(LISTENER_BUS_EVENT_QUEUE_CAPACITY))
// Indicate if `start()` is called
private val started = new AtomicBoolean(false)
// Indicate if `stop()` is called
private val stopped = new AtomicBoolean(false)
/** A counter for dropped events. It will be reset every time we log it. */
private val droppedEventsCounter = new AtomicLong(0L)
/** When `droppedEventsCounter` was logged last time in milliseconds. */
@volatile private var lastReportTimestamp = 0L
// Indicate if we are processing some event
// Guarded by `self`
private var processingEvent = false
private val logDroppedEvent = new AtomicBoolean(false)
  // A semaphore counting the events available in the queue; it is also released once on stop to wake the listener thread
private val eventLock = new Semaphore(0)
private val listenerThread = new Thread(name) {
setDaemon(true)
override def run(): Unit = Utils.tryOrStopSparkContext(sparkContext) {
LiveListenerBus.withinListenerThread.withValue(true) {
while (true) {
eventLock.acquire()
self.synchronized {
processingEvent = true
}
try {
val event = eventQueue.poll
if (event == null) {
// Get out of the while loop and shutdown the daemon thread
if (!stopped.get) {
throw new IllegalStateException("Polling `null` from eventQueue means" +
" the listener bus has been stopped. So `stopped` must be true")
}
return
}
postToAll(event)
} finally {
self.synchronized {
processingEvent = false
}
}
}
}
}
}
/**
* Start sending events to attached listeners.
*
* This first sends out all buffered events posted before this listener bus has started, then
* listens for any additional events asynchronously while the listener bus is still running.
* This should only be called once.
*
*/
def start(): Unit = {
if (started.compareAndSet(false, true)) {
listenerThread.start()
} else {
throw new IllegalStateException(s"$name already started!")
}
}
def post(event: SparkListenerEvent): Unit = {
if (stopped.get) {
// Drop further events to make `listenerThread` exit ASAP
logError(s"$name has already stopped! Dropping event $event")
return
}
val eventAdded = eventQueue.offer(event)
if (eventAdded) {
eventLock.release()
} else {
onDropEvent(event)
droppedEventsCounter.incrementAndGet()
}
val droppedEvents = droppedEventsCounter.get
if (droppedEvents > 0) {
// Don't log too frequently
if (System.currentTimeMillis() - lastReportTimestamp >= 60 * 1000) {
// There may be multiple threads trying to decrease droppedEventsCounter.
// Use "compareAndSet" to make sure only one thread can win.
// And if another thread is increasing droppedEventsCounter, "compareAndSet" will fail and
// then that thread will update it.
if (droppedEventsCounter.compareAndSet(droppedEvents, 0)) {
val prevLastReportTimestamp = lastReportTimestamp
lastReportTimestamp = System.currentTimeMillis()
logWarning(s"Dropped $droppedEvents SparkListenerEvents since " +
new java.util.Date(prevLastReportTimestamp))
}
}
}
}
/**
* For testing only. Wait until there are no more events in the queue, or until the specified
* time has elapsed. Throw `TimeoutException` if the specified time elapsed before the queue
* emptied.
* Exposed for testing.
*/
@throws(classOf[TimeoutException])
def waitUntilEmpty(timeoutMillis: Long): Unit = {
val finishTime = System.currentTimeMillis + timeoutMillis
while (!queueIsEmpty) {
if (System.currentTimeMillis > finishTime) {
throw new TimeoutException(
s"The event queue is not empty after $timeoutMillis milliseconds")
}
/* Sleep rather than using wait/notify, because this is used only for testing and
* wait/notify add overhead in the general case. */
Thread.sleep(10)
}
}
/**
* For testing only. Return whether the listener daemon thread is still alive.
* Exposed for testing.
*/
def listenerThreadIsAlive: Boolean = listenerThread.isAlive
/**
* Return whether the event queue is empty.
*
* The use of synchronized here guarantees that all events that once belonged to this queue
* have already been processed by all attached listeners, if this returns true.
*/
private def queueIsEmpty: Boolean = synchronized { eventQueue.isEmpty && !processingEvent }
/**
* Stop the listener bus. It will wait until the queued events have been processed, but drop the
* new events after stopping.
*/
def stop(): Unit = {
if (!started.get()) {
throw new IllegalStateException(s"Attempted to stop $name that has not yet started!")
}
if (stopped.compareAndSet(false, true)) {
// Call eventLock.release() so that listenerThread will poll `null` from `eventQueue` and know
// `stop` is called.
eventLock.release()
listenerThread.join()
} else {
// Keep quiet
}
}
/**
* If the event queue exceeds its capacity, the new events will be dropped. The subclasses will be
* notified with the dropped events.
*
* Note: `onDropEvent` can be called in any thread.
*/
def onDropEvent(event: SparkListenerEvent): Unit = {
if (logDroppedEvent.compareAndSet(false, true)) {
// Only log the following message once to avoid duplicated annoying logs.
logError("Dropping SparkListenerEvent because no remaining room in event queue. " +
"This likely means one of the SparkListeners is too slow and cannot keep up with " +
"the rate at which tasks are being started by the scheduler.")
}
}
}
private[spark] object LiveListenerBus {
// Allows for Context to check whether stop() call is made within listener thread
val withinListenerThread: DynamicVariable[Boolean] = new DynamicVariable[Boolean](false)
/** The thread name of Spark listener bus */
val name = "SparkListenerBus"
}
| mzl9039/spark | core/src/main/scala/org/apache/spark/scheduler/LiveListenerBus.scala | Scala | apache-2.0 | 8,100 |
package clasp.core.sdktools
import scala.language.postfixOps
import scala.concurrent.duration._
import scala.sys.process._
import scala.util.matching.Regex
import org.slf4j.LoggerFactory
object Command {
lazy val log = LoggerFactory.getLogger(getClass())
import log.{error, debug, info, trace}
def run(command: String, timeout: FiniteDuration = 0 seconds,
killSignal: Int = 9): Option[String] = {
debug(s"Executing '$command'")
val out = new StringBuilder
val logger = ProcessLogger(
(o: String) => out.append(o),
(e: String) => out.append(e))
val ret = if (timeout.toSeconds != 0) {
s"timeout -s $killSignal ${timeout.toSeconds} $command" ! logger
} else {
command ! logger
}
    debug(s"Finished executing '$command'")
debug(s"Output: ${out.toString}")
if (ret != 0) {
return None
} else {
return Some(out.toString)
}
}
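  // For example, Command.run("echo hello", 5 seconds) should yield Some("hello"), while a non-zero exit
  // status (including a kill by GNU `timeout` when the limit is exceeded) yields None.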
def runSeq(command: Seq[String], timeout: FiniteDuration = 0 seconds,
killSignal: Int = 9): Option[String] = {
run(command.mkString(" "), timeout, killSignal)
}
def runAndParse(command: String, regex: Regex,
timeout: FiniteDuration = 0 seconds, killSignal: Int = 9):
Option[Vector[String]] = {
val res = run(command, timeout, killSignal)
res.map((s: String) => {
val r = for (regex(name) <- regex.findAllIn(s)) yield name
r.toVector
})
}
def runSeqAndParse(command: Seq[String], regex: Regex,
timeout: FiniteDuration = 0 seconds, killSignal: Int = 9):
Option[Vector[String]] = {
runAndParse(command.mkString(" "), regex, timeout, killSignal)
}
}
| hamiltont/clasp | src/clasp/core/sdktools/command.scala | Scala | mit | 1,647 |
package org.scalawiki.copyvio
import org.scalawiki.dto.Page
import org.scalawiki.dto.cmd.Action
import org.scalawiki.dto.cmd.query.prop._
import org.scalawiki.dto.cmd.query.{PageIdsParam, Query}
import org.scalawiki.http.HttpClient
import org.scalawiki.query.QueryLibrary
import org.scalawiki.{MwBot, WithBot}
import play.api.libs.functional.syntax._
import play.api.libs.json.{Json, _}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class CopyVio(val http: HttpClient) {
implicit val sourceReads =
((__ \\ "url").read[String] ~
(__ \\ "confidence").read[Double] ~
(__ \\ "violation").read[String] ~
(__ \\ "skipped").read[Boolean]) (CopyVioSource.apply _)
val sourcesReads = (__ \\ "sources").read[Seq[CopyVioSource]]
def baseUrl(project: String = "wikipedia", lang: String = "uk") =
s"https://tools.wmflabs.org/copyvios/api.json?version=1&action=search&project=$project&lang=$lang"
def search(title: String, lang: String = "uk", project: String = "wikipedia") =
http.get(baseUrl(project, lang) + s"&title=$title") map parseResponse
def searchByRevId(revId: Long, lang: String = "uk", project: String = "wikipedia") =
http.get(baseUrl(project, lang) + s"&oldid=$revId") map parseResponse
def searchByPage(page: Page) = {
println(s"# [[${page.title}]]")
searchByRevId(page.revisions.head.revId.get)
}
def parseResponse(body: String) = Json.parse(body).validate(sourcesReads).get
}
object CopyVio extends WithBot with QueryLibrary {
val host = MwBot.ukWiki
def pagesByIds(ids: Seq[Long]): Future[Seq[Page]] = {
import org.scalawiki.dto.cmd.query.prop.rvprop._
val action = Action(Query(PageIdsParam(ids), Prop(Info(), Revisions(RvProp(Ids) /*,RvLimit("max")*/))))
bot.run(action)
}
def main(args: Array[String]) {
val copyVio = new CopyVio(HttpClient.get())
for (revIds <- articlesWithTemplate("ΠΡΠΊΡΠΏΠ΅Π΄ΡΡ Π»ΡΠ±ΠΈΡΡ ΠΏΠ°ΠΌ'ΡΡΠΊΠΈ");
pages <- pagesByIds(revIds);
page <- pages;
sources <- copyVio.searchByPage(page);
suspected <- sources.filter(_.isPossible)) {
println(s"## url: [${suspected.url}], violation ${suspected.violation}, confidence ${suspected.confidence}")
}
}
} | intracer/scalawiki | scalawiki-bots/src/main/scala/org/scalawiki/copyvio/CopyVio.scala | Scala | apache-2.0 | 2,276 |
package entities
trait BaseEntity {
val id: Long
def isValid: Boolean = true
} | Kanris826/spray-slick-swagger | src/main/scala/entities/BaseEntity.scala | Scala | apache-2.0 | 84 |
package net.mentalarray.doozie.Tasks
/**
* Created by bwilson on 12/16/14.
*/
class FileBuilderTask(Jobname: String) extends WorkflowTask(Jobname) {
// Set defaults and accessors for the Builder Runner.
private var _srcSys: String = "hdfs"
private var _destSys: String = "hdfs"
private var _inPath: String = ""
private var _outPath: String = ""
private var _srcCheckDel: Boolean = false
private var _stringAdd: String = ""
// Getters
def inPath: String = {
_inPath
}
def outPath: String = {
_outPath
}
def srcCheckDel: Boolean = {
_srcCheckDel
}
def stringAdd: String = {
_stringAdd
}
def srcSys: String = {
_srcSys
}
def destSys: String = {
_destSys
}
// Setters
def inPath(inPath: => String): FileBuilderTask = {
_inPath = inPath
this
}
def outPath(outPath: => String): FileBuilderTask = {
_outPath = outPath
this
}
  def srcCheckDel(delSrc: => Boolean): FileBuilderTask = {
    _srcCheckDel = delSrc
    this
  }
def stringAdd(stringAdd: => String): FileBuilderTask = {
_stringAdd = stringAdd
this
}
def srcSys(srcSys: => String): FileBuilderTask = {
_srcSys = srcSys
this
}
def destSys(destSys: => String): FileBuilderTask = {
_destSys = destSys
this
}
// Perform validation of all input parameters.
override def validate = {
//Verify that input path has been supplied.
if (inPath.isNullOrWhitespace)
throw new WorkflowStateException(this, "Input directory path must be specified.")
//Verify that the target output path has been supplied.
if (outPath.isNullOrWhitespace)
throw new WorkflowStateException(this, "Output directory path must be specified.")
}
}
object FileBuilderTask {
def apply(Jobname: String)(cfgFn: FileBuilderTask => Unit): FileBuilderTask = {
val state = new FileBuilderTask(Jobname)
cfgFn(state)
state
}
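  // Usage sketch (the job name and paths below are placeholders):
  //   FileBuilderTask("copy-report") { t => t.inPath("/data/in").outPath("/data/out") }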
} | antagonist112358/tomahawk | workflow-engine/src/net/mentalarray/doozie/Tasks/FileBuilderTask.scala | Scala | apache-2.0 | 1,937 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.io.{FileSystem => _, _}
import java.net.{InetAddress, UnknownHostException, URI}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{Locale, Properties, UUID}
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.JavaConverters._
import scala.collection.immutable.{Map => IMap}
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, ListBuffer, Map}
import scala.util.control.NonFatal
import com.google.common.base.Objects
import com.google.common.io.Files
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce.MRJobConfig
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.util.StringUtils
import org.apache.hadoop.yarn.api._
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment
import org.apache.hadoop.yarn.api.protocolrecords._
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.client.api.{YarnClient, YarnClientApplication}
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier
import org.apache.hadoop.yarn.util.Records
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.api.python.PythonUtils
import org.apache.spark.deploy.{SparkApplication, SparkHadoopUtil}
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
import org.apache.spark.deploy.yarn.ResourceRequestHelper._
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Python._
import org.apache.spark.launcher.{LauncherBackend, SparkAppHandle, YarnCommandBuilderUtils}
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.rpc.RpcEnv
import org.apache.spark.util.{CallerContext, Utils, YarnContainerInfoHelper}
private[spark] class Client(
val args: ClientArguments,
val sparkConf: SparkConf,
val rpcEnv: RpcEnv)
extends Logging {
import Client._
import YarnSparkHadoopUtil._
private val yarnClient = YarnClient.createYarnClient
private val hadoopConf = new YarnConfiguration(SparkHadoopUtil.newConfiguration(sparkConf))
private val isClusterMode = sparkConf.get(SUBMIT_DEPLOY_MODE) == "cluster"
private val isClientUnmanagedAMEnabled = sparkConf.get(YARN_UNMANAGED_AM) && !isClusterMode
private var appMaster: ApplicationMaster = _
private var stagingDirPath: Path = _
// AM related configurations
private val amMemory = if (isClusterMode) {
sparkConf.get(DRIVER_MEMORY).toInt
} else {
sparkConf.get(AM_MEMORY).toInt
}
private val amMemoryOverhead = {
val amMemoryOverheadEntry = if (isClusterMode) DRIVER_MEMORY_OVERHEAD else AM_MEMORY_OVERHEAD
sparkConf.get(amMemoryOverheadEntry).getOrElse(
math.max((MEMORY_OVERHEAD_FACTOR * amMemory).toLong,
ResourceProfile.MEMORY_OVERHEAD_MIN_MIB)).toInt
}
private val amCores = if (isClusterMode) {
sparkConf.get(DRIVER_CORES)
} else {
sparkConf.get(AM_CORES)
}
// Executor related configurations
private val executorMemory = sparkConf.get(EXECUTOR_MEMORY)
// Executor offHeap memory in MiB.
protected val executorOffHeapMemory = Utils.executorOffHeapMemorySizeAsMb(sparkConf)
private val executorMemoryOverhead = sparkConf.get(EXECUTOR_MEMORY_OVERHEAD).getOrElse(
math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toLong,
ResourceProfile.MEMORY_OVERHEAD_MIN_MIB)).toInt
private val isPython = sparkConf.get(IS_PYTHON_APP)
private val pysparkWorkerMemory: Int = if (isPython) {
sparkConf.get(PYSPARK_EXECUTOR_MEMORY).map(_.toInt).getOrElse(0)
} else {
0
}
private val distCacheMgr = new ClientDistributedCacheManager()
private val cachedResourcesConf = new SparkConf(false)
private val keytab = sparkConf.get(KEYTAB).orNull
private val amKeytabFileName: Option[String] = if (keytab != null && isClusterMode) {
val principal = sparkConf.get(PRINCIPAL).orNull
require((principal == null) == (keytab == null),
"Both principal and keytab must be defined, or neither.")
logInfo(s"Kerberos credentials: principal = $principal, keytab = $keytab")
// Generate a file name that can be used for the keytab file, that does not conflict
// with any user file.
Some(new File(keytab).getName() + "-" + UUID.randomUUID().toString)
} else {
None
}
require(keytab == null || !Utils.isLocalUri(keytab), "Keytab should reference a local file.")
private val launcherBackend = new LauncherBackend() {
override protected def conf: SparkConf = sparkConf
override def onStopRequest(): Unit = {
if (isClusterMode && appId != null) {
yarnClient.killApplication(appId)
} else {
setState(SparkAppHandle.State.KILLED)
stop()
}
}
}
private val fireAndForget = isClusterMode && !sparkConf.get(WAIT_FOR_APP_COMPLETION)
private var appId: ApplicationId = null
def reportLauncherState(state: SparkAppHandle.State): Unit = {
launcherBackend.setState(state)
}
def stop(): Unit = {
if (appMaster != null) {
appMaster.stopUnmanaged(stagingDirPath)
}
launcherBackend.close()
yarnClient.stop()
}
/**
* Submit an application running our ApplicationMaster to the ResourceManager.
*
* The stable Yarn API provides a convenience method (YarnClient#createApplication) for
* creating applications and setting up the application submission context. This was not
* available in the alpha API.
*/
def submitApplication(): ApplicationId = {
ResourceRequestHelper.validateResources(sparkConf)
var appId: ApplicationId = null
try {
launcherBackend.connect()
yarnClient.init(hadoopConf)
yarnClient.start()
logInfo("Requesting a new application from cluster with %d NodeManagers"
.format(yarnClient.getYarnClusterMetrics.getNumNodeManagers))
// Get a new application from our RM
val newApp = yarnClient.createApplication()
val newAppResponse = newApp.getNewApplicationResponse()
appId = newAppResponse.getApplicationId()
// The app staging dir based on the STAGING_DIR configuration if configured
// otherwise based on the users home directory.
// scalastyle:off FileSystemGet
val appStagingBaseDir = sparkConf.get(STAGING_DIR)
.map { new Path(_, UserGroupInformation.getCurrentUser.getShortUserName) }
.getOrElse(FileSystem.get(hadoopConf).getHomeDirectory())
stagingDirPath = new Path(appStagingBaseDir, getAppStagingDir(appId))
// scalastyle:on FileSystemGet
new CallerContext("CLIENT", sparkConf.get(APP_CALLER_CONTEXT),
Option(appId.toString)).setCurrentContext()
// Verify whether the cluster has enough resources for our AM
verifyClusterResources(newAppResponse)
// Set up the appropriate contexts to launch our AM
val containerContext = createContainerLaunchContext(newAppResponse)
val appContext = createApplicationSubmissionContext(newApp, containerContext)
// Finally, submit and monitor the application
logInfo(s"Submitting application $appId to ResourceManager")
yarnClient.submitApplication(appContext)
launcherBackend.setAppId(appId.toString)
reportLauncherState(SparkAppHandle.State.SUBMITTED)
appId
} catch {
case e: Throwable =>
if (stagingDirPath != null) {
cleanupStagingDir()
}
throw e
}
}
/**
* Cleanup application staging directory.
*/
private def cleanupStagingDir(): Unit = {
if (sparkConf.get(PRESERVE_STAGING_FILES)) {
return
}
def cleanupStagingDirInternal(): Unit = {
try {
val fs = stagingDirPath.getFileSystem(hadoopConf)
if (fs.delete(stagingDirPath, true)) {
logInfo(s"Deleted staging directory $stagingDirPath")
}
} catch {
case ioe: IOException =>
logWarning("Failed to cleanup staging dir " + stagingDirPath, ioe)
}
}
cleanupStagingDirInternal()
}
/**
* Set up the context for submitting our ApplicationMaster.
* This uses the YarnClientApplication not available in the Yarn alpha API.
*/
def createApplicationSubmissionContext(
newApp: YarnClientApplication,
containerContext: ContainerLaunchContext): ApplicationSubmissionContext = {
val componentName = if (isClusterMode) {
config.YARN_DRIVER_RESOURCE_TYPES_PREFIX
} else {
config.YARN_AM_RESOURCE_TYPES_PREFIX
}
val yarnAMResources = getYarnResourcesAndAmounts(sparkConf, componentName)
val amResources = yarnAMResources ++
getYarnResourcesFromSparkResources(SPARK_DRIVER_PREFIX, sparkConf)
logDebug(s"AM resources: $amResources")
val appContext = newApp.getApplicationSubmissionContext
appContext.setApplicationName(sparkConf.get("spark.app.name", "Spark"))
appContext.setQueue(sparkConf.get(QUEUE_NAME))
appContext.setAMContainerSpec(containerContext)
appContext.setApplicationType(sparkConf.get(APPLICATION_TYPE))
sparkConf.get(APPLICATION_TAGS).foreach { tags =>
appContext.setApplicationTags(new java.util.HashSet[String](tags.asJava))
}
sparkConf.get(MAX_APP_ATTEMPTS) match {
case Some(v) => appContext.setMaxAppAttempts(v)
case None => logDebug(s"${MAX_APP_ATTEMPTS.key} is not set. " +
"Cluster's default value will be used.")
}
sparkConf.get(AM_ATTEMPT_FAILURE_VALIDITY_INTERVAL_MS).foreach { interval =>
appContext.setAttemptFailuresValidityInterval(interval)
}
val capability = Records.newRecord(classOf[Resource])
capability.setMemory(amMemory + amMemoryOverhead)
capability.setVirtualCores(amCores)
if (amResources.nonEmpty) {
ResourceRequestHelper.setResourceRequests(amResources, capability)
}
logDebug(s"Created resource capability for AM request: $capability")
sparkConf.get(AM_NODE_LABEL_EXPRESSION) match {
case Some(expr) =>
val amRequest = Records.newRecord(classOf[ResourceRequest])
amRequest.setResourceName(ResourceRequest.ANY)
amRequest.setPriority(Priority.newInstance(0))
amRequest.setCapability(capability)
amRequest.setNumContainers(1)
amRequest.setNodeLabelExpression(expr)
appContext.setAMContainerResourceRequest(amRequest)
case None =>
appContext.setResource(capability)
}
sparkConf.get(ROLLED_LOG_INCLUDE_PATTERN).foreach { includePattern =>
try {
val logAggregationContext = Records.newRecord(classOf[LogAggregationContext])
logAggregationContext.setRolledLogsIncludePattern(includePattern)
sparkConf.get(ROLLED_LOG_EXCLUDE_PATTERN).foreach { excludePattern =>
logAggregationContext.setRolledLogsExcludePattern(excludePattern)
}
appContext.setLogAggregationContext(logAggregationContext)
} catch {
case NonFatal(e) =>
logWarning(s"Ignoring ${ROLLED_LOG_INCLUDE_PATTERN.key} because the version of YARN " +
"does not support it", e)
}
}
appContext.setUnmanagedAM(isClientUnmanagedAMEnabled)
sparkConf.get(APPLICATION_PRIORITY).foreach { appPriority =>
appContext.setPriority(Priority.newInstance(appPriority))
}
appContext
}
/**
* Set up security tokens for launching our ApplicationMaster container.
*
* In client mode, a set of credentials has been obtained by the scheduler, so they are copied
* and sent to the AM. In cluster mode, new credentials are obtained and then sent to the AM,
* along with whatever credentials the current user already has.
*/
private def setupSecurityToken(amContainer: ContainerLaunchContext): Unit = {
val currentUser = UserGroupInformation.getCurrentUser()
val credentials = currentUser.getCredentials()
if (isClusterMode) {
val credentialManager = new HadoopDelegationTokenManager(sparkConf, hadoopConf, null)
credentialManager.obtainDelegationTokens(credentials)
}
val serializedCreds = SparkHadoopUtil.get.serialize(credentials)
amContainer.setTokens(ByteBuffer.wrap(serializedCreds))
}
/** Get the application report from the ResourceManager for an application we have submitted. */
def getApplicationReport(appId: ApplicationId): ApplicationReport =
yarnClient.getApplicationReport(appId)
/**
* Return the security token used by this client to communicate with the ApplicationMaster.
* If no security is enabled, the token returned by the report is null.
*/
private def getClientToken(report: ApplicationReport): String =
Option(report.getClientToAMToken).map(_.toString).getOrElse("")
/**
* Fail fast if we have requested more resources per container than is available in the cluster.
*/
private def verifyClusterResources(newAppResponse: GetNewApplicationResponse): Unit = {
val maxMem = newAppResponse.getMaximumResourceCapability().getMemory()
logInfo("Verifying our application has not requested more than the maximum " +
s"memory capability of the cluster ($maxMem MB per container)")
val executorMem =
executorMemory + executorOffHeapMemory + executorMemoryOverhead + pysparkWorkerMemory
if (executorMem > maxMem) {
throw new IllegalArgumentException(s"Required executor memory ($executorMemory MB), " +
s"offHeap memory ($executorOffHeapMemory) MB, overhead ($executorMemoryOverhead MB), " +
s"and PySpark memory ($pysparkWorkerMemory MB) is above the max threshold ($maxMem MB) " +
"of this cluster! Please check the values of 'yarn.scheduler.maximum-allocation-mb' " +
"and/or 'yarn.nodemanager.resource.memory-mb'.")
}
val amMem = amMemory + amMemoryOverhead
if (amMem > maxMem) {
throw new IllegalArgumentException(s"Required AM memory ($amMemory" +
s"+$amMemoryOverhead MB) is above the max threshold ($maxMem MB) of this cluster! " +
"Please check the values of 'yarn.scheduler.maximum-allocation-mb' and/or " +
"'yarn.nodemanager.resource.memory-mb'.")
}
logInfo("Will allocate AM container, with %d MB memory including %d MB overhead".format(
amMem,
amMemoryOverhead))
// We could add checks to make sure the entire cluster has enough resources but that involves
// getting all the node reports and computing ourselves.
}
/**
* Copy the given file to a remote file system (e.g. HDFS) if needed.
* The file is only copied if the source and destination file systems are different or the source
* scheme is "file". This is used for preparing resources for launching the ApplicationMaster
* container. Exposed for testing.
*/
private[yarn] def copyFileToRemote(
destDir: Path,
srcPath: Path,
replication: Short,
symlinkCache: Map[URI, Path],
force: Boolean = false,
destName: Option[String] = None): Path = {
val destFs = destDir.getFileSystem(hadoopConf)
val srcFs = srcPath.getFileSystem(hadoopConf)
var destPath = srcPath
if (force || !compareFs(srcFs, destFs) || "file".equals(srcFs.getScheme)) {
destPath = new Path(destDir, destName.getOrElse(srcPath.getName()))
logInfo(s"Uploading resource $srcPath -> $destPath")
FileUtil.copy(srcFs, srcPath, destFs, destPath, false, hadoopConf)
destFs.setReplication(destPath, replication)
destFs.setPermission(destPath, new FsPermission(APP_FILE_PERMISSION))
} else {
logInfo(s"Source and destination file systems are the same. Not copying $srcPath")
}
// Resolve any symlinks in the URI path so using a "current" symlink to point to a specific
// version shows the specific version in the distributed cache configuration
val qualifiedDestPath = destFs.makeQualified(destPath)
val qualifiedDestDir = qualifiedDestPath.getParent
val resolvedDestDir = symlinkCache.getOrElseUpdate(qualifiedDestDir.toUri(), {
val fc = FileContext.getFileContext(qualifiedDestDir.toUri(), hadoopConf)
fc.resolvePath(qualifiedDestDir)
})
new Path(resolvedDestDir, qualifiedDestPath.getName())
}
/**
* Upload any resources to the distributed cache if needed. If a resource is intended to be
* consumed locally, set up the appropriate config for downstream code to handle it properly.
* This is used for setting up a container launch context for our ApplicationMaster.
* Exposed for testing.
*/
def prepareLocalResources(
destDir: Path,
pySparkArchives: Seq[String]): HashMap[String, LocalResource] = {
logInfo("Preparing resources for our AM container")
// Upload Spark and the application JAR to the remote file system if necessary,
// and add them as local resources to the application master.
val fs = destDir.getFileSystem(hadoopConf)
// Used to keep track of URIs added to the distributed cache. If the same URI is added
// multiple times, YARN will fail to launch containers for the app with an internal
// error.
val distributedUris = new HashSet[String]
    // Used to keep track of the names of files added to the distributed cache. If files with
    // the same name but different paths are added multiple times, YARN will fail to launch
    // containers for the app with an internal error.
val distributedNames = new HashSet[String]
val replication = sparkConf.get(STAGING_FILE_REPLICATION).map(_.toShort)
.getOrElse(fs.getDefaultReplication(destDir))
val localResources = HashMap[String, LocalResource]()
FileSystem.mkdirs(fs, destDir, new FsPermission(STAGING_DIR_PERMISSION))
val statCache: Map[URI, FileStatus] = HashMap[URI, FileStatus]()
val symlinkCache: Map[URI, Path] = HashMap[URI, Path]()
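    // Register a URI in the bookkeeping sets above. Returns false (and logs a warning) if either
    // the exact URI or another file with the same name has already been distributed, so callers
    // can skip adding the duplicate resource.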
def addDistributedUri(uri: URI): Boolean = {
val uriStr = uri.toString()
val fileName = new File(uri.getPath).getName
if (distributedUris.contains(uriStr)) {
logWarning(s"Same path resource $uri added multiple times to distributed cache.")
false
} else if (distributedNames.contains(fileName)) {
logWarning(s"Same name resource $uri added multiple times to distributed cache")
false
} else {
distributedUris += uriStr
distributedNames += fileName
true
}
}
/*
* Distribute a file to the cluster.
*
* If the file's path is a "local:" URI, it's actually not distributed. Other files are copied
* to HDFS (if not already there) and added to the application's distributed cache.
*
* @param path URI of the file to distribute.
* @param resType Type of resource being distributed.
* @param destName Name of the file in the distributed cache.
* @param targetDir Subdirectory where to place the file.
* @param appMasterOnly Whether to distribute only to the AM.
* @return A 2-tuple. First item is whether the file is a "local:" URI. Second item is the
* localized path for non-local paths, or the input `path` for local paths.
* The localized path will be null if the URI has already been added to the cache.
*/
def distribute(
path: String,
resType: LocalResourceType = LocalResourceType.FILE,
destName: Option[String] = None,
targetDir: Option[String] = None,
appMasterOnly: Boolean = false): (Boolean, String) = {
val trimmedPath = path.trim()
val localURI = Utils.resolveURI(trimmedPath)
if (localURI.getScheme != Utils.LOCAL_SCHEME) {
if (addDistributedUri(localURI)) {
val localPath = getQualifiedLocalPath(localURI, hadoopConf)
val linkname = targetDir.map(_ + "/").getOrElse("") +
destName.orElse(Option(localURI.getFragment())).getOrElse(localPath.getName())
val destPath = copyFileToRemote(destDir, localPath, replication, symlinkCache)
val destFs = FileSystem.get(destPath.toUri(), hadoopConf)
distCacheMgr.addResource(
destFs, hadoopConf, destPath, localResources, resType, linkname, statCache,
appMasterOnly = appMasterOnly)
(false, linkname)
} else {
(false, null)
}
} else {
(true, trimmedPath)
}
}
    // If we passed in a keytab, make sure we copy the keytab to the staging directory on
    // HDFS, and set up the relevant environment vars, so the AM can log in again.
amKeytabFileName.foreach { kt =>
logInfo("To enable the AM to login from keytab, credentials are being copied over to the AM" +
" via the YARN Secure Distributed Cache.")
val (_, localizedPath) = distribute(keytab,
destName = Some(kt),
appMasterOnly = true)
require(localizedPath != null, "Keytab file already distributed.")
}
/**
* Add Spark to the cache. There are two settings that control what files to add to the cache:
* - if a Spark archive is defined, use the archive. The archive is expected to contain
* jar files at its root directory.
* - if a list of jars is provided, filter the non-local ones, resolve globs, and
* add the found files to the cache.
*
* Note that the archive cannot be a "local" URI. If none of the above settings are found,
* then upload all files found in $SPARK_HOME/jars.
*/
val sparkArchive = sparkConf.get(SPARK_ARCHIVE)
if (sparkArchive.isDefined) {
val archive = sparkArchive.get
require(!Utils.isLocalUri(archive), s"${SPARK_ARCHIVE.key} cannot be a local URI.")
distribute(Utils.resolveURI(archive).toString,
resType = LocalResourceType.ARCHIVE,
destName = Some(LOCALIZED_LIB_DIR))
} else {
sparkConf.get(SPARK_JARS) match {
case Some(jars) =>
// Break the list of jars to upload, and resolve globs.
val localJars = new ArrayBuffer[String]()
jars.foreach { jar =>
if (!Utils.isLocalUri(jar)) {
val path = getQualifiedLocalPath(Utils.resolveURI(jar), hadoopConf)
val pathFs = FileSystem.get(path.toUri(), hadoopConf)
val fss = pathFs.globStatus(path)
if (fss == null) {
throw new FileNotFoundException(s"Path ${path.toString} does not exist")
}
fss.filter(_.isFile()).foreach { entry =>
val uri = entry.getPath().toUri()
statCache.update(uri, entry)
distribute(uri.toString(), targetDir = Some(LOCALIZED_LIB_DIR))
}
} else {
localJars += jar
}
}
// Propagate the local URIs to the containers using the configuration.
sparkConf.set(SPARK_JARS, localJars.toSeq)
case None =>
// No configuration, so fall back to uploading local jar files.
logWarning(s"Neither ${SPARK_JARS.key} nor ${SPARK_ARCHIVE.key} is set, falling back " +
"to uploading libraries under SPARK_HOME.")
val jarsDir = new File(YarnCommandBuilderUtils.findJarsDir(
sparkConf.getenv("SPARK_HOME")))
val jarsArchive = File.createTempFile(LOCALIZED_LIB_DIR, ".zip",
new File(Utils.getLocalDir(sparkConf)))
val jarsStream = new ZipOutputStream(new FileOutputStream(jarsArchive))
try {
jarsStream.setLevel(0)
jarsDir.listFiles().foreach { f =>
if (f.isFile && f.getName.toLowerCase(Locale.ROOT).endsWith(".jar") && f.canRead) {
jarsStream.putNextEntry(new ZipEntry(f.getName))
Files.copy(f, jarsStream)
jarsStream.closeEntry()
}
}
} finally {
jarsStream.close()
}
distribute(jarsArchive.toURI.getPath,
resType = LocalResourceType.ARCHIVE,
destName = Some(LOCALIZED_LIB_DIR))
jarsArchive.delete()
}
}
/**
* Copy user jar to the distributed cache if their scheme is not "local".
* Otherwise, set the corresponding key in our SparkConf to handle it downstream.
*/
Option(args.userJar).filter(_.trim.nonEmpty).foreach { jar =>
val (isLocal, localizedPath) = distribute(jar, destName = Some(APP_JAR_NAME))
if (isLocal) {
require(localizedPath != null, s"Path $jar already distributed")
// If the resource is intended for local use only, handle this downstream
// by setting the appropriate property
sparkConf.set(APP_JAR, localizedPath)
}
}
/**
* Do the same for any additional resources passed in through ClientArguments.
* Each resource category is represented by a 3-tuple of:
* (1) comma separated list of resources in this category,
* (2) resource type, and
* (3) whether to add these resources to the classpath
*/
val cachedSecondaryJarLinks = ListBuffer.empty[String]
List(
(sparkConf.get(JARS_TO_DISTRIBUTE), LocalResourceType.FILE, true),
(sparkConf.get(FILES_TO_DISTRIBUTE), LocalResourceType.FILE, false),
(sparkConf.get(ARCHIVES_TO_DISTRIBUTE), LocalResourceType.ARCHIVE, false)
).foreach { case (flist, resType, addToClasspath) =>
flist.foreach { file =>
val (_, localizedPath) = distribute(file, resType = resType)
        // When the resource should be added to the classpath, silently skip jars that were
        // already added to the distributed cache.
if (addToClasspath) {
if (localizedPath != null) {
cachedSecondaryJarLinks += localizedPath
}
} else {
if (localizedPath == null) {
throw new IllegalArgumentException(s"Attempt to add ($file) multiple times" +
" to the distributed cache.")
}
}
}
}
if (cachedSecondaryJarLinks.nonEmpty) {
sparkConf.set(SECONDARY_JARS, cachedSecondaryJarLinks.toSeq)
}
if (isClusterMode && args.primaryPyFile != null) {
distribute(args.primaryPyFile, appMasterOnly = true)
}
pySparkArchives.foreach { f =>
val uri = Utils.resolveURI(f)
if (uri.getScheme != Utils.LOCAL_SCHEME) {
distribute(f)
}
}
// The python files list needs to be treated especially. All files that are not an
// archive need to be placed in a subdirectory that will be added to PYTHONPATH.
sparkConf.get(PY_FILES).foreach { f =>
val targetDir = if (f.endsWith(".py")) Some(LOCALIZED_PYTHON_DIR) else None
distribute(f, targetDir = targetDir)
}
// Update the configuration with all the distributed files, minus the conf archive. The
// conf archive will be handled by the AM differently so that we avoid having to send
// this configuration by other means. See SPARK-14602 for one reason of why this is needed.
distCacheMgr.updateConfiguration(cachedResourcesConf)
// Upload the conf archive to HDFS manually, and record its location in the configuration.
// This will allow the AM to know where the conf archive is in HDFS, so that it can be
// distributed to the containers.
//
// This code forces the archive to be copied, so that unit tests pass (since in that case both
// file systems are the same and the archive wouldn't normally be copied). In most (all?)
// deployments, the archive would be copied anyway, since it's a temp file in the local file
// system.
val remoteConfArchivePath = new Path(destDir, LOCALIZED_CONF_ARCHIVE)
val remoteFs = FileSystem.get(remoteConfArchivePath.toUri(), hadoopConf)
cachedResourcesConf.set(CACHED_CONF_ARCHIVE, remoteConfArchivePath.toString())
val localConfArchive = new Path(createConfArchive().toURI())
copyFileToRemote(destDir, localConfArchive, replication, symlinkCache, force = true,
destName = Some(LOCALIZED_CONF_ARCHIVE))
// Manually add the config archive to the cache manager so that the AM is launched with
// the proper files set up.
distCacheMgr.addResource(
remoteFs, hadoopConf, remoteConfArchivePath, localResources, LocalResourceType.ARCHIVE,
LOCALIZED_CONF_DIR, statCache, appMasterOnly = false)
localResources
}
/**
* Create an archive with the config files for distribution.
*
* These will be used by AM and executors. The files are zipped and added to the job as an
* archive, so that YARN will explode it when distributing to AM and executors. This directory
* is then added to the classpath of AM and executor process, just to make sure that everybody
* is using the same default config.
*
* This follows the order of precedence set by the startup scripts, in which HADOOP_CONF_DIR
* shows up in the classpath before YARN_CONF_DIR.
*
* Currently this makes a shallow copy of the conf directory. If there are cases where a
* Hadoop config directory contains subdirectories, this code will have to be fixed.
*
* The archive also contains some Spark configuration. Namely, it saves the contents of
* SparkConf in a file to be loaded by the AM process.
*/
private def createConfArchive(): File = {
val hadoopConfFiles = new HashMap[String, File]()
// SPARK_CONF_DIR shows up in the classpath before HADOOP_CONF_DIR/YARN_CONF_DIR
sys.env.get("SPARK_CONF_DIR").foreach { localConfDir =>
val dir = new File(localConfDir)
if (dir.isDirectory) {
val files = dir.listFiles(new FileFilter {
override def accept(pathname: File): Boolean = {
pathname.isFile && pathname.getName.endsWith(".xml")
}
})
files.foreach { f => hadoopConfFiles(f.getName) = f }
}
}
// SPARK-23630: during testing, Spark scripts filter out hadoop conf dirs so that user's
// environments do not interfere with tests. This allows a special env variable during
// tests so that custom conf dirs can be used by unit tests.
val confDirs = Seq("HADOOP_CONF_DIR", "YARN_CONF_DIR") ++
(if (Utils.isTesting) Seq("SPARK_TEST_HADOOP_CONF_DIR") else Nil)
confDirs.foreach { envKey =>
sys.env.get(envKey).foreach { path =>
val dir = new File(path)
if (dir.isDirectory()) {
val files = dir.listFiles()
if (files == null) {
logWarning("Failed to list files under directory " + dir)
} else {
files.foreach { file =>
if (file.isFile && !hadoopConfFiles.contains(file.getName())) {
hadoopConfFiles(file.getName()) = file
}
}
}
}
}
}
val confArchive = File.createTempFile(LOCALIZED_CONF_DIR, ".zip",
new File(Utils.getLocalDir(sparkConf)))
val confStream = new ZipOutputStream(new FileOutputStream(confArchive))
logDebug(s"Creating an archive with the config files for distribution at $confArchive.")
try {
confStream.setLevel(0)
      // Upload the $SPARK_CONF_DIR/log4j.properties file to the distributed cache to make sure
      // that the executors use the latest configuration instead of the default values. This is
      // required when the user changes log4j.properties directly to set the log configuration.
      // If the configuration file is provided through --files, the executors take their
      // configuration from --files instead of $SPARK_CONF_DIR/log4j.properties.
      // Also upload metrics.properties to the distributed cache if it exists on the classpath.
      // If the user specifies this file using --files, the executors use that one instead.
for { prop <- Seq("log4j.properties", "metrics.properties")
url <- Option(Utils.getContextOrSparkClassLoader.getResource(prop))
if url.getProtocol == "file" } {
val file = new File(url.getPath())
confStream.putNextEntry(new ZipEntry(file.getName()))
Files.copy(file, confStream)
confStream.closeEntry()
}
// Save the Hadoop config files under a separate directory in the archive. This directory
// is appended to the classpath so that the cluster-provided configuration takes precedence.
confStream.putNextEntry(new ZipEntry(s"$LOCALIZED_HADOOP_CONF_DIR/"))
confStream.closeEntry()
hadoopConfFiles.foreach { case (name, file) =>
if (file.canRead()) {
confStream.putNextEntry(new ZipEntry(s"$LOCALIZED_HADOOP_CONF_DIR/$name"))
Files.copy(file, confStream)
confStream.closeEntry()
}
}
// Save the YARN configuration into a separate file that will be overlayed on top of the
// cluster's Hadoop conf.
confStream.putNextEntry(new ZipEntry(SparkHadoopUtil.SPARK_HADOOP_CONF_FILE))
hadoopConf.writeXml(confStream)
confStream.closeEntry()
// Save Spark configuration to a file in the archive.
val props = confToProperties(sparkConf)
// If propagating the keytab to the AM, override the keytab name with the name of the
// distributed file.
amKeytabFileName.foreach { kt => props.setProperty(KEYTAB.key, kt) }
writePropertiesToArchive(props, SPARK_CONF_FILE, confStream)
// Write the distributed cache config to the archive.
writePropertiesToArchive(confToProperties(cachedResourcesConf), DIST_CACHE_CONF_FILE,
confStream)
} finally {
confStream.close()
}
confArchive
}
/**
* Set up the environment for launching our ApplicationMaster container.
*/
private def setupLaunchEnv(
stagingDirPath: Path,
pySparkArchives: Seq[String]): HashMap[String, String] = {
logInfo("Setting up the launch environment for our AM container")
val env = new HashMap[String, String]()
populateClasspath(args, hadoopConf, sparkConf, env, sparkConf.get(DRIVER_CLASS_PATH))
env("SPARK_YARN_STAGING_DIR") = stagingDirPath.toString
env("SPARK_USER") = UserGroupInformation.getCurrentUser().getShortUserName()
// Pick up any environment variables for the AM provided through spark.yarn.appMasterEnv.*
val amEnvPrefix = "spark.yarn.appMasterEnv."
sparkConf.getAll
.filter { case (k, v) => k.startsWith(amEnvPrefix) }
.map { case (k, v) => (k.substring(amEnvPrefix.length), v) }
.foreach { case (k, v) => YarnSparkHadoopUtil.addPathToEnvironment(env, k, v) }
// If pyFiles contains any .py files, we need to add LOCALIZED_PYTHON_DIR to the PYTHONPATH
// of the container processes too. Add all non-.py files directly to PYTHONPATH.
//
// NOTE: the code currently does not handle .py files defined with a "local:" scheme.
val pythonPath = new ListBuffer[String]()
val (pyFiles, pyArchives) = sparkConf.get(PY_FILES).partition(_.endsWith(".py"))
if (pyFiles.nonEmpty) {
pythonPath += buildPath(Environment.PWD.$$(), LOCALIZED_PYTHON_DIR)
}
(pySparkArchives ++ pyArchives).foreach { path =>
val uri = Utils.resolveURI(path)
if (uri.getScheme != Utils.LOCAL_SCHEME) {
pythonPath += buildPath(Environment.PWD.$$(), new Path(uri).getName())
} else {
pythonPath += uri.getPath()
}
}
// Finally, update the Spark config to propagate PYTHONPATH to the AM and executors.
if (pythonPath.nonEmpty) {
val pythonPathList = (sys.env.get("PYTHONPATH") ++ pythonPath)
env("PYTHONPATH") = (env.get("PYTHONPATH") ++ pythonPathList)
.mkString(ApplicationConstants.CLASS_PATH_SEPARATOR)
val pythonPathExecutorEnv = (sparkConf.getExecutorEnv.toMap.get("PYTHONPATH") ++
pythonPathList).mkString(ApplicationConstants.CLASS_PATH_SEPARATOR)
sparkConf.setExecutorEnv("PYTHONPATH", pythonPathExecutorEnv)
}
if (isClusterMode) {
// propagate PYSPARK_DRIVER_PYTHON and PYSPARK_PYTHON to driver in cluster mode
Seq("PYSPARK_DRIVER_PYTHON", "PYSPARK_PYTHON").foreach { envname =>
if (!env.contains(envname)) {
sys.env.get(envname).foreach(env(envname) = _)
}
}
sys.env.get("PYTHONHASHSEED").foreach(env.put("PYTHONHASHSEED", _))
}
sys.env.get(ENV_DIST_CLASSPATH).foreach { dcp =>
env(ENV_DIST_CLASSPATH) = dcp
}
env
}
/**
* Set up a ContainerLaunchContext to launch our ApplicationMaster container.
* This sets up the launch environment, java options, and the command for launching the AM.
*/
private def createContainerLaunchContext(newAppResponse: GetNewApplicationResponse)
: ContainerLaunchContext = {
logInfo("Setting up container launch context for our AM")
val appId = newAppResponse.getApplicationId
val pySparkArchives =
if (sparkConf.get(IS_PYTHON_APP)) {
findPySparkArchives()
} else {
Nil
}
val launchEnv = setupLaunchEnv(stagingDirPath, pySparkArchives)
val localResources = prepareLocalResources(stagingDirPath, pySparkArchives)
val amContainer = Records.newRecord(classOf[ContainerLaunchContext])
amContainer.setLocalResources(localResources.asJava)
amContainer.setEnvironment(launchEnv.asJava)
val javaOpts = ListBuffer[String]()
// Set the environment variable through a command prefix
// to append to the existing value of the variable
var prefixEnv: Option[String] = None
// Add Xmx for AM memory
javaOpts += "-Xmx" + amMemory + "m"
val tmpDir = new Path(Environment.PWD.$$(), YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR)
javaOpts += "-Djava.io.tmpdir=" + tmpDir
// TODO: Remove once cpuset version is pushed out.
// The context is, default gc for server class machines ends up using all cores to do gc -
// hence if there are multiple containers in same node, Spark GC affects all other containers'
// performance (which can be that of other Spark containers)
// Instead of using this, rely on cpusets by YARN to enforce "proper" Spark behavior in
// multi-tenant environments. Not sure how default Java GC behaves if it is limited to subset
// of cores on a node.
val useConcurrentAndIncrementalGC = launchEnv.get("SPARK_USE_CONC_INCR_GC").exists(_.toBoolean)
if (useConcurrentAndIncrementalGC) {
// In our expts, using (default) throughput collector has severe perf ramifications in
// multi-tenant machines
javaOpts += "-XX:+UseConcMarkSweepGC"
javaOpts += "-XX:MaxTenuringThreshold=31"
javaOpts += "-XX:SurvivorRatio=8"
javaOpts += "-XX:+CMSIncrementalMode"
javaOpts += "-XX:+CMSIncrementalPacing"
javaOpts += "-XX:CMSIncrementalDutyCycleMin=0"
javaOpts += "-XX:CMSIncrementalDutyCycle=10"
}
// Include driver-specific java options if we are launching a driver
if (isClusterMode) {
sparkConf.get(DRIVER_JAVA_OPTIONS).foreach { opts =>
javaOpts ++= Utils.splitCommandString(opts)
.map(Utils.substituteAppId(_, appId.toString))
.map(YarnSparkHadoopUtil.escapeForShell)
}
val libraryPaths = Seq(sparkConf.get(DRIVER_LIBRARY_PATH),
sys.props.get("spark.driver.libraryPath")).flatten
if (libraryPaths.nonEmpty) {
prefixEnv = Some(createLibraryPathPrefix(libraryPaths.mkString(File.pathSeparator),
sparkConf))
}
if (sparkConf.get(AM_JAVA_OPTIONS).isDefined) {
logWarning(s"${AM_JAVA_OPTIONS.key} will not take effect in cluster mode")
}
} else {
// Validate and include yarn am specific java options in yarn-client mode.
sparkConf.get(AM_JAVA_OPTIONS).foreach { opts =>
if (opts.contains("-Dspark")) {
val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to set Spark options (was '$opts')."
throw new SparkException(msg)
}
if (opts.contains("-Xmx")) {
val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to specify max heap memory settings " +
s"(was '$opts'). Use spark.yarn.am.memory instead."
throw new SparkException(msg)
}
javaOpts ++= Utils.splitCommandString(opts)
.map(Utils.substituteAppId(_, appId.toString))
.map(YarnSparkHadoopUtil.escapeForShell)
}
sparkConf.get(AM_LIBRARY_PATH).foreach { paths =>
prefixEnv = Some(createLibraryPathPrefix(paths, sparkConf))
}
}
// For log4j configuration to reference
javaOpts += ("-Dspark.yarn.app.container.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR)
val userClass =
if (isClusterMode) {
Seq("--class", YarnSparkHadoopUtil.escapeForShell(args.userClass))
} else {
Nil
}
val userJar =
if (args.userJar != null) {
Seq("--jar", args.userJar)
} else {
Nil
}
val primaryPyFile =
if (isClusterMode && args.primaryPyFile != null) {
Seq("--primary-py-file", new Path(args.primaryPyFile).getName())
} else {
Nil
}
val primaryRFile =
if (args.primaryRFile != null) {
Seq("--primary-r-file", args.primaryRFile)
} else {
Nil
}
val amClass =
if (isClusterMode) {
Utils.classForName("org.apache.spark.deploy.yarn.ApplicationMaster").getName
} else {
Utils.classForName("org.apache.spark.deploy.yarn.ExecutorLauncher").getName
}
if (args.primaryRFile != null &&
(args.primaryRFile.endsWith(".R") || args.primaryRFile.endsWith(".r"))) {
args.userArgs = ArrayBuffer(args.primaryRFile) ++ args.userArgs
}
val userArgs = args.userArgs.flatMap { arg =>
Seq("--arg", YarnSparkHadoopUtil.escapeForShell(arg))
}
val amArgs =
Seq(amClass) ++ userClass ++ userJar ++ primaryPyFile ++ primaryRFile ++ userArgs ++
Seq("--properties-file",
buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, SPARK_CONF_FILE)) ++
Seq("--dist-cache-conf",
buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, DIST_CACHE_CONF_FILE))
// Command for the ApplicationMaster
val commands = prefixEnv ++
Seq(Environment.JAVA_HOME.$$() + "/bin/java", "-server") ++
javaOpts ++ amArgs ++
Seq(
"1>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout",
"2>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr")
// TODO: it would be nicer to just make sure there are no null commands here
val printableCommands = commands.map(s => if (s == null) "null" else s).toList
amContainer.setCommands(printableCommands.asJava)
logDebug("===============================================================================")
logDebug("YARN AM launch context:")
logDebug(s" user class: ${Option(args.userClass).getOrElse("N/A")}")
logDebug(" env:")
if (log.isDebugEnabled) {
Utils.redact(sparkConf, launchEnv.toSeq).foreach { case (k, v) =>
logDebug(s" $k -> $v")
}
}
logDebug(" resources:")
localResources.foreach { case (k, v) => logDebug(s" $k -> $v")}
logDebug(" command:")
logDebug(s" ${printableCommands.mkString(" ")}")
logDebug("===============================================================================")
// send the acl settings into YARN to control who has access via YARN interfaces
val securityManager = new SecurityManager(sparkConf)
amContainer.setApplicationACLs(
YarnSparkHadoopUtil.getApplicationAclsForYarn(securityManager).asJava)
setupSecurityToken(amContainer)
amContainer
}
/**
* Report the state of an application until it has exited, either successfully or
* due to some failure, then return a pair of the yarn application state (FINISHED, FAILED,
* KILLED, or RUNNING) and the final application state (UNDEFINED, SUCCEEDED, FAILED,
* or KILLED).
*
* @param appId ID of the application to monitor.
* @param returnOnRunning Whether to also return the application state when it is RUNNING.
* @param logApplicationReport Whether to log details of the application report every iteration.
* @param interval How often to poll the YARN RM for application status (in ms).
* @return A pair of the yarn application state and the final application state.
*/
def monitorApplication(
appId: ApplicationId,
returnOnRunning: Boolean = false,
logApplicationReport: Boolean = true,
interval: Long = sparkConf.get(REPORT_INTERVAL)): YarnAppReport = {
var lastState: YarnApplicationState = null
while (true) {
Thread.sleep(interval)
val report: ApplicationReport =
try {
getApplicationReport(appId)
} catch {
case e: ApplicationNotFoundException =>
logError(s"Application $appId not found.")
cleanupStagingDir()
return YarnAppReport(YarnApplicationState.KILLED, FinalApplicationStatus.KILLED, None)
case NonFatal(e) if !e.isInstanceOf[InterruptedIOException] =>
val msg = s"Failed to contact YARN for application $appId."
logError(msg, e)
// Don't necessarily clean up staging dir because status is unknown
return YarnAppReport(YarnApplicationState.FAILED, FinalApplicationStatus.FAILED,
Some(msg))
}
val state = report.getYarnApplicationState
if (logApplicationReport) {
logInfo(s"Application report for $appId (state: $state)")
// If DEBUG is enabled, log report details every iteration
// Otherwise, log them every time the application changes state
if (log.isDebugEnabled) {
logDebug(formatReportDetails(report, getDriverLogsLink(report)))
} else if (lastState != state) {
logInfo(formatReportDetails(report, getDriverLogsLink(report)))
}
}
if (lastState != state) {
state match {
case YarnApplicationState.RUNNING =>
reportLauncherState(SparkAppHandle.State.RUNNING)
case YarnApplicationState.FINISHED =>
report.getFinalApplicationStatus match {
case FinalApplicationStatus.FAILED =>
reportLauncherState(SparkAppHandle.State.FAILED)
case FinalApplicationStatus.KILLED =>
reportLauncherState(SparkAppHandle.State.KILLED)
case _ =>
reportLauncherState(SparkAppHandle.State.FINISHED)
}
case YarnApplicationState.FAILED =>
reportLauncherState(SparkAppHandle.State.FAILED)
case YarnApplicationState.KILLED =>
reportLauncherState(SparkAppHandle.State.KILLED)
case _ =>
}
}
if (state == YarnApplicationState.FINISHED ||
state == YarnApplicationState.FAILED ||
state == YarnApplicationState.KILLED) {
cleanupStagingDir()
return createAppReport(report)
}
if (returnOnRunning && state == YarnApplicationState.RUNNING) {
return createAppReport(report)
}
if (state == YarnApplicationState.ACCEPTED && isClientUnmanagedAMEnabled &&
appMaster == null && report.getAMRMToken != null) {
appMaster = startApplicationMasterService(report)
}
lastState = state
}
// Never reached, but keeps compiler happy
throw new SparkException("While loop is depleted! This should never happen...")
}
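  /**
   * Start an unmanaged ApplicationMaster inside this client process. The AMRM token from the
   * application report is added to the current user's credentials so the AM can register with
   * the ResourceManager, and the AM is then run on a daemon thread while the client continues
   * monitoring the application.
   */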
private def startApplicationMasterService(report: ApplicationReport): ApplicationMaster = {
// Add AMRMToken to establish connection between RM and AM
val token = report.getAMRMToken
val amRMToken: org.apache.hadoop.security.token.Token[AMRMTokenIdentifier] =
new org.apache.hadoop.security.token.Token[AMRMTokenIdentifier](
token.getIdentifier().array(), token.getPassword().array,
new Text(token.getKind()), new Text(token.getService()))
val currentUGI = UserGroupInformation.getCurrentUser
currentUGI.addToken(amRMToken)
// Start Application Service in a separate thread and continue with application monitoring
val appMaster = new ApplicationMaster(
new ApplicationMasterArguments(Array.empty), sparkConf, hadoopConf)
val amService = new Thread("Unmanaged Application Master Service") {
override def run(): Unit = {
appMaster.runUnmanaged(rpcEnv, report.getCurrentApplicationAttemptId,
stagingDirPath, cachedResourcesConf)
}
}
amService.setDaemon(true)
amService.start()
appMaster
}
/**
* Format an application report and optionally, links to driver logs, in a human-friendly manner.
*
* @param report The application report from YARN.
* @param driverLogsLinks A map of driver log files and their links. Keys are the file names
* (e.g. `stdout`), and values are the links. If empty, nothing will be
* printed.
* @return Human-readable version of the input data.
*/
private def formatReportDetails(report: ApplicationReport,
driverLogsLinks: IMap[String, String]): String = {
val details = Seq[(String, String)](
("client token", getClientToken(report)),
("diagnostics", report.getDiagnostics),
("ApplicationMaster host", report.getHost),
("ApplicationMaster RPC port", report.getRpcPort.toString),
("queue", report.getQueue),
("start time", report.getStartTime.toString),
("final status", report.getFinalApplicationStatus.toString),
("tracking URL", report.getTrackingUrl),
("user", report.getUser)
) ++ driverLogsLinks.map { case (fname, link) => (s"Driver Logs ($fname)", link) }
// Use more loggable format if value is null or empty
    details.map { case (k, v) =>
      val newValue = Option(v).filter(_.nonEmpty).getOrElse("N/A")
      s"\n\t $k: $newValue"
    }.mkString("")
}
/**
   * Fetch links to the logs of the driver for the given application report. This requires
   * querying the ResourceManager via RPC. Returns an empty map if the links could not be fetched.
* If this feature is disabled via [[CLIENT_INCLUDE_DRIVER_LOGS_LINK]], or if the application
* report indicates that the driver container isn't currently running, an empty map is
* returned immediately.
*/
private def getDriverLogsLink(appReport: ApplicationReport): IMap[String, String] = {
if (!sparkConf.get(CLIENT_INCLUDE_DRIVER_LOGS_LINK)
|| appReport.getYarnApplicationState != YarnApplicationState.RUNNING) {
return IMap.empty
}
try {
Option(appReport.getCurrentApplicationAttemptId)
.flatMap(attemptId => Option(yarnClient.getApplicationAttemptReport(attemptId)))
.flatMap(attemptReport => Option(attemptReport.getAMContainerId))
.flatMap(amContainerId => Option(yarnClient.getContainerReport(amContainerId)))
.flatMap(containerReport => Option(containerReport.getLogUrl))
.map(YarnContainerInfoHelper.getLogUrlsFromBaseUrl)
.getOrElse(IMap.empty)
} catch {
case e: Exception =>
logWarning(s"Unable to get driver log links for $appId: $e")
// Include the full stack trace only at DEBUG level to reduce verbosity
logDebug(s"Unable to get driver log links for $appId", e)
IMap.empty
}
}
/**
* Submit an application to the ResourceManager.
   * If spark.yarn.submit.waitAppCompletion is set to true, the client stays alive
   * reporting the application's status until the application has exited for any reason.
* Otherwise, the client process will exit after submission.
* If the application finishes with a failed, killed, or undefined status,
* throw an appropriate SparkException.
*/
def run(): Unit = {
this.appId = submitApplication()
if (!launcherBackend.isConnected() && fireAndForget) {
val report = getApplicationReport(appId)
val state = report.getYarnApplicationState
logInfo(s"Application report for $appId (state: $state)")
logInfo(formatReportDetails(report, getDriverLogsLink(report)))
if (state == YarnApplicationState.FAILED || state == YarnApplicationState.KILLED) {
throw new SparkException(s"Application $appId finished with status: $state")
}
} else {
val YarnAppReport(appState, finalState, diags) = monitorApplication(appId)
if (appState == YarnApplicationState.FAILED || finalState == FinalApplicationStatus.FAILED) {
diags.foreach { err =>
logError(s"Application diagnostics message: $err")
}
throw new SparkException(s"Application $appId finished with failed status")
}
if (appState == YarnApplicationState.KILLED || finalState == FinalApplicationStatus.KILLED) {
throw new SparkException(s"Application $appId is killed")
}
if (finalState == FinalApplicationStatus.UNDEFINED) {
throw new SparkException(s"The final status of application $appId is undefined")
}
}
}
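  // Locate the PySpark archives to ship with the application: use PYSPARK_ARCHIVES_PATH if set
  // (a comma-separated list of paths), otherwise fall back to pyspark.zip and the py4j zip under
  // $SPARK_HOME/python/lib.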
private def findPySparkArchives(): Seq[String] = {
sys.env.get("PYSPARK_ARCHIVES_PATH")
.map(_.split(",").toSeq)
.getOrElse {
val pyLibPath = Seq(sys.env("SPARK_HOME"), "python", "lib").mkString(File.separator)
val pyArchivesFile = new File(pyLibPath, "pyspark.zip")
require(pyArchivesFile.exists(),
s"$pyArchivesFile not found; cannot run pyspark application in YARN mode.")
val py4jFile = new File(pyLibPath, PythonUtils.PY4J_ZIP_NAME)
require(py4jFile.exists(),
s"$py4jFile not found; cannot run pyspark application in YARN mode.")
Seq(pyArchivesFile.getAbsolutePath(), py4jFile.getAbsolutePath())
}
}
}
private object Client extends Logging {
// Alias for the user jar
val APP_JAR_NAME: String = "__app__.jar"
// Staging directory for any temporary jars or files
val SPARK_STAGING: String = ".sparkStaging"
// Staging directory is private! -> rwx--------
val STAGING_DIR_PERMISSION: FsPermission =
FsPermission.createImmutable(Integer.parseInt("700", 8).toShort)
  // App files are world readable and owner writable -> rw-r--r--
val APP_FILE_PERMISSION: FsPermission =
FsPermission.createImmutable(Integer.parseInt("644", 8).toShort)
// Distribution-defined classpath to add to processes
val ENV_DIST_CLASSPATH = "SPARK_DIST_CLASSPATH"
// Subdirectory where the user's Spark and Hadoop config files will be placed.
val LOCALIZED_CONF_DIR = "__spark_conf__"
// Subdirectory in the conf directory containing Hadoop config files.
val LOCALIZED_HADOOP_CONF_DIR = "__hadoop_conf__"
// File containing the conf archive in the AM. See prepareLocalResources().
val LOCALIZED_CONF_ARCHIVE = LOCALIZED_CONF_DIR + ".zip"
// Name of the file in the conf archive containing Spark configuration.
val SPARK_CONF_FILE = "__spark_conf__.properties"
// Name of the file in the conf archive containing the distributed cache info.
val DIST_CACHE_CONF_FILE = "__spark_dist_cache__.properties"
// Subdirectory where the user's python files (not archives) will be placed.
val LOCALIZED_PYTHON_DIR = "__pyfiles__"
// Subdirectory where Spark libraries will be placed.
val LOCALIZED_LIB_DIR = "__spark_libs__"
/**
* Return the path to the given application's staging directory.
*/
private def getAppStagingDir(appId: ApplicationId): String = {
buildPath(SPARK_STAGING, appId.toString())
}
/**
* Populate the classpath entry in the given environment map with any application
* classpath specified through the Hadoop and Yarn configurations.
*/
private[yarn] def populateHadoopClasspath(conf: Configuration, env: HashMap[String, String])
: Unit = {
val classPathElementsToAdd = getYarnAppClasspath(conf) ++ getMRAppClasspath(conf)
classPathElementsToAdd.foreach { c =>
YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, c.trim)
}
}
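  // Read the application classpath entries from the YARN and MapReduce configurations, falling
  // back to the Hadoop-provided defaults when the corresponding keys are not set.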
private def getYarnAppClasspath(conf: Configuration): Seq[String] =
Option(conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH)) match {
case Some(s) => s.toSeq
case None => getDefaultYarnApplicationClasspath
}
private def getMRAppClasspath(conf: Configuration): Seq[String] =
Option(conf.getStrings("mapreduce.application.classpath")) match {
case Some(s) => s.toSeq
case None => getDefaultMRApplicationClasspath
}
private[yarn] def getDefaultYarnApplicationClasspath: Seq[String] =
YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH.toSeq
private[yarn] def getDefaultMRApplicationClasspath: Seq[String] =
StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH).toSeq
/**
* Populate the classpath entry in the given environment map.
*
* User jars are generally not added to the JVM's system classpath; those are handled by the AM
* and executor backend. When the deprecated `spark.yarn.user.classpath.first` is used, user jars
* are included in the system classpath, though. The extra class path and other uploaded files are
* always made available through the system class path.
*
* @param args Client arguments (when starting the AM) or null (when starting executors).
*/
private[yarn] def populateClasspath(
args: ClientArguments,
conf: Configuration,
sparkConf: SparkConf,
env: HashMap[String, String],
extraClassPath: Option[String] = None): Unit = {
extraClassPath.foreach { cp =>
addClasspathEntry(getClusterPath(sparkConf, cp), env)
}
addClasspathEntry(Environment.PWD.$$(), env)
addClasspathEntry(Environment.PWD.$$() + Path.SEPARATOR + LOCALIZED_CONF_DIR, env)
if (sparkConf.get(USER_CLASS_PATH_FIRST)) {
// in order to properly add the app jar when user classpath is first
// we have to do the mainJar separate in order to send the right thing
// into addFileToClasspath
val mainJar =
if (args != null) {
getMainJarUri(Option(args.userJar))
} else {
getMainJarUri(sparkConf.get(APP_JAR))
}
mainJar.foreach(addFileToClasspath(sparkConf, conf, _, APP_JAR_NAME, env))
val secondaryJars =
if (args != null) {
getSecondaryJarUris(Option(sparkConf.get(JARS_TO_DISTRIBUTE)))
} else {
getSecondaryJarUris(sparkConf.get(SECONDARY_JARS))
}
secondaryJars.foreach { x =>
addFileToClasspath(sparkConf, conf, x, null, env)
}
}
// Add the Spark jars to the classpath, depending on how they were distributed.
addClasspathEntry(buildPath(Environment.PWD.$$(), LOCALIZED_LIB_DIR, "*"), env)
if (sparkConf.get(SPARK_ARCHIVE).isEmpty) {
sparkConf.get(SPARK_JARS).foreach { jars =>
jars.filter(Utils.isLocalUri).foreach { jar =>
val uri = new URI(jar)
addClasspathEntry(getClusterPath(sparkConf, uri.getPath()), env)
}
}
}
if (sparkConf.get(POPULATE_HADOOP_CLASSPATH)) {
populateHadoopClasspath(conf, env)
}
sys.env.get(ENV_DIST_CLASSPATH).foreach { cp =>
addClasspathEntry(getClusterPath(sparkConf, cp), env)
}
// Add the localized Hadoop config at the end of the classpath, in case it contains other
// files (such as configuration files for different services) that are not part of the
// YARN cluster's config.
addClasspathEntry(
buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, LOCALIZED_HADOOP_CONF_DIR), env)
}
/**
* Returns a list of URIs representing the user classpath.
*
* @param conf Spark configuration.
*/
def getUserClasspath(conf: SparkConf): Array[URI] = {
val mainUri = getMainJarUri(conf.get(APP_JAR))
val secondaryUris = getSecondaryJarUris(conf.get(SECONDARY_JARS))
(mainUri ++ secondaryUris).toArray
}
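  // Resolve the URI used to reference the main jar on the classpath: "local:" jars are referenced
  // by their path on the node, anything else by the __app__.jar link name it was distributed under.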
private def getMainJarUri(mainJar: Option[String]): Option[URI] = {
mainJar.flatMap { path =>
val uri = Utils.resolveURI(path)
if (uri.getScheme == Utils.LOCAL_SCHEME) Some(uri) else None
}.orElse(Some(new URI(APP_JAR_NAME)))
}
private def getSecondaryJarUris(secondaryJars: Option[Seq[String]]): Seq[URI] = {
secondaryJars.getOrElse(Nil).map(new URI(_))
}
/**
* Adds the given path to the classpath, handling "local:" URIs correctly.
*
* If an alternate name for the file is given, and it's not a "local:" file, the alternate
* name will be added to the classpath (relative to the job's work directory).
*
* If not a "local:" file and no alternate name, the linkName will be added to the classpath.
*
* @param conf Spark configuration.
* @param hadoopConf Hadoop configuration.
* @param uri URI to add to classpath (optional).
* @param fileName Alternate name for the file (optional).
* @param env Map holding the environment variables.
*/
private def addFileToClasspath(
conf: SparkConf,
hadoopConf: Configuration,
uri: URI,
fileName: String,
env: HashMap[String, String]): Unit = {
if (uri != null && uri.getScheme == Utils.LOCAL_SCHEME) {
addClasspathEntry(getClusterPath(conf, uri.getPath), env)
} else if (fileName != null) {
addClasspathEntry(buildPath(Environment.PWD.$$(), fileName), env)
} else if (uri != null) {
val localPath = getQualifiedLocalPath(uri, hadoopConf)
val linkName = Option(uri.getFragment()).getOrElse(localPath.getName())
addClasspathEntry(buildPath(Environment.PWD.$$(), linkName), env)
}
}
/**
* Add the given path to the classpath entry of the given environment map.
* If the classpath is already set, this appends the new path to the existing classpath.
*/
private def addClasspathEntry(path: String, env: HashMap[String, String]): Unit =
YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, path)
/**
* Returns the path to be sent to the NM for a path that is valid on the gateway.
*
* This method uses two configuration values:
*
* - spark.yarn.config.gatewayPath: a string that identifies a portion of the input path that may
* only be valid in the gateway node.
* - spark.yarn.config.replacementPath: a string with which to replace the gateway path. This may
* contain, for example, env variable references, which will be expanded by the NMs when
* starting containers.
*
* If either config is not available, the input path is returned.
*/
def getClusterPath(conf: SparkConf, path: String): String = {
val localPath = conf.get(GATEWAY_ROOT_PATH)
val clusterPath = conf.get(REPLACEMENT_ROOT_PATH)
if (localPath != null && clusterPath != null) {
path.replace(localPath, clusterPath)
} else {
path
}
}
/**
   * Return whether the file systems represented by the two URIs are the same.
*/
private[spark] def compareUri(srcUri: URI, dstUri: URI): Boolean = {
if (srcUri.getScheme() == null || srcUri.getScheme() != dstUri.getScheme()) {
return false
}
val srcAuthority = srcUri.getAuthority()
val dstAuthority = dstUri.getAuthority()
if (srcAuthority != null && !srcAuthority.equalsIgnoreCase(dstAuthority)) {
return false
}
var srcHost = srcUri.getHost()
var dstHost = dstUri.getHost()
// In HA or when using viewfs, the host part of the URI may not actually be a host, but the
// name of the HDFS namespace. Those names won't resolve, so avoid even trying if they
// match.
if (srcHost != null && dstHost != null && srcHost != dstHost) {
try {
srcHost = InetAddress.getByName(srcHost).getCanonicalHostName()
dstHost = InetAddress.getByName(dstHost).getCanonicalHostName()
} catch {
case e: UnknownHostException =>
return false
}
}
Objects.equal(srcHost, dstHost) && srcUri.getPort() == dstUri.getPort()
}
/**
* Return whether the two file systems are the same.
*/
protected def compareFs(srcFs: FileSystem, destFs: FileSystem): Boolean = {
val srcUri = srcFs.getUri()
val dstUri = destFs.getUri()
compareUri(srcUri, dstUri)
}
/**
* Given a local URI, resolve it and return a qualified local path that corresponds to the URI.
* This is used for preparing local resources to be included in the container launch context.
*/
private def getQualifiedLocalPath(localURI: URI, hadoopConf: Configuration): Path = {
val qualifiedURI =
if (localURI.getScheme == null) {
// If not specified, assume this is in the local filesystem to keep the behavior
// consistent with that of Hadoop
new URI(FileSystem.getLocal(hadoopConf).makeQualified(new Path(localURI)).toString)
} else {
localURI
}
new Path(qualifiedURI)
}
/**
* Whether to consider jars provided by the user to have precedence over the Spark jars when
* loading user classes.
*/
def isUserClassPathFirst(conf: SparkConf, isDriver: Boolean): Boolean = {
if (isDriver) {
conf.get(DRIVER_USER_CLASS_PATH_FIRST)
} else {
conf.get(EXECUTOR_USER_CLASS_PATH_FIRST)
}
}
/**
* Joins all the path components using Path.SEPARATOR.
*/
def buildPath(components: String*): String = {
components.mkString(Path.SEPARATOR)
}
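  /**
   * Convert a YARN [[ApplicationReport]] into a [[YarnAppReport]], turning null or empty
   * diagnostics into None.
   */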
def createAppReport(report: ApplicationReport): YarnAppReport = {
val diags = report.getDiagnostics()
val diagsOpt = if (diags != null && diags.nonEmpty) Some(diags) else None
YarnAppReport(report.getYarnApplicationState(), report.getFinalApplicationStatus(), diagsOpt)
}
/**
* Create a properly quoted and escaped library path string to be added as a prefix to the command
* executed by YARN. This is different from normal quoting / escaping due to YARN executing the
* command through "bash -c".
*/
def createLibraryPathPrefix(libpath: String, conf: SparkConf): String = {
val cmdPrefix = if (Utils.isWindows) {
Utils.libraryPathEnvPrefix(Seq(libpath))
} else {
val envName = Utils.libraryPathEnvName
// For quotes, escape both the quote and the escape character when encoding in the command
// string.
      val quoted = libpath.replace("\"", "\\\"")
      envName + "=\\\"" + quoted + File.pathSeparator + "$" + envName + "\\\""
}
getClusterPath(conf, cmdPrefix)
}
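  /** Flatten a [[SparkConf]] into a java.util.Properties object for writing to the conf archive. */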
def confToProperties(conf: SparkConf): Properties = {
val props = new Properties()
conf.getAll.foreach { case (k, v) =>
props.setProperty(k, v)
}
props
}
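  /** Write the given properties as a new zip entry named `name` into the archive being built. */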
def writePropertiesToArchive(props: Properties, name: String, out: ZipOutputStream): Unit = {
out.putNextEntry(new ZipEntry(name))
val writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)
props.store(writer, "Spark configuration.")
writer.flush()
out.closeEntry()
}
}
private[spark] class YarnClusterApplication extends SparkApplication {
override def start(args: Array[String], conf: SparkConf): Unit = {
    // SparkSubmit uses the YARN cache to distribute files and jars in YARN mode,
    // so remove the corresponding entries from the SparkConf here.
conf.remove(JARS)
conf.remove(FILES)
conf.remove(ARCHIVES)
new Client(new ClientArguments(args), conf, null).run()
}
}
private[spark] case class YarnAppReport(
appState: YarnApplicationState,
finalState: FinalApplicationStatus,
diagnostics: Option[String])
| witgo/spark | resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala | Scala | apache-2.0 | 68,386 |
package models.services
import javax.inject.Inject
import java.util.UUID
import models._
import models.daos._
import shared.Util.reverseEffect
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
/**
 * Handles majors (the student-side notion of a major).
 * A collection of functions for looking up majors.
*/
class MajorServiceImpl @Inject() (
majorDAO: MajorDAO,
majorSubjectDAO: MajorSubjectDAO,
userMajorDAO: UserMajorDAO,
departmentDAO: DepartmentDAO
) extends MajorService {
/**
   * Returns the major matching the given name and type.
*
* @param name The name of the Major, in Korean
* @param majorType The type of the Major
* @return The found major.
*/
def find(name: String, majorType: MajorType): Future[Option[Major]] =
majorDAO.findByName(name).map { majorSeq => majorSeq.filter(_.majorType == majorType).headOption }
def find(name: String, majorType: MajorType, year: Short): Future[Option[Major]] =
majorDAO.findByName(name).map { majorSeq => majorSeq.filter(m => m.majorType == majorType && m.year == year).headOption }
/**
* Returns a sequence of majors of a given user
   * @param userID The ID of the user whose majors to look up
   * @return The sequence of majors the user has
*/
def findByUser(userID: UUID): Future[Seq[Major]] =
for {
ums <- userMajorDAO.allMajor(userID)
ms <- reverseEffect(ums.map(um => majorDAO.findById(um.majorID)))
} yield {
ms.map {
case Some(m) => m
case None => throw new NoSuchElementException("User has an invalid major")
}
}
/**
* Returns a sequence of majors and the given subject's requiredness
*
* @param subject The subject to search
* @return The sequence of pairs of matching majors and its requiredness
*/
def findBySubject(subject: Subject): Future[Seq[(Major, Boolean)]] =
majorSubjectDAO.findBySubject(subject.code, subject.department)
/**
* Returns a sequence of majors and the subject's requiredness of a given course
*
* @param course The course to search
* @return The sequence of pairs of matching majors and its requiredness
*/
def findByCourse(course: Course): Future[Seq[(Major, Boolean)]] =
for {
dept <- departmentDAO.findById(course.departmentId)
ms <- majorSubjectDAO.findBySubject(course.codePrefix, dept)
} yield {
ms
}
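  /**
   * Returns all majors grouped by [[MajorType]], with each major represented as a
   * (Korean name, English name) pair; the English name falls back to the empty string.
   *
   * For illustration only (hypothetical type and names), the completed future might hold:
   * {{{
   * Map(SomeMajorType -> Seq("컴퓨터공학" -> "Computer Engineering", "국어국문학" -> ""))
   * }}}
   */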
def allOfType(): Future[Map[MajorType, Seq[(String, String)]]] = {
majorDAO.all()
.map {
_.groupBy {
case Major(_, _, _, majorType, _) => majorType
}.map {
case (majorType, majorseq) =>
majorType ->
majorseq.map {
case Major(_, nameKo, nameEn, _, _) => nameKo -> nameEn.getOrElse("")
}
}
}
}
}
| yoo-haemin/hufs-planner | project/app/models/services/MajorServiceImpl.scala | Scala | agpl-3.0 | 2,776 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.avro
import java.io._
import java.net.URL
import java.nio.file.{Files, Paths}
import java.sql.{Date, Timestamp}
import java.util.{Locale, TimeZone, UUID}
import scala.collection.JavaConverters._
import org.apache.avro.Schema
import org.apache.avro.Schema.{Field, Type}
import org.apache.avro.Schema.Type._
import org.apache.avro.file.{DataFileReader, DataFileWriter}
import org.apache.avro.generic.{GenericData, GenericDatumReader, GenericDatumWriter, GenericRecord}
import org.apache.avro.generic.GenericData.{EnumSymbol, Fixed}
import org.apache.commons.io.FileUtils
import org.apache.spark.SparkException
import org.apache.spark.sql._
import org.apache.spark.sql.TestingUDT.{IntervalData, NullData, NullUDT}
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{SharedSQLContext, SQLTestUtils}
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
class AvroSuite extends QueryTest with SharedSQLContext with SQLTestUtils {
import testImplicits._
val episodesAvro = testFile("episodes.avro")
val testAvro = testFile("test.avro")
override protected def beforeAll(): Unit = {
super.beforeAll()
spark.conf.set("spark.sql.files.maxPartitionBytes", 1024)
}
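  // Check that the data reloaded from newFile matches the entries in the original file.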
def checkReloadMatchesSaved(originalFile: String, newFile: String): Unit = {
    val originalEntries = spark.read.format("avro").load(originalFile).collect()
val newEntries = spark.read.format("avro").load(newFile)
checkAnswer(newEntries, originalEntries)
}
def checkAvroSchemaEquals(avroSchema: String, expectedAvroSchema: String): Unit = {
assert(new Schema.Parser().parse(avroSchema) ==
new Schema.Parser().parse(expectedAvroSchema))
}
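  // Return the Avro schema (as a JSON string) of the given .avro file, or of the first .avro
  // file found when the path points to a directory of output files.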
def getAvroSchemaStringFromFiles(filePath: String): String = {
new DataFileReader({
val file = new File(filePath)
if (file.isFile) {
file
} else {
file.listFiles()
.filter(_.isFile)
.filter(_.getName.endsWith("avro"))
.head
}
}, new GenericDatumReader[Any]()).getSchema.toString(false)
}
test("resolve avro data source") {
val databricksAvro = "com.databricks.spark.avro"
// By default the backward compatibility for com.databricks.spark.avro is enabled.
Seq("avro", "org.apache.spark.sql.avro.AvroFileFormat", databricksAvro).foreach { provider =>
assert(DataSource.lookupDataSource(provider, spark.sessionState.conf) ===
classOf[org.apache.spark.sql.avro.AvroFileFormat])
}
withSQLConf(SQLConf.LEGACY_REPLACE_DATABRICKS_SPARK_AVRO_ENABLED.key -> "false") {
val message = intercept[AnalysisException] {
DataSource.lookupDataSource(databricksAvro, spark.sessionState.conf)
}.getMessage
assert(message.contains(s"Failed to find data source: $databricksAvro"))
}
}
test("reading from multiple paths") {
val df = spark.read.format("avro").load(episodesAvro, episodesAvro)
assert(df.count == 16)
}
test("reading and writing partitioned data") {
val df = spark.read.format("avro").load(episodesAvro)
val fields = List("title", "air_date", "doctor")
for (field <- fields) {
withTempPath { dir =>
val outputDir = s"$dir/${UUID.randomUUID}"
df.write.partitionBy(field).format("avro").save(outputDir)
val input = spark.read.format("avro").load(outputDir)
// makes sure that no fields got dropped.
// We convert Rows to Seqs in order to work around SPARK-10325
assert(input.select(field).collect().map(_.toSeq).toSet ===
df.select(field).collect().map(_.toSeq).toSet)
}
}
}
test("request no fields") {
val df = spark.read.format("avro").load(episodesAvro)
df.createOrReplaceTempView("avro_table")
assert(spark.sql("select count(*) from avro_table").collect().head === Row(8))
}
test("convert formats") {
withTempPath { dir =>
val df = spark.read.format("avro").load(episodesAvro)
df.write.parquet(dir.getCanonicalPath)
assert(spark.read.parquet(dir.getCanonicalPath).count() === df.count)
}
}
test("rearrange internal schema") {
withTempPath { dir =>
val df = spark.read.format("avro").load(episodesAvro)
df.select("doctor", "title").write.format("avro").save(dir.getCanonicalPath)
}
}
test("union(int, long) is read as long") {
withTempPath { dir =>
val avroSchema: Schema = {
val union =
Schema.createUnion(List(Schema.create(Type.INT), Schema.create(Type.LONG)).asJava)
val fields = Seq(new Field("field1", union, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
schema
}
val datumWriter = new GenericDatumWriter[GenericRecord](avroSchema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(avroSchema, new File(s"$dir.avro"))
val rec1 = new GenericData.Record(avroSchema)
rec1.put("field1", 1.toLong)
dataFileWriter.append(rec1)
val rec2 = new GenericData.Record(avroSchema)
rec2.put("field1", 2)
dataFileWriter.append(rec2)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.schema.fields === Seq(StructField("field1", LongType, nullable = true)))
assert(df.collect().toSet == Set(Row(1L), Row(2L)))
}
}
test("union(float, double) is read as double") {
withTempPath { dir =>
val avroSchema: Schema = {
val union =
Schema.createUnion(List(Schema.create(Type.FLOAT), Schema.create(Type.DOUBLE)).asJava)
val fields = Seq(new Field("field1", union, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
schema
}
val datumWriter = new GenericDatumWriter[GenericRecord](avroSchema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(avroSchema, new File(s"$dir.avro"))
val rec1 = new GenericData.Record(avroSchema)
rec1.put("field1", 1.toFloat)
dataFileWriter.append(rec1)
val rec2 = new GenericData.Record(avroSchema)
rec2.put("field1", 2.toDouble)
dataFileWriter.append(rec2)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.schema.fields === Seq(StructField("field1", DoubleType, nullable = true)))
assert(df.collect().toSet == Set(Row(1.toDouble), Row(2.toDouble)))
}
}
test("union(float, double, null) is read as nullable double") {
withTempPath { dir =>
val avroSchema: Schema = {
val union = Schema.createUnion(
List(Schema.create(Type.FLOAT),
Schema.create(Type.DOUBLE),
Schema.create(Type.NULL)
).asJava
)
val fields = Seq(new Field("field1", union, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
schema
}
val datumWriter = new GenericDatumWriter[GenericRecord](avroSchema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(avroSchema, new File(s"$dir.avro"))
val rec1 = new GenericData.Record(avroSchema)
rec1.put("field1", 1.toFloat)
dataFileWriter.append(rec1)
val rec2 = new GenericData.Record(avroSchema)
rec2.put("field1", null)
dataFileWriter.append(rec2)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.schema.fields === Seq(StructField("field1", DoubleType, nullable = true)))
assert(df.collect().toSet == Set(Row(1.toDouble), Row(null)))
}
}
test("Union of a single type") {
withTempPath { dir =>
val UnionOfOne = Schema.createUnion(List(Schema.create(Type.INT)).asJava)
val fields = Seq(new Field("field1", UnionOfOne, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
val datumWriter = new GenericDatumWriter[GenericRecord](schema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(schema, new File(s"$dir.avro"))
val avroRec = new GenericData.Record(schema)
avroRec.put("field1", 8)
dataFileWriter.append(avroRec)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.first() == Row(8))
}
}
test("Complex Union Type") {
withTempPath { dir =>
val fixedSchema = Schema.createFixed("fixed_name", "doc", "namespace", 4)
val enumSchema = Schema.createEnum("enum_name", "doc", "namespace", List("e1", "e2").asJava)
val complexUnionType = Schema.createUnion(
List(Schema.create(Type.INT), Schema.create(Type.STRING), fixedSchema, enumSchema).asJava)
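      // A union of several non-null types is read as a struct with one field per branch
      // (member0, member1, ...); only the branch matching the written value is non-null per row.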
val fields = Seq(
new Field("field1", complexUnionType, "doc", null.asInstanceOf[AnyVal]),
new Field("field2", complexUnionType, "doc", null.asInstanceOf[AnyVal]),
new Field("field3", complexUnionType, "doc", null.asInstanceOf[AnyVal]),
new Field("field4", complexUnionType, "doc", null.asInstanceOf[AnyVal])
).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
val datumWriter = new GenericDatumWriter[GenericRecord](schema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(schema, new File(s"$dir.avro"))
val avroRec = new GenericData.Record(schema)
val field1 = 1234
val field2 = "Hope that was not load bearing"
val field3 = Array[Byte](1, 2, 3, 4)
val field4 = "e2"
avroRec.put("field1", field1)
avroRec.put("field2", field2)
avroRec.put("field3", new Fixed(fixedSchema, field3))
avroRec.put("field4", new EnumSymbol(enumSchema, field4))
dataFileWriter.append(avroRec)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.sqlContext.read.format("avro").load(s"$dir.avro")
assertResult(field1)(df.selectExpr("field1.member0").first().get(0))
assertResult(field2)(df.selectExpr("field2.member1").first().get(0))
assertResult(field3)(df.selectExpr("field3.member2").first().get(0))
assertResult(field4)(df.selectExpr("field4.member3").first().get(0))
}
}
test("Lots of nulls") {
withTempPath { dir =>
val schema = StructType(Seq(
StructField("binary", BinaryType, true),
StructField("timestamp", TimestampType, true),
StructField("array", ArrayType(ShortType), true),
StructField("map", MapType(StringType, StringType), true),
StructField("struct", StructType(Seq(StructField("int", IntegerType, true))))))
val rdd = spark.sparkContext.parallelize(Seq[Row](
Row(null, new Timestamp(1), Array[Short](1, 2, 3), null, null),
Row(null, null, null, null, null),
Row(null, null, null, null, null),
Row(null, null, null, null, null)))
val df = spark.createDataFrame(rdd, schema)
df.write.format("avro").save(dir.toString)
assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
}
}
test("Struct field type") {
withTempPath { dir =>
val schema = StructType(Seq(
StructField("float", FloatType, true),
StructField("short", ShortType, true),
StructField("byte", ByteType, true),
StructField("boolean", BooleanType, true)
))
val rdd = spark.sparkContext.parallelize(Seq(
Row(1f, 1.toShort, 1.toByte, true),
Row(2f, 2.toShort, 2.toByte, true),
Row(3f, 3.toShort, 3.toByte, true)
))
val df = spark.createDataFrame(rdd, schema)
df.write.format("avro").save(dir.toString)
assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
}
}
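  // Creates `dir` and writes a plain-text file named "corrupt.avro" into it; the contents are
  // not valid Avro, so readers treat the file as corrupt.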
private def createDummyCorruptFile(dir: File): Unit = {
Utils.tryWithResource {
FileUtils.forceMkdir(dir)
val corruptFile = new File(dir, "corrupt.avro")
new BufferedWriter(new FileWriter(corruptFile))
} { writer =>
writer.write("corrupt")
}
}
test("Ignore corrupt Avro file if flag IGNORE_CORRUPT_FILES enabled") {
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
withTempPath { dir =>
createDummyCorruptFile(dir)
val message = intercept[FileNotFoundException] {
spark.read.format("avro").load(dir.getAbsolutePath).schema
}.getMessage
assert(message.contains("No Avro files found."))
val srcFile = new File("src/test/resources/episodes.avro")
val destFile = new File(dir, "episodes.avro")
FileUtils.copyFile(srcFile, destFile)
val result = spark.read.format("avro").load(srcFile.getAbsolutePath).collect()
checkAnswer(spark.read.format("avro").load(dir.getAbsolutePath), result)
}
}
}
test("Throws IOException on reading corrupt Avro file if flag IGNORE_CORRUPT_FILES disabled") {
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
withTempPath { dir =>
createDummyCorruptFile(dir)
val message = intercept[org.apache.spark.SparkException] {
spark.read.format("avro").load(dir.getAbsolutePath)
}.getMessage
assert(message.contains("Could not read file"))
}
}
}
test("Date field type") {
withTempPath { dir =>
val schema = StructType(Seq(
StructField("float", FloatType, true),
StructField("date", DateType, true)
))
TimeZone.setDefault(TimeZone.getTimeZone("UTC"))
val rdd = spark.sparkContext.parallelize(Seq(
Row(1f, null),
Row(2f, new Date(1451948400000L)),
Row(3f, new Date(1460066400500L))
))
val df = spark.createDataFrame(rdd, schema)
df.write.format("avro").save(dir.toString)
assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
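      // DateType has day granularity, so the timestamps above are read back truncated to
      // midnight UTC (the default time zone is set to UTC above).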
checkAnswer(
spark.read.format("avro").load(dir.toString).select("date"),
Seq(Row(null), Row(new Date(1451865600000L)), Row(new Date(1459987200000L))))
}
}
test("Array data types") {
withTempPath { dir =>
val testSchema = StructType(Seq(
StructField("byte_array", ArrayType(ByteType), true),
StructField("short_array", ArrayType(ShortType), true),
StructField("float_array", ArrayType(FloatType), true),
StructField("bool_array", ArrayType(BooleanType), true),
StructField("long_array", ArrayType(LongType), true),
StructField("double_array", ArrayType(DoubleType), true),
StructField("decimal_array", ArrayType(DecimalType(10, 0)), true),
StructField("bin_array", ArrayType(BinaryType), true),
StructField("timestamp_array", ArrayType(TimestampType), true),
StructField("array_array", ArrayType(ArrayType(StringType), true), true),
StructField("struct_array", ArrayType(
StructType(Seq(StructField("name", StringType, true)))))))
val arrayOfByte = new Array[Byte](4)
for (i <- arrayOfByte.indices) {
arrayOfByte(i) = i.toByte
}
val rdd = spark.sparkContext.parallelize(Seq(
Row(arrayOfByte, Array[Short](1, 2, 3, 4), Array[Float](1f, 2f, 3f, 4f),
Array[Boolean](true, false, true, false), Array[Long](1L, 2L), Array[Double](1.0, 2.0),
Array[BigDecimal](BigDecimal.valueOf(3)), Array[Array[Byte]](arrayOfByte, arrayOfByte),
Array[Timestamp](new Timestamp(0)),
Array[Array[String]](Array[String]("CSH, tearing down the walls that divide us", "-jd")),
Array[Row](Row("Bobby G. can't swim")))))
val df = spark.createDataFrame(rdd, testSchema)
df.write.format("avro").save(dir.toString)
assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
}
}
test("write with compression - sql configs") {
withTempPath { dir =>
val uncompressDir = s"$dir/uncompress"
val bzip2Dir = s"$dir/bzip2"
val xzDir = s"$dir/xz"
val deflateDir = s"$dir/deflate"
val snappyDir = s"$dir/snappy"
val df = spark.read.format("avro").load(testAvro)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "uncompressed")
df.write.format("avro").save(uncompressDir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "bzip2")
df.write.format("avro").save(bzip2Dir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "xz")
df.write.format("avro").save(xzDir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "deflate")
spark.conf.set(SQLConf.AVRO_DEFLATE_LEVEL.key, "9")
df.write.format("avro").save(deflateDir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "snappy")
df.write.format("avro").save(snappyDir)
val uncompressSize = FileUtils.sizeOfDirectory(new File(uncompressDir))
val bzip2Size = FileUtils.sizeOfDirectory(new File(bzip2Dir))
val xzSize = FileUtils.sizeOfDirectory(new File(xzDir))
val deflateSize = FileUtils.sizeOfDirectory(new File(deflateDir))
val snappySize = FileUtils.sizeOfDirectory(new File(snappyDir))
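      // Expected ordering for this dataset: deflate (level 9) is smaller than both uncompressed
      // and snappy output, and xz compresses better than bzip2.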
assert(uncompressSize > deflateSize)
assert(snappySize > deflateSize)
assert(snappySize > bzip2Size)
assert(bzip2Size > xzSize)
}
}
test("dsl test") {
val results = spark.read.format("avro").load(episodesAvro).select("title").collect()
assert(results.length === 8)
}
test("old avro data source name works") {
val results =
spark.read.format("com.databricks.spark.avro")
.load(episodesAvro).select("title").collect()
assert(results.length === 8)
}
test("support of various data types") {
// This test uses data from test.avro. You can see the data and the schema of this file in
// test.json and test.avsc
val all = spark.read.format("avro").load(testAvro).collect()
assert(all.length == 3)
val str = spark.read.format("avro").load(testAvro).select("string").collect()
assert(str.map(_(0)).toSet.contains("Terran is IMBA!"))
val simple_map = spark.read.format("avro").load(testAvro).select("simple_map").collect()
assert(simple_map(0)(0).getClass.toString.contains("Map"))
assert(simple_map.map(_(0).asInstanceOf[Map[String, Some[Int]]].size).toSet == Set(2, 0))
val union0 = spark.read.format("avro").load(testAvro).select("union_string_null").collect()
assert(union0.map(_(0)).toSet == Set("abc", "123", null))
val union1 = spark.read.format("avro").load(testAvro).select("union_int_long_null").collect()
assert(union1.map(_(0)).toSet == Set(66, 1, null))
val union2 = spark.read.format("avro").load(testAvro).select("union_float_double").collect()
assert(
union2
.map(x => java.lang.Double.valueOf(x(0).toString))
.exists(p => Math.abs(p - Math.PI) < 0.001))
val fixed = spark.read.format("avro").load(testAvro).select("fixed3").collect()
assert(fixed.map(_(0).asInstanceOf[Array[Byte]]).exists(p => p(1) == 3))
val enum = spark.read.format("avro").load(testAvro).select("enum").collect()
assert(enum.map(_(0)).toSet == Set("SPADES", "CLUBS", "DIAMONDS"))
val record = spark.read.format("avro").load(testAvro).select("record").collect()
assert(record(0)(0).getClass.toString.contains("Row"))
assert(record.map(_(0).asInstanceOf[Row](0)).contains("TEST_STR123"))
val array_of_boolean =
spark.read.format("avro").load(testAvro).select("array_of_boolean").collect()
assert(array_of_boolean.map(_(0).asInstanceOf[Seq[Boolean]].size).toSet == Set(3, 1, 0))
val bytes = spark.read.format("avro").load(testAvro).select("bytes").collect()
assert(bytes.map(_(0).asInstanceOf[Array[Byte]].length).toSet == Set(3, 1, 0))
}
test("sql test") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW avroTable
|USING avro
|OPTIONS (path "${episodesAvro}")
""".stripMargin.replaceAll("\\n", " "))
assert(spark.sql("SELECT * FROM avroTable").collect().length === 8)
}
test("conversion to avro and back") {
// Note that test.avro includes a variety of types, some of which are nullable. We expect to
// get the same values back.
withTempPath { dir =>
val avroDir = s"$dir/avro"
spark.read.format("avro").load(testAvro).write.format("avro").save(avroDir)
checkReloadMatchesSaved(testAvro, avroDir)
}
}
test("conversion to avro and back with namespace") {
// Note that test.avro includes a variety of types, some of which are nullable. We expect to
// get the same values back.
withTempPath { tempDir =>
val name = "AvroTest"
val namespace = "org.apache.spark.avro"
val parameters = Map("recordName" -> name, "recordNamespace" -> namespace)
val avroDir = tempDir + "/namedAvro"
spark.read.format("avro").load(testAvro)
.write.options(parameters).format("avro").save(avroDir)
checkReloadMatchesSaved(testAvro, avroDir)
// Look at raw file and make sure has namespace info
val rawSaved = spark.sparkContext.textFile(avroDir)
val schema = rawSaved.collect().mkString("")
assert(schema.contains(name))
assert(schema.contains(namespace))
}
}
test("converting some specific sparkSQL types to avro") {
withTempPath { tempDir =>
val testSchema = StructType(Seq(
StructField("Name", StringType, false),
StructField("Length", IntegerType, true),
StructField("Time", TimestampType, false),
StructField("Decimal", DecimalType(10, 2), true),
StructField("Binary", BinaryType, false)))
val arrayOfByte = new Array[Byte](4)
for (i <- arrayOfByte.indices) {
arrayOfByte(i) = i.toByte
}
val cityRDD = spark.sparkContext.parallelize(Seq(
Row("San Francisco", 12, new Timestamp(666), null, arrayOfByte),
Row("Palo Alto", null, new Timestamp(777), null, arrayOfByte),
Row("Munich", 8, new Timestamp(42), Decimal(3.14), arrayOfByte)))
val cityDataFrame = spark.createDataFrame(cityRDD, testSchema)
val avroDir = tempDir + "/avro"
cityDataFrame.write.format("avro").save(avroDir)
assert(spark.read.format("avro").load(avroDir).collect().length == 3)
      // Timestamps are converted to longs
val times = spark.read.format("avro").load(avroDir).select("Time").collect()
assert(times.map(_(0)).toSet ==
Set(new Timestamp(666), new Timestamp(777), new Timestamp(42)))
// DecimalType should be converted to string
val decimals = spark.read.format("avro").load(avroDir).select("Decimal").collect()
assert(decimals.map(_(0)).contains(new java.math.BigDecimal("3.14")))
// There should be a null entry
val length = spark.read.format("avro").load(avroDir).select("Length").collect()
assert(length.map(_(0)).contains(null))
val binary = spark.read.format("avro").load(avroDir).select("Binary").collect()
for (i <- arrayOfByte.indices) {
assert(binary(1)(0).asInstanceOf[Array[Byte]](i) == arrayOfByte(i))
}
}
}
test("correctly read long as date/timestamp type") {
withTempPath { tempDir =>
val currentTime = new Timestamp(System.currentTimeMillis())
val currentDate = new Date(System.currentTimeMillis())
val schema = StructType(Seq(
StructField("_1", DateType, false), StructField("_2", TimestampType, false)))
val writeDs = Seq((currentDate, currentTime)).toDS
val avroDir = tempDir + "/avro"
writeDs.write.format("avro").save(avroDir)
assert(spark.read.format("avro").load(avroDir).collect().length == 1)
val readDs = spark.read.schema(schema).format("avro").load(avroDir).as[(Date, Timestamp)]
assert(readDs.collect().sameElements(writeDs.collect()))
}
}
test("support of globbed paths") {
val resourceDir = testFile(".")
val e1 = spark.read.format("avro").load(resourceDir + "../*/episodes.avro").collect()
assert(e1.length == 8)
val e2 = spark.read.format("avro").load(resourceDir + "../../*/*/episodes.avro").collect()
assert(e2.length == 8)
}
test("does not coerce null date/timestamp value to 0 epoch.") {
withTempPath { tempDir =>
val nullTime: Timestamp = null
val nullDate: Date = null
val schema = StructType(Seq(
StructField("_1", DateType, nullable = true),
StructField("_2", TimestampType, nullable = true))
)
val writeDs = Seq((nullDate, nullTime)).toDS
val avroDir = tempDir + "/avro"
writeDs.write.format("avro").save(avroDir)
val readValues =
spark.read.schema(schema).format("avro").load(avroDir).as[(Date, Timestamp)].collect
assert(readValues.size == 1)
assert(readValues.head == ((nullDate, nullTime)))
}
}
test("support user provided avro schema") {
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name" : "string",
| "type" : "string",
| "doc" : "Meaningless string of characters"
| }]
|}
""".stripMargin
val result = spark
.read
.option("avroSchema", avroSchema)
.format("avro")
.load(testAvro)
.collect()
val expected = spark.read.format("avro").load(testAvro).select("string").collect()
assert(result.sameElements(expected))
}
test("support user provided avro schema with defaults for missing fields") {
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name" : "missingField",
| "type" : "string",
| "default" : "foo"
| }]
|}
""".stripMargin
val result = spark
.read
.option("avroSchema", avroSchema)
.format("avro").load(testAvro).select("missingField").first
assert(result === Row("foo"))
}
test("support user provided avro schema for writing nullable enum type") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name": "enum",
| "type": [{ "type": "enum",
| "name": "Suit",
| "symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
| }, "null"]
| }]
|}
""".stripMargin
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("SPADES"), Row(null), Row("HEARTS"), Row("DIAMONDS"),
Row(null), Row("CLUBS"), Row("HEARTS"), Row("SPADES"))),
StructType(Seq(StructField("Suit", StringType, true))))
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
// Writing df containing data not in the enum will throw an exception
val message = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("SPADES"), Row("NOT-IN-ENUM"), Row("HEARTS"), Row("DIAMONDS"))),
StructType(Seq(StructField("Suit", StringType, true))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message.contains("org.apache.spark.sql.avro.IncompatibleSchemaException: " +
"Cannot write \\"NOT-IN-ENUM\\" since it's not defined in enum"))
}
}
test("support user provided avro schema for writing non-nullable enum type") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name": "enum",
| "type": { "type": "enum",
| "name": "Suit",
| "symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
| }
| }]
|}
""".stripMargin
val dfWithNull = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("SPADES"), Row(null), Row("HEARTS"), Row("DIAMONDS"),
Row(null), Row("CLUBS"), Row("HEARTS"), Row("SPADES"))),
StructType(Seq(StructField("Suit", StringType, true))))
val df = spark.createDataFrame(dfWithNull.na.drop().rdd,
StructType(Seq(StructField("Suit", StringType, false))))
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
// Writing df containing nulls without using avro union type will
// throw an exception as avro uses union type to handle null.
val message1 = intercept[SparkException] {
dfWithNull.write.format("avro")
.option("avroSchema", avroSchema).save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message1.contains("org.apache.avro.AvroRuntimeException: Not a union:"))
// Writing df containing data not in the enum will throw an exception
val message2 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("SPADES"), Row("NOT-IN-ENUM"), Row("HEARTS"), Row("DIAMONDS"))),
StructType(Seq(StructField("Suit", StringType, false))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message2.contains("org.apache.spark.sql.avro.IncompatibleSchemaException: " +
"Cannot write \\"NOT-IN-ENUM\\" since it's not defined in enum"))
}
}
test("support user provided avro schema for writing nullable fixed type") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name": "fixed2",
| "type": [{ "type": "fixed",
| "size": 2,
| "name": "fixed2"
| }, "null"]
| }]
|}
""".stripMargin
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192, 168).map(_.toByte)), Row(null))),
StructType(Seq(StructField("fixed2", BinaryType, true))))
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
// Writing df containing binary data that doesn't fit FIXED size will throw an exception
val message1 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192, 168, 1).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, true))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message1.contains("org.apache.spark.sql.avro.IncompatibleSchemaException: " +
"Cannot write 3 bytes of binary data into FIXED Type with size of 2 bytes"))
// Writing df containing binary data that doesn't fit FIXED size will throw an exception
val message2 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, true))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message2.contains("org.apache.spark.sql.avro.IncompatibleSchemaException: " +
"Cannot write 1 byte of binary data into FIXED Type with size of 2 bytes"))
}
}
test("support user provided avro schema for writing non-nullable fixed type") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name": "fixed2",
| "type": { "type": "fixed",
| "size": 2,
| "name": "fixed2"
| }
| }]
|}
""".stripMargin
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192, 168).map(_.toByte)), Row(Array(1, 1).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, false))))
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
// Writing df containing binary data that doesn't fit FIXED size will throw an exception
val message1 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192, 168, 1).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, false))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message1.contains("org.apache.spark.sql.avro.IncompatibleSchemaException: " +
"Cannot write 3 bytes of binary data into FIXED Type with size of 2 bytes"))
// Writing df containing binary data that doesn't fit FIXED size will throw an exception
val message2 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, false))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message2.contains("org.apache.spark.sql.avro.IncompatibleSchemaException: " +
"Cannot write 1 byte of binary data into FIXED Type with size of 2 bytes"))
}
}
test("error handling for unsupported Interval data types") {
withTempDir { dir =>
val tempDir = new File(dir, "files").getCanonicalPath
var msg = intercept[AnalysisException] {
sql("select interval 1 days").write.format("avro").mode("overwrite").save(tempDir)
}.getMessage
assert(msg.contains("Cannot save interval data type into external storage."))
msg = intercept[AnalysisException] {
spark.udf.register("testType", () => new IntervalData())
sql("select testType()").write.format("avro").mode("overwrite").save(tempDir)
}.getMessage
assert(msg.toLowerCase(Locale.ROOT)
.contains(s"avro data source does not support calendarinterval data type."))
}
}
test("support Null data types") {
withTempDir { dir =>
val tempDir = new File(dir, "files").getCanonicalPath
val df = sql("select null")
df.write.format("avro").mode("overwrite").save(tempDir)
checkAnswer(spark.read.format("avro").load(tempDir), df)
}
}
test("throw exception if unable to write with user provided Avro schema") {
val input: Seq[(DataType, Schema.Type)] = Seq(
(NullType, NULL),
(BooleanType, BOOLEAN),
(ByteType, INT),
(ShortType, INT),
(IntegerType, INT),
(LongType, LONG),
(FloatType, FLOAT),
(DoubleType, DOUBLE),
(BinaryType, BYTES),
(DateType, INT),
(TimestampType, LONG),
(DecimalType(4, 2), BYTES)
)
def assertException(f: () => AvroSerializer) {
val message = intercept[org.apache.spark.sql.avro.IncompatibleSchemaException] {
f()
}.getMessage
assert(message.contains("Cannot convert Catalyst type"))
}
def resolveNullable(schema: Schema, nullable: Boolean): Schema = {
if (nullable && schema.getType != NULL) {
Schema.createUnion(schema, Schema.create(NULL))
} else {
schema
}
}
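    // For every mismatched pair of Catalyst and Avro types, constructing an AvroSerializer must
    // fail, including when both sides are wrapped in arrays, maps, or single-field records.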
for {
i <- input
j <- input
nullable <- Seq(true, false)
} if (i._2 != j._2) {
val avroType = resolveNullable(Schema.create(j._2), nullable)
val avroArrayType = resolveNullable(Schema.createArray(avroType), nullable)
val avroMapType = resolveNullable(Schema.createMap(avroType), nullable)
val name = "foo"
val avroField = new Field(name, avroType, "", null.asInstanceOf[AnyVal])
val recordSchema = Schema.createRecord("name", "doc", "space", true, Seq(avroField).asJava)
val avroRecordType = resolveNullable(recordSchema, nullable)
val catalystType = i._1
val catalystArrayType = ArrayType(catalystType, nullable)
val catalystMapType = MapType(StringType, catalystType, nullable)
val catalystStructType = StructType(Seq(StructField(name, catalystType, nullable)))
for {
avro <- Seq(avroType, avroArrayType, avroMapType, avroRecordType)
catalyst <- Seq(catalystType, catalystArrayType, catalystMapType, catalystStructType)
} {
assertException(() => new AvroSerializer(catalyst, avro, nullable))
}
}
}
test("reading from invalid path throws exception") {
// Directory given has no avro files
intercept[AnalysisException] {
withTempPath(dir => spark.read.format("avro").load(dir.getCanonicalPath))
}
intercept[AnalysisException] {
spark.read.format("avro").load("very/invalid/path/123.avro")
}
// In case of globbed path that can't be matched to anything, another exception is thrown (and
// exception message is helpful)
intercept[AnalysisException] {
spark.read.format("avro").load("*/*/*/*/*/*/*/something.avro")
}
intercept[FileNotFoundException] {
withTempPath { dir =>
FileUtils.touch(new File(dir, "test"))
withSQLConf(AvroFileFormat.IgnoreFilesWithoutExtensionProperty -> "true") {
spark.read.format("avro").load(dir.toString)
}
}
}
intercept[FileNotFoundException] {
withTempPath { dir =>
FileUtils.touch(new File(dir, "test"))
spark
.read
.option("ignoreExtension", false)
.format("avro")
.load(dir.toString)
}
}
}
test("SQL test insert overwrite") {
withTempPath { tempDir =>
val tempEmptyDir = s"$tempDir/sqlOverwrite"
// Create a temp directory for table that will be overwritten
new File(tempEmptyDir).mkdirs()
spark.sql(
s"""
|CREATE TEMPORARY VIEW episodes
|USING avro
|OPTIONS (path "${episodesAvro}")
""".stripMargin.replaceAll("\\n", " "))
spark.sql(
s"""
|CREATE TEMPORARY VIEW episodesEmpty
|(name string, air_date string, doctor int)
|USING avro
|OPTIONS (path "$tempEmptyDir")
""".stripMargin.replaceAll("\\n", " "))
assert(spark.sql("SELECT * FROM episodes").collect().length === 8)
assert(spark.sql("SELECT * FROM episodesEmpty").collect().isEmpty)
spark.sql(
s"""
|INSERT OVERWRITE TABLE episodesEmpty
|SELECT * FROM episodes
""".stripMargin.replaceAll("\\n", " "))
assert(spark.sql("SELECT * FROM episodesEmpty").collect().length == 8)
}
}
test("test save and load") {
// Test if load works as expected
withTempPath { tempDir =>
val df = spark.read.format("avro").load(episodesAvro)
assert(df.count == 8)
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").save(tempSaveDir)
val newDf = spark.read.format("avro").load(tempSaveDir)
assert(newDf.count == 8)
}
}
test("test load with non-Avro file") {
// Test if load works as expected
withTempPath { tempDir =>
val df = spark.read.format("avro").load(episodesAvro)
assert(df.count == 8)
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").save(tempSaveDir)
Files.createFile(new File(tempSaveDir, "non-avro").toPath)
withSQLConf(AvroFileFormat.IgnoreFilesWithoutExtensionProperty -> "true") {
val newDf = spark.read.format("avro").load(tempSaveDir)
assert(newDf.count() == 8)
}
}
}
test("read avro with user defined schema: read partial columns") {
val partialColumns = StructType(Seq(
StructField("string", StringType, false),
StructField("simple_map", MapType(StringType, IntegerType), false),
StructField("complex_map", MapType(StringType, MapType(StringType, StringType)), false),
StructField("union_string_null", StringType, true),
StructField("union_int_long_null", LongType, true),
StructField("fixed3", BinaryType, true),
StructField("fixed2", BinaryType, true),
StructField("enum", StringType, false),
StructField("record", StructType(Seq(StructField("value_field", StringType, false))), false),
StructField("array_of_boolean", ArrayType(BooleanType), false),
StructField("bytes", BinaryType, true)))
val withSchema = spark.read.schema(partialColumns).format("avro").load(testAvro).collect()
val withOutSchema = spark
.read
.format("avro")
.load(testAvro)
.select("string", "simple_map", "complex_map", "union_string_null", "union_int_long_null",
"fixed3", "fixed2", "enum", "record", "array_of_boolean", "bytes")
.collect()
assert(withSchema.sameElements(withOutSchema))
}
test("read avro with user defined schema: read non-exist columns") {
val schema =
StructType(
Seq(
StructField("non_exist_string", StringType, true),
StructField(
"record",
StructType(Seq(
StructField("non_exist_field", StringType, false),
StructField("non_exist_field2", StringType, false))),
false)))
val withEmptyColumn = spark.read.schema(schema).format("avro").load(testAvro).collect()
assert(withEmptyColumn.forall(_ == Row(null: String, Row(null: String, null: String))))
}
test("read avro file partitioned") {
withTempPath { dir =>
val df = (0 to 1024 * 3).toDS.map(i => s"record${i}").toDF("records")
val outputDir = s"$dir/${UUID.randomUUID}"
df.write.format("avro").save(outputDir)
val input = spark.read.format("avro").load(outputDir)
assert(input.collect.toSet.size === 1024 * 3 + 1)
assert(input.rdd.partitions.size > 2)
}
}
case class NestedBottom(id: Int, data: String)
case class NestedMiddle(id: Int, data: NestedBottom)
case class NestedTop(id: Int, data: NestedMiddle)
test("Validate namespace in avro file that has nested records with the same name") {
withTempPath { dir =>
val writeDf = spark.createDataFrame(List(NestedTop(1, NestedMiddle(2, NestedBottom(3, "1")))))
writeDf.write.format("avro").save(dir.toString)
val schema = getAvroSchemaStringFromFiles(dir.toString)
assert(schema.contains("\\"namespace\\":\\"topLevelRecord\\""))
assert(schema.contains("\\"namespace\\":\\"topLevelRecord.data\\""))
}
}
test("saving avro that has nested records with the same name") {
withTempPath { tempDir =>
      // Save the Avro file to the output folder path
val writeDf = spark.createDataFrame(List(NestedTop(1, NestedMiddle(2, NestedBottom(3, "1")))))
val outputFolder = s"$tempDir/duplicate_names/"
writeDf.write.format("avro").save(outputFolder)
      // Read the Avro file saved in the previous step
      val readDf = spark.read.format("avro").load(outputFolder)
      // Check that the written DataFrame equals the read DataFrame
assert(readDf.collect().sameElements(writeDf.collect()))
}
}
test("check namespace - toAvroType") {
val sparkSchema = StructType(Seq(
StructField("name", StringType, nullable = false),
StructField("address", StructType(Seq(
StructField("city", StringType, nullable = false),
StructField("state", StringType, nullable = false))),
nullable = false)))
val employeeType = SchemaConverters.toAvroType(sparkSchema,
recordName = "employee",
nameSpace = "foo.bar")
assert(employeeType.getFullName == "foo.bar.employee")
assert(employeeType.getName == "employee")
assert(employeeType.getNamespace == "foo.bar")
val addressType = employeeType.getField("address").schema()
assert(addressType.getFullName == "foo.bar.employee.address")
assert(addressType.getName == "address")
assert(addressType.getNamespace == "foo.bar.employee")
}
test("check empty namespace - toAvroType") {
val sparkSchema = StructType(Seq(
StructField("name", StringType, nullable = false),
StructField("address", StructType(Seq(
StructField("city", StringType, nullable = false),
StructField("state", StringType, nullable = false))),
nullable = false)))
val employeeType = SchemaConverters.toAvroType(sparkSchema,
recordName = "employee")
assert(employeeType.getFullName == "employee")
assert(employeeType.getName == "employee")
assert(employeeType.getNamespace == null)
val addressType = employeeType.getField("address").schema()
assert(addressType.getFullName == "employee.address")
assert(addressType.getName == "address")
assert(addressType.getNamespace == "employee")
}
case class NestedMiddleArray(id: Int, data: Array[NestedBottom])
case class NestedTopArray(id: Int, data: NestedMiddleArray)
test("saving avro that has nested records with the same name inside an array") {
withTempPath { tempDir =>
      // Save the Avro file to the output folder path
val writeDf = spark.createDataFrame(
List(NestedTopArray(1, NestedMiddleArray(2, Array(
NestedBottom(3, "1"), NestedBottom(4, "2")
))))
)
val outputFolder = s"$tempDir/duplicate_names_array/"
writeDf.write.format("avro").save(outputFolder)
      // Read the Avro file saved in the previous step
      val readDf = spark.read.format("avro").load(outputFolder)
      // Check that the written DataFrame equals the read DataFrame
assert(readDf.collect().sameElements(writeDf.collect()))
}
}
case class NestedMiddleMap(id: Int, data: Map[String, NestedBottom])
case class NestedTopMap(id: Int, data: NestedMiddleMap)
test("saving avro that has nested records with the same name inside a map") {
withTempPath { tempDir =>
      // Save the Avro file to the output folder path
val writeDf = spark.createDataFrame(
List(NestedTopMap(1, NestedMiddleMap(2, Map(
"1" -> NestedBottom(3, "1"), "2" -> NestedBottom(4, "2")
))))
)
val outputFolder = s"$tempDir/duplicate_names_map/"
writeDf.write.format("avro").save(outputFolder)
      // Read the Avro file saved in the previous step
      val readDf = spark.read.format("avro").load(outputFolder)
      // Check that the written DataFrame equals the read DataFrame
assert(readDf.collect().sameElements(writeDf.collect()))
}
}
test("SPARK-24805: do not ignore files without .avro extension by default") {
withTempDir { dir =>
Files.copy(
Paths.get(new URL(episodesAvro).toURI),
Paths.get(dir.getCanonicalPath, "episodes"))
val fileWithoutExtension = s"${dir.getCanonicalPath}/episodes"
val df1 = spark.read.format("avro").load(fileWithoutExtension)
assert(df1.count == 8)
val schema = new StructType()
.add("title", StringType)
.add("air_date", StringType)
.add("doctor", IntegerType)
val df2 = spark.read.schema(schema).format("avro").load(fileWithoutExtension)
assert(df2.count == 8)
}
}
test("SPARK-24836: checking the ignoreExtension option") {
withTempPath { tempDir =>
val df = spark.read.format("avro").load(episodesAvro)
assert(df.count == 8)
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").save(tempSaveDir)
Files.createFile(new File(tempSaveDir, "non-avro").toPath)
val newDf = spark
.read
.option("ignoreExtension", false)
.format("avro")
.load(tempSaveDir)
assert(newDf.count == 8)
}
}
test("SPARK-24836: ignoreExtension must override hadoop's config") {
withTempDir { dir =>
Files.copy(
Paths.get(new URL(episodesAvro).toURI),
Paths.get(dir.getCanonicalPath, "episodes"))
val hadoopConf = spark.sessionState.newHadoopConf()
withSQLConf(AvroFileFormat.IgnoreFilesWithoutExtensionProperty -> "true") {
val newDf = spark
.read
.option("ignoreExtension", "true")
.format("avro")
.load(s"${dir.getCanonicalPath}/episodes")
assert(newDf.count() == 8)
}
}
}
test("SPARK-24881: write with compression - avro options") {
def getCodec(dir: String): Option[String] = {
val files = new File(dir)
.listFiles()
.filter(_.isFile)
.filter(_.getName.endsWith("avro"))
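      // Read the "avro.codec" metadata from each data file; Avro reports the literal string
      // "null" for uncompressed files, which is normalized to "uncompressed" below.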
files.map { file =>
val reader = new DataFileReader(file, new GenericDatumReader[Any]())
val r = reader.getMetaString("avro.codec")
r
}.map(v => if (v == "null") "uncompressed" else v).headOption
}
def checkCodec(df: DataFrame, dir: String, codec: String): Unit = {
val subdir = s"$dir/$codec"
df.write.option("compression", codec).format("avro").save(subdir)
assert(getCodec(subdir) == Some(codec))
}
withTempPath { dir =>
val path = dir.toString
val df = spark.read.format("avro").load(testAvro)
checkCodec(df, path, "uncompressed")
checkCodec(df, path, "deflate")
checkCodec(df, path, "snappy")
checkCodec(df, path, "bzip2")
checkCodec(df, path, "xz")
}
}
private def checkSchemaWithRecursiveLoop(avroSchema: String): Unit = {
val message = intercept[IncompatibleSchemaException] {
SchemaConverters.toSqlType(new Schema.Parser().parse(avroSchema))
}.getMessage
assert(message.contains("Found recursive reference in Avro schema"))
}
test("Detect recursive loop") {
checkSchemaWithRecursiveLoop("""
|{
| "type": "record",
| "name": "LongList",
| "fields" : [
| {"name": "value", "type": "long"}, // each element has a long
| {"name": "next", "type": ["null", "LongList"]} // optional next element
| ]
|}
""".stripMargin)
checkSchemaWithRecursiveLoop("""
|{
| "type": "record",
| "name": "LongList",
| "fields": [
| {
| "name": "value",
| "type": {
| "type": "record",
| "name": "foo",
| "fields": [
| {
| "name": "parent",
| "type": "LongList"
| }
| ]
| }
| }
| ]
|}
""".stripMargin)
checkSchemaWithRecursiveLoop("""
|{
| "type": "record",
| "name": "LongList",
| "fields" : [
| {"name": "value", "type": "long"},
| {"name": "array", "type": {"type": "array", "items": "LongList"}}
| ]
|}
""".stripMargin)
checkSchemaWithRecursiveLoop("""
|{
| "type": "record",
| "name": "LongList",
| "fields" : [
| {"name": "value", "type": "long"},
| {"name": "map", "type": {"type": "map", "values": "LongList"}}
| ]
|}
""".stripMargin)
}
}
| WindCanDie/spark | external/avro/src/test/scala/org/apache/spark/sql/avro/AvroSuite.scala | Scala | apache-2.0 | 53,335 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.util.Comparator
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, CodegenFallback, ExprCode}
import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData, MapData}
import org.apache.spark.sql.types._
/**
* Given an array or map, returns its size.
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns the size of an array or a map.",
extended = " > SELECT _FUNC_(array('b', 'd', 'c', 'a'));\\n 4")
case class Size(child: Expression) extends UnaryExpression with ExpectsInputTypes {
override def dataType: DataType = IntegerType
override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(ArrayType, MapType))
override def nullSafeEval(value: Any): Int = child.dataType match {
case _: ArrayType => value.asInstanceOf[ArrayData].numElements()
case _: MapType => value.asInstanceOf[MapData].numElements()
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, c => s"${ev.value} = ($c).numElements();")
}
}
/**
* Returns an unordered array containing the keys of the map.
*/
@ExpressionDescription(
usage = "_FUNC_(map) - Returns an unordered array containing the keys of the map.",
extended = " > SELECT _FUNC_(map(1, 'a', 2, 'b'));\\n [1,2]")
case class MapKeys(child: Expression)
extends UnaryExpression with ExpectsInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(MapType)
override def dataType: DataType = ArrayType(child.dataType.asInstanceOf[MapType].keyType)
override def nullSafeEval(map: Any): Any = {
map.asInstanceOf[MapData].keyArray()
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, c => s"${ev.value} = ($c).keyArray();")
}
override def prettyName: String = "map_keys"
}
/**
* Returns an unordered array containing the values of the map.
*/
@ExpressionDescription(
usage = "_FUNC_(map) - Returns an unordered array containing the values of the map.",
extended = " > SELECT _FUNC_(map(1, 'a', 2, 'b'));\\n [\\"a\\",\\"b\\"]")
case class MapValues(child: Expression)
extends UnaryExpression with ExpectsInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(MapType)
override def dataType: DataType = ArrayType(child.dataType.asInstanceOf[MapType].valueType)
override def nullSafeEval(map: Any): Any = {
map.asInstanceOf[MapData].valueArray()
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, c => s"${ev.value} = ($c).valueArray();")
}
override def prettyName: String = "map_values"
}
/**
* Sorts the input array in ascending / descending order according to the natural ordering of
* the array elements and returns it.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(array(obj1, obj2, ...), ascendingOrder) - Sorts the input array in ascending order according to the natural ordering of the array elements.",
extended = " > SELECT _FUNC_(array('b', 'd', 'c', 'a'), true);\\n 'a', 'b', 'c', 'd'")
// scalastyle:on line.size.limit
case class SortArray(base: Expression, ascendingOrder: Expression)
extends BinaryExpression with ExpectsInputTypes with CodegenFallback {
def this(e: Expression) = this(e, Literal(true))
override def left: Expression = base
override def right: Expression = ascendingOrder
override def dataType: DataType = base.dataType
override def inputTypes: Seq[AbstractDataType] = Seq(ArrayType, BooleanType)
override def checkInputDataTypes(): TypeCheckResult = base.dataType match {
case ArrayType(dt, _) if RowOrdering.isOrderable(dt) =>
ascendingOrder match {
case Literal(_: Boolean, BooleanType) =>
TypeCheckResult.TypeCheckSuccess
case _ =>
TypeCheckResult.TypeCheckFailure(
"Sort order in second argument requires a boolean literal.")
}
case ArrayType(dt, _) =>
TypeCheckResult.TypeCheckFailure(
s"$prettyName does not support sorting array of type ${dt.simpleString}")
case _ =>
TypeCheckResult.TypeCheckFailure(s"$prettyName only supports array input.")
}
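  // Comparator for ascending order: null elements sort first, non-null elements use the
  // natural ordering of the element type.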
@transient
private lazy val lt: Comparator[Any] = {
val ordering = base.dataType match {
case _ @ ArrayType(n: AtomicType, _) => n.ordering.asInstanceOf[Ordering[Any]]
case _ @ ArrayType(a: ArrayType, _) => a.interpretedOrdering.asInstanceOf[Ordering[Any]]
case _ @ ArrayType(s: StructType, _) => s.interpretedOrdering.asInstanceOf[Ordering[Any]]
}
new Comparator[Any]() {
override def compare(o1: Any, o2: Any): Int = {
if (o1 == null && o2 == null) {
0
} else if (o1 == null) {
-1
} else if (o2 == null) {
1
} else {
ordering.compare(o1, o2)
}
}
}
}
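  // Comparator for descending order: null elements sort last and the comparison is reversed.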
@transient
private lazy val gt: Comparator[Any] = {
val ordering = base.dataType match {
case _ @ ArrayType(n: AtomicType, _) => n.ordering.asInstanceOf[Ordering[Any]]
case _ @ ArrayType(a: ArrayType, _) => a.interpretedOrdering.asInstanceOf[Ordering[Any]]
case _ @ ArrayType(s: StructType, _) => s.interpretedOrdering.asInstanceOf[Ordering[Any]]
}
new Comparator[Any]() {
override def compare(o1: Any, o2: Any): Int = {
if (o1 == null && o2 == null) {
0
} else if (o1 == null) {
1
} else if (o2 == null) {
-1
} else {
-ordering.compare(o1, o2)
}
}
}
}
override def nullSafeEval(array: Any, ascending: Any): Any = {
val elementType = base.dataType.asInstanceOf[ArrayType].elementType
val data = array.asInstanceOf[ArrayData].toArray[AnyRef](elementType)
if (elementType != NullType) {
java.util.Arrays.sort(data, if (ascending.asInstanceOf[Boolean]) lt else gt)
}
new GenericArrayData(data.asInstanceOf[Array[Any]])
}
override def prettyName: String = "sort_array"
}
/**
* Checks if the array (left) has the element (right)
*/
@ExpressionDescription(
usage = "_FUNC_(array, value) - Returns TRUE if the array contains the value.",
extended = " > SELECT _FUNC_(array(1, 2, 3), 2);\\n true")
case class ArrayContains(left: Expression, right: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = BooleanType
override def inputTypes: Seq[AbstractDataType] = right.dataType match {
case NullType => Seq()
case _ => left.dataType match {
case n @ ArrayType(element, _) => Seq(n, element)
case _ => Seq()
}
}
override def checkInputDataTypes(): TypeCheckResult = {
if (right.dataType == NullType) {
TypeCheckResult.TypeCheckFailure("Null typed values cannot be used as arguments")
} else if (!left.dataType.isInstanceOf[ArrayType]
|| left.dataType.asInstanceOf[ArrayType].elementType != right.dataType) {
TypeCheckResult.TypeCheckFailure(
"Arguments must be an array followed by a value of same type as the array members")
} else {
TypeCheckResult.TypeCheckSuccess
}
}
override def nullable: Boolean = {
left.nullable || right.nullable || left.dataType.asInstanceOf[ArrayType].containsNull
}
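  // Three-valued semantics: true if the value is found, null if it is not found but the array
  // contains null elements, false otherwise.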
override def nullSafeEval(arr: Any, value: Any): Any = {
var hasNull = false
arr.asInstanceOf[ArrayData].foreach(right.dataType, (i, v) =>
if (v == null) {
hasNull = true
} else if (v == value) {
return true
}
)
if (hasNull) {
null
} else {
false
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (arr, value) => {
val i = ctx.freshName("i")
val getValue = ctx.getValue(arr, right.dataType, i)
s"""
for (int $i = 0; $i < $arr.numElements(); $i ++) {
if ($arr.isNullAt($i)) {
${ev.isNull} = true;
} else if (${ctx.genEqual(right.dataType, value, getValue)}) {
${ev.isNull} = false;
${ev.value} = true;
break;
}
}
"""
})
}
override def prettyName: String = "array_contains"
}
| gioenn/xSpark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala | Scala | apache-2.0 | 9,090 |
package de.tototec.sbuild.internal
import scala.annotation.tailrec
import de.tototec.sbuild.Logger
import de.tototec.sbuild.ProjectConfigurationException
class DependentClassesOrderer {
private[this] val log = Logger[DependentClassesOrderer]
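  /**
   * Orders `classes` so that every class comes after the classes it depends on.
   * `dependencies` holds (dependent, dependee) pairs; the loop repeatedly chains a class whose
   * dependees are all chained already (a simple topological sort) and fails with a
   * ProjectConfigurationException if no such class exists, i.e. on a dependency cycle.
   * For example, given classes Seq(b, a) and dependencies Seq(b -> a), the result is Seq(a, b).
   */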
def orderClasses(classes: Seq[Class[_]], dependencies: Seq[(Class[_], Class[_])]): Seq[Class[_]] = {
var unchained: Seq[Class[_]] = classes
var chained: Seq[Class[_]] = Seq()
log.debug(s"Trying to order plugins: ${unchained}")
def hasNoDeps(plugin: Class[_]): Boolean = dependencies.filter { case (a, b) => a == plugin && unchained.contains(b) }.isEmpty
@tailrec
def searchNextResolved(candidates: Seq[Class[_]]): Option[Class[_]] = candidates match {
case Seq() => None
case head +: tail => if (hasNoDeps(head)) Some(head) else searchNextResolved(tail)
}
while (!unchained.isEmpty) {
log.debug(s"aleady chained: ${chained}")
log.debug(s"still needs chaining: ${unchained}")
searchNextResolved(unchained) match {
case None => throw new ProjectConfigurationException("Could not resolve inter plugin dependencies")
case Some(c) =>
// val c = c_.asInstanceOf[Class[_]]
val unchainedSize = unchained.size
log.debug(s"chaining plugin: ${c} with id: ${System.identityHashCode(c)}")
chained ++= Seq(c)
unchained = unchained.filter(_ != c)
require(unchainedSize > unchained.size, "Unchained plugins must shrink after one plugin is chained")
}
}
chained
}
} | SBuild-org/sbuild | de.tototec.sbuild/src/main/scala/de/tototec/sbuild/internal/DependentClassesOrderer.scala | Scala | apache-2.0 | 1,568 |
/*
* SimpleMovieTest.scala
* Simple movie example tests.
*
* Created By: Avi Pfeffer ([email protected])
* Creation Date: Jan 1, 2009
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email [email protected] for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.test.example
import org.scalatest.Matchers
import org.scalatest.WordSpec
import com.cra.figaro.algorithm._
import com.cra.figaro.algorithm.sampling._
import com.cra.figaro.algorithm.factored._
import com.cra.figaro.library.atomic.discrete._
import com.cra.figaro.language._
import com.cra.figaro.language.Universe._
import com.cra.figaro.test._
import com.cra.figaro.test.tags.Example
class SimpleMovieTest extends WordSpec with Matchers {
"A PRM with a global constraint without mutation" should {
"produce the correct probability under variable elimination" taggedAs (Example) in {
test((e: Element[Boolean]) => VariableElimination(e))
}
"produce the correct probability under importance sampling" taggedAs (Example) in {
test((e: Element[Boolean]) => Importance(20000, e))
}
"produce the correct probability under Metropolis-Hastings" taggedAs (Example) in {
test((e: Element[Boolean]) => { val m = MetropolisHastings(100000, chooseScheme, 0, e); /*m.debug = true;*/ m })
}
}
class Actor(name: String) {
val famous = Flip(0.1)(name + "famous", universe)
}
class Movie(name: String) {
val quality = Select(0.3 -> 'low, 0.5 -> 'medium, 0.2 -> 'high)(name + "quality", universe)
}
class Appearance(name: String, val actor: Actor, val movie: Movie) {
def probAward(quality: Symbol, famous: Boolean) =
(quality, famous) match {
case ('low, false) => 0.001
case ('low, true) => 0.01
case ('medium, false) => 0.01
case ('medium, true) => 0.05
case ('high, false) => 0.05
case ('high, true) => 0.2
}
val pa = Apply(movie.quality, actor.famous, (q: Symbol, f: Boolean) => probAward(q, f))(name + "probAward", universe)
val award = SwitchingFlip(pa)(name + "award", universe)
}
val numActors = 3
val numMovies = 2
val numAppearances = 3
var actors: Array[Actor] = _
var movies: Array[Movie] = _
var appearances: Array[Appearance] = _
val random = new scala.util.Random()
// A proposal either proposes to switch the awardee to another awardee or proposes the properties of a movie or
// an actor.
def chooseScheme(): ProposalScheme = {
DisjointScheme(
(0.5, () => switchAwards()),
(0.25, () => ProposalScheme(actors(random.nextInt(numActors)).famous)),
(0.25, () => ProposalScheme(movies(random.nextInt(numMovies)).quality)))
}
/*
* It's possible that as a result of other attributes changing, an appearance becomes awarded or unawarded.
* Therefore, we have to take this into account in the proposal scheme.
*/
def switchAwards(): ProposalScheme = {
val (awarded, unawarded) = appearances.partition(_.award.value)
awarded.length match {
case 1 =>
val other = unawarded(random.nextInt(numAppearances - 1))
ProposalScheme(awarded(0).award, other.award)
case 0 =>
ProposalScheme(appearances(random.nextInt(numAppearances)).award) // make something unawarded awarded
case _ =>
ProposalScheme(awarded(random.nextInt(awarded.length)).award)
}
}
def test(algorithmCreator: Element[Boolean] => ProbQueryAlgorithm): Unit = {
Universe.createNew()
val actor1 = new Actor("actor1")
val actor2 = new Actor("actor2")
val actor3 = new Actor("actor3")
val movie1 = new Movie("movie1")
val movie2 = new Movie("movie2")
val appearance1 = new Appearance("appearance1", actor1, movie1)
val appearance2 = new Appearance("appearance2", actor2, movie2)
val appearance3 = new Appearance("appearance3", actor3, movie2)
actors = Array(actor1, actor2, actor3)
movies = Array(movie1, movie2)
appearances = Array(appearance1, appearance2, appearance3)
// Ensure that exactly one appearance gets an award.
val appearanceAwards: Array[Element[Boolean]] = appearances map (_.award)
val allAwards: Element[List[Boolean]] = Inject(appearanceAwards: _*)("allAwards", universe)
def uniqueAwardCondition(awards: List[Boolean]) = awards.count((b: Boolean) => b) == 1
allAwards.setCondition(uniqueAwardCondition)
actor3.famous.observe(true)
movie2.quality.observe('high)
// We first make sure the initial state satisfies the unique award condition, and then make sure that all
// subsequent proposals keep that condition.
appearances.foreach(_.award.randomness = 0.0)
appearances(random.nextInt(numAppearances)).award.randomness = 1.0
appearances.foreach(appearance =>
appearance.award.value = appearance.award.generateValue(appearance.award.randomness))
allAwards.generate()
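    // Hand-computed reference: each qAppearanceNAward is the marginal probability of that
    // appearance's award flip, using priors for unobserved attributes and the observed values
    // for actor3.famous and movie2.quality; the unique-award constraint then renormalizes over
    // the three "exactly one award" configurations.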
val qAppearance1Award = 0.1 * (0.3 * 0.01 + 0.5 * 0.05 + 0.2 * 0.2) + 0.9 * (0.3 * 0 + 0.5 * 0.01 + 0.2 * 0.05)
val qAppearance2Award = 0.1 * 0.2 + 0.9 * 0.05
val qAppearance3Award = 0.2
val qAppearance1Only = qAppearance1Award * (1 - qAppearance2Award) * (1 - qAppearance3Award)
val qAppearance2Only = qAppearance2Award * (1 - qAppearance1Award) * (1 - qAppearance3Award)
val qAppearance3Only = qAppearance3Award * (1 - qAppearance1Award) * (1 - qAppearance2Award)
val pAppearance3Award = qAppearance3Only / (qAppearance1Only + qAppearance2Only + qAppearance3Only)
val alg = algorithmCreator(appearance3.award)
alg.start()
alg.stop()
alg.probability(appearance3.award, true) should be(pAppearance3Award +- 0.01)
alg.kill()
}
}
| jyuhuan/figaro | Figaro/src/test/scala/com/cra/figaro/test/example/SimpleMovieTest.scala | Scala | bsd-3-clause | 5,789 |
package forcomp
import scala.collection.mutable
object Anagrams {
/** A word is simply a `String`. */
type Word = String
/** A sentence is a `List` of words. */
type Sentence = List[Word]
/** `Occurrences` is a `List` of pairs of characters and positive integers saying
* how often the character appears.
   * This list is sorted alphabetically w.r.t. the character in each pair.
* All characters in the occurrence list are lowercase.
*
* Any list of pairs of lowercase characters and their frequency which is not sorted
* is **not** an occurrence list.
*
* Note: If the frequency of some character is zero, then that character should not be
* in the list.
*/
type Occurrences = List[(Char, Int)]
/** The dictionary is simply a sequence of words.
* It is predefined and obtained as a sequence using the utility method `loadDictionary`.
*/
val dictionary: List[Word] = loadDictionary
/** Converts the word into its character occurrence list.
*
* Note: the uppercase and lowercase version of the character are treated as the
* same character, and are represented as a lowercase character in the occurrence list.
*
* Note: you must use `groupBy` to implement this method!
*/
def wordOccurrences(w: Word): Occurrences = w
.toLowerCase
.groupBy(c => c)
.mapValues(_.length)
.toList
.sorted
/** Converts a sentence into its character occurrence list. */
def sentenceOccurrences(s: Sentence): Occurrences =
wordOccurrences(s mkString)
/** The `dictionaryByOccurrences` is a `Map` from different occurrences to a sequence of all
* the words that have that occurrence count.
* This map serves as an easy way to obtain all the anagrams of a word given its occurrence list.
*
* For example, the word "eat" has the following character occurrence list:
*
* `List(('a', 1), ('e', 1), ('t', 1))`
*
* Incidentally, so do the words "ate" and "tea".
*
* This means that the `dictionaryByOccurrences` map will contain an entry:
*
* List(('a', 1), ('e', 1), ('t', 1)) -> Seq("ate", "eat", "tea")
*
*/
lazy val dictionaryByOccurrences: Map[Occurrences, List[Word]] =
dictionary groupBy wordOccurrences withDefaultValue Nil
/** Returns all the anagrams of a given word. */
def wordAnagrams(word: Word): List[Word] =
dictionaryByOccurrences(wordOccurrences(word))
/** Returns the list of all subsets of the occurrence list.
* This includes the occurrence itself, i.e. `List(('k', 1), ('o', 1))`
* is a subset of `List(('k', 1), ('o', 1))`.
* It also include the empty subset `List()`.
*
* Example: the subsets of the occurrence list `List(('a', 2), ('b', 2))` are:
*
* List(
* List(),
* List(('a', 1)),
* List(('a', 2)),
* List(('b', 1)),
* List(('a', 1), ('b', 1)),
* List(('a', 2), ('b', 1)),
* List(('b', 2)),
* List(('a', 1), ('b', 2)),
* List(('a', 2), ('b', 2))
* )
*
* Note that the order of the occurrence list subsets does not matter -- the subsets
* in the example above could have been displayed in some other order.
*/
def combinations(occurrences: Occurrences): List[Occurrences] = occurrences match {
case Nil => List(Nil)
case (char, times) :: xs =>
for {
comb <- combinations(xs)
n <- 0 to times
} yield if (n == 0) comb else (char, n) :: comb
}
/** Subtracts occurrence list `y` from occurrence list `x`.
*
* The precondition is that the occurrence list `y` is a subset of
* the occurrence list `x` -- any character appearing in `y` must
* appear in `x`, and its frequency in `y` must be smaller or equal
* than its frequency in `x`.
*
* Note: the resulting value is an occurrence - meaning it is sorted
* and has no zero-entries.
*/
// def subtract(x: Occurrences, y: Occurrences): Occurrences = {
//
// val ymap = y.toMap withDefaultValue 0
//
// x.foldLeft(Map[Char, Int]()) {
// case (map, (char, times)) =>
  //       if (times - ymap(char) > 0) map + (char -> (times - ymap(char)))
// else map
// }.toList.sortWith(_._1 < _._1)
// }
def subtract(x: Occurrences, y: Occurrences): Occurrences = y
.foldLeft(x.toMap) {
case (xs, (char, times)) =>
      val diff = xs(char) - times
      if (diff > 0) xs.updated(char, diff)
      else xs - char
}
.toList.sorted
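  // Illustrative example (added; not part of the original assignment): subtracting the
  // occurrences of "ad" from those of "assessed" leaves List(('e', 2), ('s', 4)).
  private lazy val subtractExample: Occurrences =
    subtract(wordOccurrences("assessed"), wordOccurrences("ad"))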
/** Returns a list of all anagram sentences of the given sentence.
*
* An anagram of a sentence is formed by taking the occurrences of all the characters of
* all the words in the sentence, and producing all possible combinations of words with those characters,
* such that the words have to be from the dictionary.
*
* The number of words in the sentence and its anagrams does not have to correspond.
* For example, the sentence `List("I", "love", "you")` is an anagram of the sentence `List("You", "olive")`.
*
* Also, two sentences with the same words but in a different order are considered two different anagrams.
* For example, sentences `List("You", "olive")` and `List("olive", "you")` are different anagrams of
* `List("I", "love", "you")`.
*
* Here is a full example of a sentence `List("Yes", "man")` and its anagrams for our dictionary:
*
* List(
* List(en, as, my),
* List(en, my, as),
* List(man, yes),
* List(men, say),
* List(as, en, my),
* List(as, my, en),
* List(sane, my),
* List(Sean, my),
* List(my, en, as),
* List(my, as, en),
* List(my, sane),
* List(my, Sean),
* List(say, men),
* List(yes, man)
* )
*
* The different sentences do not have to be output in the order shown above - any order is fine as long as
* all the anagrams are there. Every returned word has to exist in the dictionary.
*
   * Note: if the words of the sentence are themselves in the dictionary, then the sentence is an anagram of itself,
* so it has to be returned in this list.
*
* Note: There is only one anagram of an empty sentence.
*/
def sentenceAnagrams(sentence: Sentence): List[Sentence] = {
def subSentence(occ: Occurrences): List[Sentence] =
if (occ.isEmpty) List(Nil)
else
for {
combination <- combinations(occ)
word <- dictionaryByOccurrences(combination)
sentence <- subSentence(subtract(occ, combination))
} yield word :: sentence
subSentence(sentenceOccurrences(sentence))
}
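  // Usage sketch (added example): the anagrams of the sentence used in the scaladoc above.
  // Declared lazy so the dictionary traversal only happens if this value is accessed.
  private lazy val sentenceAnagramsExample: List[Sentence] =
    sentenceAnagrams(List("Yes", "man"))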
def sentenceAnagramsMemo(sentence: Sentence): List[Sentence] = {
val cache = mutable.Map[Occurrences, List[Sentence]]()
def subSentenceMemo(occurrences: Occurrences): List[Sentence] =
cache.getOrElseUpdate(occurrences, subSentence(occurrences))
def subSentence(occ: Occurrences): List[Sentence] =
if (occ.isEmpty) List(Nil)
else
for {
combination <- combinations(occ)
word <- dictionaryByOccurrences(combination)
sentence <- subSentenceMemo(subtract(occ, combination))
} yield word :: sentence
subSentence(sentenceOccurrences(sentence))
}
} | yurii-khomenko/fpScalaSpec | c1w6forcomp/src/main/scala/forcomp/Anagrams.scala | Scala | gpl-3.0 | 7,252 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package toplevel
package templates
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiClass
import org.jetbrains.plugins.scala.JavaArrayFactoryUtil.ScTemplateParentsFactory
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes._
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScNewTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScFunction, ScTypeAlias, ScTypeAliasDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScEarlyDefinitions
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticClass
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.typedef.SyntheticMembersInjector
import org.jetbrains.plugins.scala.lang.psi.stubs.ScExtendsBlockStub
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.ScDesignatorType
import org.jetbrains.plugins.scala.lang.psi.types.result._
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, CachedInUserData, ModCount}
import org.jetbrains.plugins.scala.project.ProjectContext
import scala.collection.{Seq, mutable}
/**
* @author AlexanderPodkhalyuzin
* Date: 20.02.2008
*/
class ScExtendsBlockImpl private(stub: ScExtendsBlockStub, node: ASTNode)
extends ScalaStubBasedElementImpl(stub, EXTENDS_BLOCK, node) with ScExtendsBlock {
def this(node: ASTNode) = this(null, node)
def this(stub: ScExtendsBlockStub) = this(stub, null)
override def toString: String = "ExtendsBlock"
@Cached(ModCount.anyScalaPsiModificationCount, this)
def templateBody: Option[ScTemplateBody] = {
def childStubTemplate(stub: ScExtendsBlockStub) =
Option(stub.findChildStubByType(TEMPLATE_BODY))
.map(_.getPsi)
def lastChildTemplateBody = getLastChild match {
case tb: ScTemplateBody => Some(tb)
case _ => None
}
byPsiOrStub(lastChildTemplateBody)(childStubTemplate)
}
def empty: Boolean = getNode.getFirstChildNode == null
def selfType: Option[ScType] =
selfTypeElement.flatMap {
_.typeElement
}.flatMap {
_.`type`().toOption
}
@CachedInUserData(this, ModCount.getBlockModificationCount)
def superTypes: List[ScType] = {
val buffer = mutable.ListBuffer.empty[ScType]
val stdTypes = projectContext.stdTypes
import stdTypes._
def addType(t: ScType) {
t match {
case ScCompoundType(comps, _, _) => comps.foreach(addType)
case _ => buffer += t
}
}
templateParents match {
case Some(parents: ScTemplateParents) => parents.superTypes.foreach(addType)
case _ => syntheticTypeElements.map(_.`type`().getOrAny).foreach(addType)
}
if (isUnderCaseClass) {
val prod = scalaProduct
if (prod != null) buffer += prod
val ser = scalaSerializable
if (ser != null) buffer += ser
}
if (!isScalaObject) {
val obj = scalaObject
if (obj != null && !obj.element.asInstanceOf[PsiClass].isDeprecated) buffer += obj
}
def extract(scType: ScType): Boolean = {
scType.extractClass match {
case Some(_: ScObject) => true
case Some(_: ScTrait) => false
case Some(_: ScClass) => true
case Some(c: PsiClass) if !c.isInterface => true
case _ => false
}
}
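    // The first parent that resolves to a class (rather than a trait) determines how the
    // implicit super type is completed below: an Any/AnyRef parent is replaced with
    // java.lang.Object, AnyVal is kept as is, and if no class parent is found at all,
    // java.lang.Object is appended.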
val findResult = buffer.find {
case AnyVal | AnyRef | Any => true
case t => extract(t)
}
findResult match {
case Some(AnyVal) => //do nothing
case res@(Some(AnyRef) | Some(Any)) =>
buffer -= res.get
if (javaObject != null)
buffer += javaObject
case Some(_) => //do nothing
case _ =>
if (javaObject != null)
buffer += javaObject
}
buffer.toList
}
def isScalaObject: Boolean = {
getParentByStub match {
case clazz: PsiClass =>
clazz.qualifiedName == "scala.ScalaObject"
case _ => false
}
}
private def scalaProductClass: PsiClass =
ScalaPsiManager.instance(getProject).getCachedClass(getResolveScope, "scala.Product").orNull
private def scalaSerializableClass: PsiClass =
ScalaPsiManager.instance(getProject).getCachedClass(getResolveScope, "scala.Serializable").orNull
private def scalaObjectClass: PsiClass =
ScalaPsiManager.instance(getProject).getCachedClass(getResolveScope, "scala.ScalaObject").orNull
private def javaObjectClass: PsiClass =
ScalaPsiManager.instance(getProject).getCachedClass(getResolveScope, "java.lang.Object").orNull
private def scalaProduct: ScType = {
val sp = scalaProductClass
if (sp != null) ScalaType.designator(sp) else null
}
private def scalaSerializable: ScType = {
val sp = scalaSerializableClass
if (sp != null) ScalaType.designator(sp) else null
}
private def scalaObject: ScDesignatorType = {
val so = scalaObjectClass
if (so != null) ScDesignatorType(so) else null
}
private def javaObject: ScDesignatorType = {
val so = javaObjectClass
if (so != null) ScDesignatorType(so) else null
}
def isAnonymousClass: Boolean =
getParent match {
case _: ScNewTemplateDefinition => templateBody.isDefined
case _ => false
}
@Cached(ModCount.getBlockModificationCount, this)
def syntheticTypeElements: Seq[ScTypeElement] = {
if (templateParents.nonEmpty) return Seq.empty //will be handled separately
getContext match {
case td: ScTypeDefinition => SyntheticMembersInjector.injectSupers(td)
case _ => Seq.empty
}
}
@CachedInUserData(this, ModCount.getBlockModificationCount)
def supers: Seq[PsiClass] = {
val typeElements = templateParents.fold(syntheticTypeElements) {
_.allTypeElements
}
val buffer = mutable.ListBuffer(ScExtendsBlockImpl.extractSupers(typeElements): _*)
if (isUnderCaseClass) {
val prod = scalaProductClass
if (prod != null) buffer += prod
val ser = scalaSerializableClass
if (ser != null) buffer += ser
}
if (!isScalaObject) {
val obj = scalaObjectClass
if (obj != null && !obj.isDeprecated) buffer += obj
}
buffer.find {
case _: ScSyntheticClass => true
case _: ScObject => true
case _: ScTrait => false
case _: ScClass => true
case c: PsiClass if !c.isInterface => true
case _ => false
} match {
case Some(s: ScSyntheticClass) if s.stdType.isAnyVal => //do nothing
case Some(s: ScSyntheticClass) if s.stdType.isAnyRef || s.stdType.isAny =>
buffer -= s
if (javaObjectClass != null)
buffer += javaObjectClass
case Some(_: PsiClass) => //do nothing
case _ =>
if (javaObjectClass != null)
buffer += javaObjectClass
}
buffer
}
def members: Seq[ScMember] = {
templateBodies.flatMap {
_.members
} ++ earlyDefinitions.toSeq.flatMap {
_.members
}
}
def typeDefinitions: Seq[ScTypeDefinition] =
templateBodies.flatMap {
_.typeDefinitions
}
def nameId = null
def aliases: Seq[ScTypeAlias] =
templateBodies.flatMap {
_.aliases
}
def functions: Seq[ScFunction] =
templateBodies.flatMap {
_.functions
}
def selfTypeElement: Option[ScSelfTypeElement] =
templateBody.flatMap {
_.selfTypeElement
}
def templateParents: Option[ScTemplateParents] =
getStubOrPsiChildren(TEMPLATE_PARENTS, ScTemplateParentsFactory).headOption
def earlyDefinitions: Option[ScEarlyDefinitions] =
this.stubOrPsiChild(EARLY_DEFINITIONS)
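  // When no early-definitions block exists yet, an empty one is spliced in by parsing a
  // dummy `class A extends {} with B {}`, copying the PSI range between its `extends`
  // and `with` keywords, and inserting it before this block's template parents (or, if
  // there are none, before the first child).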
override def addEarlyDefinitions(): ScEarlyDefinitions = {
earlyDefinitions.getOrElse {
val text = "class A extends {} with B {}"
val templDef = ScalaPsiElementFactory.createTemplateDefinitionFromText(text, getParentByStub.getContext, getParentByStub)
val extBlock = templDef.extendsBlock
val kExtends = extBlock.children.find(_.getNode.getElementType == ScalaTokenTypes.kEXTENDS).get
val kWith = extBlock.children.find(_.getNode.getElementType == ScalaTokenTypes.kWITH).get
val firstElem = if (templateParents.isEmpty) kExtends else kExtends.getNextSibling
val anchor = if (templateParents.isEmpty) getFirstChild else templateParents.get
this.addRangeBefore(firstElem, kWith, anchor)
earlyDefinitions.get
}
}
def isUnderCaseClass: Boolean = getParentByStub match {
case td: ScTypeDefinition if td.isCase => true
case _ => false
}
private def templateBodies = templateBody.toSeq
}
object ScExtendsBlockImpl {
private def extractSupers(typeElements: Seq[ScTypeElement])
(implicit project: ProjectContext): Seq[PsiClass] =
typeElements.flatMap { element =>
def tail(): Option[PsiClass] =
element.`type`().toOption
.flatMap(_.extractClass)
def refTail(reference: ScStableCodeReferenceElement): Option[PsiClass] =
reference.resolveNoConstructor match {
case Array(head) => head.element match {
case c: PsiClass => Some(c)
case ta: ScTypeAliasDefinition =>
ta.aliasedType.toOption
.flatMap(_.extractClass)
case _ => tail()
}
case _ => tail()
}
val maybeReference = element match {
case ScSimpleTypeElement(result) => result
case ScParameterizedTypeElement(ScSimpleTypeElement(result), _) => result
case _ => None
}
maybeReference match {
case Some(reference) => refTail(reference)
case _ => tail()
}
}
} | jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/toplevel/templates/ScExtendsBlockImpl.scala | Scala | apache-2.0 | 10,038 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import java.util.{Locale, OptionalLong}
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.IO_WARNING_LARGEFILETHRESHOLD
import org.apache.spark.sql.{AnalysisException, SparkSession}
import org.apache.spark.sql.catalyst.expressions.{AttributeSet, Expression, ExpressionSet}
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.connector.read.{Batch, InputPartition, Scan, Statistics, SupportsReportStatistics}
import org.apache.spark.sql.execution.PartitionedFileUtil
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.internal.connector.SupportsMetadata
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.Utils
trait FileScan extends Scan
with Batch with SupportsReportStatistics with SupportsMetadata with Logging {
/**
* Returns whether a file with `path` could be split or not.
*/
def isSplitable(path: Path): Boolean = {
false
}
def sparkSession: SparkSession
def fileIndex: PartitioningAwareFileIndex
/**
* Returns the required data schema
*/
def readDataSchema: StructType
/**
* Returns the required partition schema
*/
def readPartitionSchema: StructType
/**
   * Returns the filters that can be used for partition pruning
*/
def partitionFilters: Seq[Expression]
/**
   * Returns the data filters that can be used for file listing
*/
def dataFilters: Seq[Expression]
/**
* Create a new `FileScan` instance from the current one
* with different `partitionFilters` and `dataFilters`
*/
def withFilters(partitionFilters: Seq[Expression], dataFilters: Seq[Expression]): FileScan
/**
   * Returns the reason why a file with `path` is unsplittable.
   * Should only be called when `isSplitable(path)` returns false.
*/
def getFileUnSplittableReason(path: Path): String = {
assert(!isSplitable(path))
"undefined"
}
protected def seqToString(seq: Seq[Any]): String = seq.mkString("[", ", ", "]")
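  // Filters are normalized against this scan's output attributes so that two scans over
  // the same file index with semantically equal filters compare equal in `equals` below.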
private lazy val (normalizedPartitionFilters, normalizedDataFilters) = {
val output = readSchema().toAttributes
val partitionFilterAttributes = AttributeSet(partitionFilters).map(a => a.name -> a).toMap
val dataFiltersAttributes = AttributeSet(dataFilters).map(a => a.name -> a).toMap
val normalizedPartitionFilters = ExpressionSet(partitionFilters.map(
QueryPlan.normalizeExpressions(_,
output.map(a => partitionFilterAttributes.getOrElse(a.name, a)))))
val normalizedDataFilters = ExpressionSet(dataFilters.map(
QueryPlan.normalizeExpressions(_,
output.map(a => dataFiltersAttributes.getOrElse(a.name, a)))))
(normalizedPartitionFilters, normalizedDataFilters)
}
override def equals(obj: Any): Boolean = obj match {
case f: FileScan =>
fileIndex == f.fileIndex && readSchema == f.readSchema &&
normalizedPartitionFilters == f.normalizedPartitionFilters &&
normalizedDataFilters == f.normalizedDataFilters
case _ => false
}
override def hashCode(): Int = getClass.hashCode()
val maxMetadataValueLength = sparkSession.sessionState.conf.maxMetadataStringLength
override def description(): String = {
val metadataStr = getMetaData().toSeq.sorted.map {
case (key, value) =>
val redactedValue =
Utils.redact(sparkSession.sessionState.conf.stringRedactionPattern, value)
key + ": " + StringUtils.abbreviate(redactedValue, maxMetadataValueLength)
}.mkString(", ")
s"${this.getClass.getSimpleName} $metadataStr"
}
override def getMetaData(): Map[String, String] = {
val locationDesc =
fileIndex.getClass.getSimpleName +
Utils.buildLocationMetadata(fileIndex.rootPaths, maxMetadataValueLength)
Map(
"Format" -> s"${this.getClass.getSimpleName.replace("Scan", "").toLowerCase(Locale.ROOT)}",
"ReadSchema" -> readDataSchema.catalogString,
"PartitionFilters" -> seqToString(partitionFilters),
"DataFilters" -> seqToString(dataFilters),
"Location" -> locationDesc)
}
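  /**
   * Lists the selected files, splits the splittable ones into chunks of at most
   * `maxSplitBytes`, prunes each partition's values down to the required partition
   * columns, and packs the resulting splits (largest first) into [[FilePartition]]s.
   */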
protected def partitions: Seq[FilePartition] = {
val selectedPartitions = fileIndex.listFiles(partitionFilters, dataFilters)
val maxSplitBytes = FilePartition.maxSplitBytes(sparkSession, selectedPartitions)
val partitionAttributes = fileIndex.partitionSchema.toAttributes
val attributeMap = partitionAttributes.map(a => normalizeName(a.name) -> a).toMap
val readPartitionAttributes = readPartitionSchema.map { readField =>
attributeMap.get(normalizeName(readField.name)).getOrElse {
throw new AnalysisException(s"Can't find required partition column ${readField.name} " +
s"in partition schema ${fileIndex.partitionSchema}")
}
}
lazy val partitionValueProject =
GenerateUnsafeProjection.generate(readPartitionAttributes, partitionAttributes)
val splitFiles = selectedPartitions.flatMap { partition =>
// Prune partition values if part of the partition columns are not required.
val partitionValues = if (readPartitionAttributes != partitionAttributes) {
partitionValueProject(partition.values).copy()
} else {
partition.values
}
partition.files.flatMap { file =>
val filePath = file.getPath
PartitionedFileUtil.splitFiles(
sparkSession = sparkSession,
file = file,
filePath = filePath,
isSplitable = isSplitable(filePath),
maxSplitBytes = maxSplitBytes,
partitionValues = partitionValues
)
}.toArray.sortBy(_.length)(implicitly[Ordering[Long]].reverse)
}
if (splitFiles.length == 1) {
val path = new Path(splitFiles(0).filePath)
if (!isSplitable(path) && splitFiles(0).length >
sparkSession.sparkContext.getConf.get(IO_WARNING_LARGEFILETHRESHOLD)) {
logWarning(s"Loading one large unsplittable file ${path.toString} with only one " +
s"partition, the reason is: ${getFileUnSplittableReason(path)}")
}
}
FilePartition.getFilePartitions(sparkSession, splitFiles, maxSplitBytes)
}
override def planInputPartitions(): Array[InputPartition] = {
partitions.toArray
}
override def estimateStatistics(): Statistics = {
new Statistics {
override def sizeInBytes(): OptionalLong = {
val compressionFactor = sparkSession.sessionState.conf.fileCompressionFactor
val size = (compressionFactor * fileIndex.sizeInBytes).toLong
OptionalLong.of(size)
}
override def numRows(): OptionalLong = OptionalLong.empty()
}
}
override def toBatch: Batch = this
override def readSchema(): StructType =
StructType(readDataSchema.fields ++ readPartitionSchema.fields)
// Returns whether the two given arrays of [[Filter]]s are equivalent.
protected def equivalentFilters(a: Array[Filter], b: Array[Filter]): Boolean = {
a.sortBy(_.hashCode()).sameElements(b.sortBy(_.hashCode()))
}
private val isCaseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
private def normalizeName(name: String): String = {
if (isCaseSensitive) {
name
} else {
name.toLowerCase(Locale.ROOT)
}
}
}
| maropu/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileScan.scala | Scala | apache-2.0 | 8,248 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.util.Properties
import java.util.concurrent.TimeUnit
import kafka.consumer.SimpleConsumer
import kafka.integration.KafkaServerTestHarness
import kafka.message.Message
import kafka.server.KafkaConfig
import kafka.utils.TestUtils
import org.apache.kafka.clients.producer._
import org.apache.kafka.common.config.ConfigException
import org.apache.kafka.common.errors.SerializationException
import org.apache.kafka.common.serialization.ByteArraySerializer
import org.junit.Assert._
import org.junit.{After, Before, Test}
class ProducerSendTest extends KafkaServerTestHarness {
val numServers = 2
val overridingProps = new Properties()
overridingProps.put(KafkaConfig.NumPartitionsProp, 4.toString)
def generateConfigs() =
TestUtils.createBrokerConfigs(numServers, zkConnect, false).map(KafkaConfig.fromProps(_, overridingProps))
private var consumer1: SimpleConsumer = null
private var consumer2: SimpleConsumer = null
private val topic = "topic"
private val numRecords = 100
@Before
override def setUp() {
super.setUp()
// TODO: we need to migrate to new consumers when 0.9 is final
consumer1 = new SimpleConsumer("localhost", servers(0).boundPort(), 100, 1024*1024, "")
consumer2 = new SimpleConsumer("localhost", servers(1).boundPort(), 100, 1024*1024, "")
}
@After
override def tearDown() {
consumer1.close()
consumer2.close()
super.tearDown()
}
/**
* testSendOffset checks the basic send API behavior
*
* 1. Send with null key/value/partition-id should be accepted; send with null topic should be rejected.
* 2. Last message of the non-blocking send should return the correct offset metadata
*/
@Test
def testSendOffset() {
var producer = TestUtils.createNewProducer(brokerList)
val partition = new Integer(0)
object callback extends Callback {
var offset = 0L
def onCompletion(metadata: RecordMetadata, exception: Exception) {
if (exception == null) {
assertEquals(offset, metadata.offset())
assertEquals(topic, metadata.topic())
assertEquals(partition, metadata.partition())
offset += 1
} else {
fail("Send callback returns the following exception", exception)
}
}
}
try {
// create topic
TestUtils.createTopic(zkUtils, topic, 1, 2, servers)
// send a normal record
val record0 = new ProducerRecord[Array[Byte],Array[Byte]](topic, partition, "key".getBytes, "value".getBytes)
assertEquals("Should have offset 0", 0L, producer.send(record0, callback).get.offset)
// send a record with null value should be ok
val record1 = new ProducerRecord[Array[Byte],Array[Byte]](topic, partition, "key".getBytes, null)
assertEquals("Should have offset 1", 1L, producer.send(record1, callback).get.offset)
// send a record with null key should be ok
val record2 = new ProducerRecord[Array[Byte],Array[Byte]](topic, partition, null, "value".getBytes)
assertEquals("Should have offset 2", 2L, producer.send(record2, callback).get.offset)
// send a record with null part id should be ok
val record3 = new ProducerRecord[Array[Byte],Array[Byte]](topic, null, "key".getBytes, "value".getBytes)
assertEquals("Should have offset 3", 3L, producer.send(record3, callback).get.offset)
// send a record with null topic should fail
try {
val record4 = new ProducerRecord[Array[Byte],Array[Byte]](null, partition, "key".getBytes, "value".getBytes)
producer.send(record4, callback)
fail("Should not allow sending a record without topic")
} catch {
case iae: IllegalArgumentException => // this is ok
case e: Throwable => fail("Only expecting IllegalArgumentException", e)
}
// non-blocking send a list of records
for (i <- 1 to numRecords)
producer.send(record0, callback)
// check that all messages have been acked via offset
assertEquals("Should have offset " + (numRecords + 4), numRecords + 4L, producer.send(record0, callback).get.offset)
} finally {
if (producer != null) {
producer.close()
producer = null
}
}
}
@Test
def testSerializer() {
// send a record with a wrong type should receive a serialization exception
try {
val producer = createNewProducerWithWrongSerializer(brokerList)
val record5 = new ProducerRecord[Array[Byte],Array[Byte]](topic, new Integer(0), "key".getBytes, "value".getBytes)
producer.send(record5)
fail("Should have gotten a SerializationException")
} catch {
case se: SerializationException => // this is ok
}
try {
createNewProducerWithNoSerializer(brokerList)
fail("Instantiating a producer without specifying a serializer should cause a ConfigException")
} catch {
case ce : ConfigException => // this is ok
}
// create a producer with explicit serializers should succeed
createNewProducerWithExplicitSerializer(brokerList)
}
private def createNewProducerWithWrongSerializer(brokerList: String) : KafkaProducer[Array[Byte],Array[Byte]] = {
import org.apache.kafka.clients.producer.ProducerConfig
val producerProps = new Properties()
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
return new KafkaProducer[Array[Byte],Array[Byte]](producerProps)
}
private def createNewProducerWithNoSerializer(brokerList: String) : KafkaProducer[Array[Byte],Array[Byte]] = {
import org.apache.kafka.clients.producer.ProducerConfig
val producerProps = new Properties()
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
return new KafkaProducer[Array[Byte],Array[Byte]](producerProps)
}
private def createNewProducerWithExplicitSerializer(brokerList: String) : KafkaProducer[Array[Byte],Array[Byte]] = {
import org.apache.kafka.clients.producer.ProducerConfig
val producerProps = new Properties()
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
return new KafkaProducer[Array[Byte],Array[Byte]](producerProps, new ByteArraySerializer, new ByteArraySerializer)
}
/**
* testClose checks the closing behavior
*
* After close() returns, all messages should be sent with correct returned offset metadata
*/
@Test
def testClose() {
var producer = TestUtils.createNewProducer(brokerList)
try {
// create topic
TestUtils.createTopic(zkUtils, topic, 1, 2, servers)
// non-blocking send a list of records
val record0 = new ProducerRecord[Array[Byte],Array[Byte]](topic, null, "key".getBytes, "value".getBytes)
for (i <- 1 to numRecords)
producer.send(record0)
val response0 = producer.send(record0)
// close the producer
producer.close()
producer = null
// check that all messages have been acked via offset,
// this also checks that messages with same key go to the same partition
assertTrue("The last message should be acked before producer is shutdown", response0.isDone)
assertEquals("Should have offset " + numRecords, numRecords.toLong, response0.get.offset)
} finally {
if (producer != null) {
producer.close()
producer = null
}
}
}
/**
* testSendToPartition checks the partitioning behavior
*
* The specified partition-id should be respected
*/
@Test
def testSendToPartition() {
var producer = TestUtils.createNewProducer(brokerList)
try {
// create topic
val leaders = TestUtils.createTopic(zkUtils, topic, 2, 2, servers)
val partition = 1
// make sure leaders exist
val leader1 = leaders(partition)
assertTrue("Leader for topic \\"topic\\" partition 1 should exist", leader1.isDefined)
val responses =
for (i <- 1 to numRecords)
yield producer.send(new ProducerRecord[Array[Byte],Array[Byte]](topic, partition, null, ("value" + i).getBytes))
val futures = responses.toList
futures.map(_.get)
for (future <- futures)
assertTrue("Request should have completed", future.isDone)
// make sure all of them end up in the same partition with increasing offset values
for ((future, offset) <- futures zip (0 until numRecords)) {
assertEquals(offset.toLong, future.get.offset)
assertEquals(topic, future.get.topic)
assertEquals(partition, future.get.partition)
}
// make sure the fetched messages also respect the partitioning and ordering
val fetchResponse1 = if(leader1.get == configs(0).brokerId) {
consumer1.fetch(new FetchRequestBuilder().addFetch(topic, partition, 0, Int.MaxValue).build())
} else {
consumer2.fetch(new FetchRequestBuilder().addFetch(topic, partition, 0, Int.MaxValue).build())
}
val messageSet1 = fetchResponse1.messageSet(topic, partition).iterator.toBuffer
assertEquals("Should have fetched " + numRecords + " messages", numRecords, messageSet1.size)
// TODO: also check topic and partition after they are added in the return messageSet
for (i <- 0 to numRecords - 1) {
assertEquals(new Message(bytes = ("value" + (i + 1)).getBytes), messageSet1(i).message)
assertEquals(i.toLong, messageSet1(i).offset)
}
} finally {
if (producer != null) {
producer.close()
producer = null
}
}
}
/**
* testAutoCreateTopic
*
* The topic should be created upon sending the first message
*/
@Test
def testAutoCreateTopic() {
var producer = TestUtils.createNewProducer(brokerList, retries = 5)
try {
// Send a message to auto-create the topic
val record = new ProducerRecord[Array[Byte],Array[Byte]](topic, null, "key".getBytes, "value".getBytes)
assertEquals("Should have offset 0", 0L, producer.send(record).get.offset)
// double check that the topic is created with leader elected
TestUtils.waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 0)
} finally {
if (producer != null) {
producer.close()
producer = null
}
}
}
/**
* Test that flush immediately sends all accumulated requests.
*/
@Test
def testFlush() {
var producer = TestUtils.createNewProducer(brokerList, lingerMs = Long.MaxValue)
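    // lingerMs = Long.MaxValue keeps records buffered in the accumulator indefinitely,
    // so none of the sends below should complete until flush() is called explicitly.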
try {
TestUtils.createTopic(zkUtils, topic, 2, 2, servers)
val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, "value".getBytes)
for(i <- 0 until 50) {
val responses = (0 until numRecords) map (i => producer.send(record))
assertTrue("No request is complete.", responses.forall(!_.isDone()))
producer.flush()
assertTrue("All requests are complete.", responses.forall(_.isDone()))
}
} finally {
if (producer != null)
producer.close()
}
}
/**
* Test close with zero timeout from caller thread
*/
@Test
def testCloseWithZeroTimeoutFromCallerThread() {
var producer: KafkaProducer[Array[Byte],Array[Byte]] = null
try {
// create topic
val leaders = TestUtils.createTopic(zkUtils, topic, 2, 2, servers)
val leader0 = leaders(0)
val leader1 = leaders(1)
// create record
val record0 = new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, null, "value".getBytes)
val record1 = new ProducerRecord[Array[Byte], Array[Byte]](topic, 1, null, "value".getBytes)
// Test closing from caller thread.
for(i <- 0 until 50) {
producer = TestUtils.createNewProducer(brokerList, lingerMs = Long.MaxValue)
val responses = (0 until numRecords) map (i => producer.send(record0))
assertTrue("No request is complete.", responses.forall(!_.isDone()))
producer.close(0, TimeUnit.MILLISECONDS)
responses.foreach { future =>
try {
future.get()
fail("No message should be sent successfully.")
} catch {
case e: Exception =>
assertEquals("java.lang.IllegalStateException: Producer is closed forcefully.", e.getMessage)
}
}
val fetchResponse = if (leader0.get == configs(0).brokerId) {
consumer1.fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, Int.MaxValue).build())
} else {
consumer2.fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, Int.MaxValue).build())
}
assertEquals("Fetch response should have no message returned.", 0, fetchResponse.messageSet(topic, 0).size)
}
} finally {
if (producer != null)
producer.close()
}
}
/**
* Test close with zero and non-zero timeout from sender thread
*/
@Test
def testCloseWithZeroTimeoutFromSenderThread() {
var producer: KafkaProducer[Array[Byte],Array[Byte]] = null
try {
// create topic
val leaders = TestUtils.createTopic(zkUtils, topic, 1, 2, servers)
val leader = leaders(0)
// create record
val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, null, "value".getBytes)
// Test closing from sender thread.
class CloseCallback(producer: KafkaProducer[Array[Byte], Array[Byte]]) extends Callback {
override def onCompletion(metadata: RecordMetadata, exception: Exception) {
        // Trigger another batch in the accumulator before closing the producer. These messages should
// not be sent.
(0 until numRecords) map (i => producer.send(record))
// The close call will be called by all the message callbacks. This tests idempotence of the close call.
producer.close(0, TimeUnit.MILLISECONDS)
// Test close with non zero timeout. Should not block at all.
producer.close(Long.MaxValue, TimeUnit.MICROSECONDS)
}
}
for(i <- 0 until 50) {
producer = TestUtils.createNewProducer(brokerList, lingerMs = Long.MaxValue)
// send message to partition 0
val responses = ((0 until numRecords) map (i => producer.send(record, new CloseCallback(producer))))
assertTrue("No request is complete.", responses.forall(!_.isDone()))
// flush the messages.
producer.flush()
assertTrue("All request are complete.", responses.forall(_.isDone()))
// Check the messages received by broker.
val fetchResponse = if (leader.get == configs(0).brokerId) {
consumer1.fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, Int.MaxValue).build())
} else {
consumer2.fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, Int.MaxValue).build())
}
val expectedNumRecords = (i + 1) * numRecords
assertEquals("Fetch response to partition 0 should have %d messages.".format(expectedNumRecords),
expectedNumRecords, fetchResponse.messageSet(topic, 0).size)
}
} finally {
if (producer != null)
producer.close()
}
}
}
| vkroz/kafka | core/src/test/scala/integration/kafka/api/ProducerSendTest.scala | Scala | apache-2.0 | 16,095 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2016 Algolia
* http://www.algolia.com/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package algolia.dsl
import algolia.definitions._
import algolia.objects.{
CompoundEntry,
DictionaryEntry,
DictionarySettings,
PluralEntry,
StopwordEntry
}
import algolia.responses.{DictionaryTask, SearchDictionaryResult}
import algolia.{AlgoliaClient, AlgoliaClientException, Executable}
import org.json4s.Formats
import scala.concurrent.{ExecutionContext, Future}
trait DictionaryDsl {
implicit val formats: Formats
// Save Dictionary Definition
implicit object SaveStopwordDictionaryDefinitionExecutable
extends SaveDictionaryDefinitionExecutable[StopwordEntry]
implicit object SavePluralDictionaryDefinitionExecutable
extends SaveDictionaryDefinitionExecutable[PluralEntry]
implicit object SaveCompoundDictionaryDefinitionExecutable
extends SaveDictionaryDefinitionExecutable[CompoundEntry]
sealed abstract class SaveDictionaryDefinitionExecutable[T <: DictionaryEntry]
extends Executable[SaveDictionaryDefinition[T], DictionaryTask] {
override def apply(
client: AlgoliaClient,
query: SaveDictionaryDefinition[T]
)(
implicit executor: ExecutionContext
): Future[DictionaryTask] = {
if (query.dictionaryEntries.isEmpty) {
return Future.failed(
new AlgoliaClientException(s"Dictionary entries cannot be empty")
)
}
client.request[DictionaryTask](query.build())
}
}
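  // The same pattern repeats for the request types below: one sealed executable per
  // request shape, with an implicit object per dictionary entry type (stopword, plural,
  // compound) so the client can pick the right executable at the call site; requests
  // with empty payloads fail fast on the client instead of hitting the API.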
// Replace Dictionary Definition
implicit object ReplaceStopwordDictionaryDefinitionExecutable
extends ReplaceDictionaryDefinitionExecutable[StopwordEntry]
implicit object ReplacePluralDictionaryDefinitionExecutable
extends ReplaceDictionaryDefinitionExecutable[PluralEntry]
implicit object ReplaceCompoundDictionaryDefinitionExecutable
extends ReplaceDictionaryDefinitionExecutable[CompoundEntry]
sealed abstract class ReplaceDictionaryDefinitionExecutable[
T <: DictionaryEntry
] extends Executable[ReplaceDictionaryDefinition[T], DictionaryTask] {
override def apply(
client: AlgoliaClient,
query: ReplaceDictionaryDefinition[T]
)(
implicit executor: ExecutionContext
): Future[DictionaryTask] = {
if (query.dictionaryEntries.isEmpty) {
return Future.failed(
new AlgoliaClientException(s"Dictionary entries cannot be empty")
)
}
client.request[DictionaryTask](query.build())
}
}
// Delete Dictionary Definition
implicit object DeleteDictionaryDefinitionExecutable
extends Executable[DeleteDictionaryDefinition, DictionaryTask] {
override def apply(
client: AlgoliaClient,
query: DeleteDictionaryDefinition
)(
implicit executor: ExecutionContext
): Future[DictionaryTask] = {
if (query.objectIDs.isEmpty) {
return Future.failed(
          new AlgoliaClientException(s"Object IDs cannot be empty")
)
}
client.request[DictionaryTask](query.build())
}
}
// Clear Dictionary Definition
implicit object ClearDictionaryDefinitionExecutable
extends Executable[ClearDictionaryDefinition, DictionaryTask] {
override def apply(
client: AlgoliaClient,
query: ClearDictionaryDefinition
)(
implicit executor: ExecutionContext
): Future[DictionaryTask] = {
client.request[DictionaryTask](query.build())
}
}
// Search Dictionary Definition
implicit object SearchStopwordDictionaryDefinitionExecutable
extends SearchDictionaryDefinitionExecutable[StopwordEntry]
implicit object SearchPluralDictionaryDefinitionExecutable
extends SearchDictionaryDefinitionExecutable[PluralEntry]
implicit object SearchCompoundDictionaryDefinitionExecutable
extends SearchDictionaryDefinitionExecutable[CompoundEntry]
sealed abstract class SearchDictionaryDefinitionExecutable[
T <: DictionaryEntry
] extends Executable[SearchDictionaryDefinition[T], SearchDictionaryResult] {
override def apply(
client: AlgoliaClient,
query: SearchDictionaryDefinition[T]
)(
implicit executor: ExecutionContext
): Future[SearchDictionaryResult] = {
client.requestSearch[SearchDictionaryResult](query.build())
}
}
// Dictionary Settings Definition
implicit object SetSettingsDictionaryDefinitionExecutable
extends Executable[SetSettingsDictionaryDefinition, DictionaryTask] {
override def apply(
client: AlgoliaClient,
query: SetSettingsDictionaryDefinition
)(implicit executor: ExecutionContext): Future[DictionaryTask] = {
client.requestSearch[DictionaryTask](query.build())
}
}
implicit object GetSettingsDictionaryDefinitionExecutable
extends Executable[GetSettingsDictionaryDefinition, DictionarySettings] {
override def apply(
client: AlgoliaClient,
query: GetSettingsDictionaryDefinition
)(implicit executor: ExecutionContext): Future[DictionarySettings] = {
client.requestSearch[DictionarySettings](query.build())
}
}
}
| algolia/algoliasearch-client-scala | src/main/scala/algolia/dsl/DictionaryDsl.scala | Scala | mit | 6,226 |
/*
* Copyright (C) 2016 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.agreement.fixture
import java.util.TimeZone
import org.joda.time.{ DateTime, DateTimeUtils, DateTimeZone }
trait FixedDateTime {
val nowYMD = "2018-03-22"
val now = s"${ nowYMD }T21:43:01.576"
val nowUTC = s"${ nowYMD }T20:43:01Z"
/** Causes DateTime.now() to return a predefined value. */
DateTimeUtils.setCurrentMillisFixed(new DateTime(nowUTC).getMillis)
DateTimeZone.setDefault(DateTimeZone.forTimeZone(TimeZone.getTimeZone("Europe/Amsterdam")))
}
| DANS-KNAW/easy-license-creator | src/test/scala/nl/knaw/dans/easy/agreement/fixture/FixedDateTime.scala | Scala | apache-2.0 | 1,141 |
/*
* Copyright (c) 2014-2015 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.schemaddl
package generators
package redshift
// Scalaz
import scalaz._
import Scalaz._
// This project
import com.snowplowanalytics.schemaddl.generators.redshift.Ddl.DataTypes.RedshiftInteger
import utils.{ StringUtils => SU }
/**
* Module containing functions for data type suggestions
*/
object TypeSuggestions {
import Ddl._
/**
* Type alias for function suggesting an encode type based on map of
* JSON Schema properties
*/
type DataTypeSuggestion = (Map[String, String], String) => Option[DataType]
  // For complex enums, suggest VARCHAR with the length of the longest element
val complexEnumSuggestion: DataTypeSuggestion = (properties, columnName) =>
properties.get("enum") match {
case Some(enums) if isComplexEnum(enums) =>
val longest = excludeNull(enums).map(_.length).max
Some(DataTypes.RedshiftVarchar(longest))
case _ => None
}
  // Suggest VARCHAR(4096) for all product types; this suggestion should come at the beginning of the list
val productSuggestion: DataTypeSuggestion = (properties, columnName) =>
properties.get("type") match {
case (Some(types)) if excludeNull(types).size > 1 =>
Some(CustomDataTypes.ProductType(List(s"Product type $types encountered in $columnName")))
case _ => None
}
val timestampSuggestion: DataTypeSuggestion = (properties, columnName) =>
(properties.get("type"), properties.get("format")) match {
case (Some(types), Some("date-time")) if types.contains("string") =>
Some(DataTypes.RedshiftTimestamp)
case _ => None
}
val arraySuggestion: DataTypeSuggestion = (properties, columnName) =>
properties.get("type") match {
case Some(types) if types.contains("array") =>
Some(DataTypes.RedshiftVarchar(5000))
case _ => None
}
val numberSuggestion: DataTypeSuggestion = (properties, columnName) =>
(properties.get("type"), properties.get("multipleOf")) match {
case (Some(types), Some(multipleOf)) if types.contains("number") && multipleOf == "0.01" =>
Some(DataTypes.RedshiftDecimal(Some(36), Some(2)))
case (Some(types), _) if types.contains("number") =>
Some(DataTypes.RedshiftDouble)
case _ => None
}
val integerSuggestion: DataTypeSuggestion = (properties, columnName) => {
(properties.get("type"), properties.get("maximum"), properties.get("enum"), properties.get("multipleOf")) match {
case (Some(types), Some(maximum), _, _) if excludeNull(types) == Set("integer") =>
getIntSize(maximum)
// Contains only enum
case (types, _, Some(enum), _) if (types.isEmpty || excludeNull(types.get) == Set("integer")) && SU.isIntegerList(enum) =>
val max = enum.split(",").toList.map(el => try Some(el.toLong) catch { case e: NumberFormatException => None } )
val maxLong = max.sequence.getOrElse(Nil).maximum
maxLong.flatMap(m => getIntSize(m)) // This will short-circuit integer suggestions on any non-integer enum
case (Some(types), _, _, _) if excludeNull(types) == Set("integer") =>
Some(DataTypes.RedshiftBigInt)
case (Some(types), max, _, Some(multipleOf)) if types.contains("number") && multipleOf == "1" =>
max.flatMap(m => getIntSize(m)).orElse(Some(RedshiftInteger))
case _ => None
}
}
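  // Illustrative example (added; `example_column` is a placeholder name): a property map
  // such as Map("type" -> "integer", "maximum" -> "32767") yields RedshiftSmallInt.
  private lazy val integerSuggestionExample: Option[DataType] =
    integerSuggestion(Map("type" -> "integer", "maximum" -> "32767"), "example_column")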
val charSuggestion: DataTypeSuggestion = (properties, columnName) => {
(properties.get("type"), properties.get("minLength"), properties.get("maxLength")) match {
case (Some(types), Some(SU.IntegerAsString(minLength)), Some(SU.IntegerAsString(maxLength)))
if (minLength == maxLength && excludeNull(types) == Set("string")) =>
Some(DataTypes.RedshiftChar(maxLength))
case _ => None
}
}
val booleanSuggestion: DataTypeSuggestion = (properties, columnName) => {
properties.get("type") match {
case Some(types) if excludeNull(types) == Set("boolean") => Some(DataTypes.RedshiftBoolean)
case _ => None
}
}
val uuidSuggestion: DataTypeSuggestion = (properties, columnName) => {
(properties.get("type"), properties.get("format")) match {
case (Some(types), Some("uuid")) if types.contains("string") =>
Some(DataTypes.RedshiftChar(36))
case _ => None
}
}
val varcharSuggestion: DataTypeSuggestion = (properties, columnName) => {
(properties.get("type"), properties.get("maxLength"), properties.get("enum"), properties.get("format")) match {
case (Some(types), _, _, Some("ipv6")) if types.contains("string") =>
Some(DataTypes.RedshiftVarchar(39))
case (Some(types), _, _, Some("ipv4")) if types.contains("string") =>
Some(DataTypes.RedshiftVarchar(15))
case (Some(types), Some(SU.IntegerAsString(maxLength)), _, _) if types.contains("string") =>
Some(DataTypes.RedshiftVarchar(maxLength))
case (_, _, Some(enum), _) => {
val enumItems = enum.split(",")
val maxLength = enumItems.toList.reduceLeft((a, b) => if (a.length > b.length) a else b).length
if (enumItems.length == 1) {
Some(DataTypes.RedshiftChar(maxLength))
} else {
Some(DataTypes.RedshiftVarchar(maxLength))
}
}
case _ => None
}
}
/**
* Get set of types or enum as string excluding null
*
* @param types comma-separated types
* @return set of strings
*/
private def excludeNull(types: String): Set[String] = types.split(",").toSet - "null"
/**
* Helper function to get size of Integer
*
* @param max upper bound extracted from properties as string
   * @return the smallest Redshift integer type that can hold the value, or None if it cannot be parsed or does not fit
*/
private def getIntSize(max: => String): Option[DataType] =
try {
val maxLong = max.toLong
getIntSize(maxLong)
} catch {
case e: NumberFormatException => None
}
/**
* Helper function to get size of Integer
*
* @param max upper bound
   * @return the smallest Redshift integer type that can hold `max`, or None if it does not fit
*/
private def getIntSize(max: Long): Option[DataType] =
if (max <= Short.MaxValue) Some(DataTypes.RedshiftSmallInt)
else if (max <= Int.MaxValue) Some(DataTypes.RedshiftInteger)
else if (max <= Long.MaxValue) Some(DataTypes.RedshiftBigInt)
else None
/**
   * Check whether the enum contains values of several different types
   * (e.g. string and number, or number and boolean)
*/
private def isComplexEnum(enum: String) = {
// Predicates
def isNumeric(s: String) = try {
s.toDouble
true
} catch {
case e: NumberFormatException => false
}
def isNonNumeric(s: String) = !isNumeric(s)
def isBoolean(s: String) = s == "true" || s == "false"
val nonNullEnum = excludeNull(enum)
somePredicates(nonNullEnum, List(isNumeric _, isNonNumeric _, isBoolean _), 2)
}
/**
   * Check that at least `quantity` of the `predicates` are satisfied by at least one element of `instances`
*
* @param instances list of instances to check on
* @param predicates list of predicates to check
* @param quantity required quantity
*/
private def somePredicates(instances: Set[String], predicates: List[String => Boolean], quantity: Int): Boolean = {
if (quantity == 0) true
else predicates match {
case Nil => false
case h :: tail if instances.exists(h) => somePredicates(instances, tail, quantity - 1)
case _ :: tail => somePredicates(instances, tail, quantity)
}
}
}
| snowplow/iglu-utils | src/main/scala/com.snowplowanalytics/schemaddl/generators/redshift/TypeSuggestions.scala | Scala | apache-2.0 | 8,314 |
package util
import scala.concurrent._
import ExecutionContext.Implicits.global
import org.apache.commons.mail.{DefaultAuthenticator, HtmlEmail}
import org.slf4j.LoggerFactory
import app.Context
import service.{AccountService, RepositoryService, IssuesService, SystemSettingsService}
import servlet.Database
import SystemSettingsService.Smtp
import _root_.util.ControlUtil.defining
trait Notifier extends RepositoryService with AccountService with IssuesService {
def toNotify(r: RepositoryService.RepositoryInfo, issueId: Int, content: String)
(msg: String => String)(implicit context: Context): Unit
protected def recipients(issue: model.Issue)(notify: String => Unit)(implicit context: Context) =
(
// individual repository's owner
issue.userName ::
// collaborators
getCollaborators(issue.userName, issue.repositoryName) :::
// participants
issue.openedUserName ::
getComments(issue.userName, issue.repositoryName, issue.issueId).map(_.commentedUserName)
)
.distinct
    .withFilter ( n => n != context.loginAccount.get.userName && n == issue.assignedUserName.orNull ) // exclude the user who performed the operation and anyone who is not the assignee
.foreach ( getAccountByUserName(_) filterNot (_.isGroupAccount) foreach (x => notify(x.mailAddress)) )
}
object Notifier {
// TODO We want to be able to switch to mock.
def apply(): Notifier = new SystemSettingsService {}.loadSystemSettings match {
case settings if settings.notification => new Mailer(settings.smtp.get)
case _ => new MockMailer
}
def msgIssue(url: String) = (content: String) => s"""
|${content}<br/>
|--<br/>
|<a href="${url}">View it on GitBucket</a>
""".stripMargin
def msgPullRequest(url: String) = (content: String) => s"""
|${content}<hr/>
|View, comment on, or merge it at:<br/>
|<a href="${url}">${url}</a>
""".stripMargin
def msgComment(url: String) = (content: String) => s"""
|${content}<br/>
|--<br/>
|<a href="${url}">View it on GitBucket</a>
""".stripMargin
def msgStatus(url: String) = (content: String) => s"""
|${content} <a href="${url}">#${url split('/') last}</a>
""".stripMargin
}
class Mailer(private val smtp: Smtp) extends Notifier {
private val logger = LoggerFactory.getLogger(classOf[Mailer])
def toNotify(r: RepositoryService.RepositoryInfo, issueId: Int, content: String)
(msg: String => String)(implicit context: Context) = {
val database = Database(context.request.getServletContext)
val f = future {
      // TODO Can we use the database session outside of the transaction filter?
database withSession {
getIssue(r.owner, r.name, issueId.toString) foreach { issue =>
defining(
s"[${r.name}] ${issue.title} (#${issueId})" ->
msg(view.Markdown.toHtml(content, r, false, true))) { case (subject, msg) =>
recipients(issue) { to =>
val email = new HtmlEmail
email.setHostName(smtp.host)
email.setSmtpPort(smtp.port.get)
smtp.user.foreach { user =>
email.setAuthenticator(new DefaultAuthenticator(user, smtp.password.getOrElse("")))
}
smtp.ssl.foreach { ssl =>
email.setSSLOnConnect(ssl)
}
smtp.fromAddress
.map (_ -> smtp.fromName.orNull)
.orElse (Some("[email protected]" -> context.loginAccount.get.userName))
.foreach { case (address, name) =>
email.setFrom(address, name)
}
email.setCharset("UTF-8")
email.setSubject(subject)
email.setHtmlMsg(msg)
email.addTo(to).send
}
}
}
}
"Notifications Successful."
}
f onSuccess {
case s => logger.debug(s)
}
f onFailure {
case t => logger.error("Notifications Failed.", t)
}
}
}
class MockMailer extends Notifier {
def toNotify(r: RepositoryService.RepositoryInfo, issueId: Int, content: String)
(msg: String => String)(implicit context: Context): Unit = {}
}
| takuok/gitbucket | src/main/scala/util/Notifier.scala | Scala | apache-2.0 | 4,346 |
/**
* Copyright 2009 Jorge Ortiz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package org.scala_tools.javautils.s2j
import java.util.{Set => JSet}
import scala.collection.jcl.{SetWrapper => JCLSetWrapper}
import scala.collection.mutable.Set
import org.scala_tools.javautils.j2s.JSetWrapper
class RichSMutableSet[T](set: Set[T]) {
def asJava: JSet[T] = set match {
case sw: JCLSetWrapper[_] =>
sw.underlying.asInstanceOf[JSet[T]]
case sw: JSetWrapper[_] =>
sw.asJava.asInstanceOf[JSet[T]]
case _ => new SMutableSetWrapper[T] {
type Wrapped = Set[T]
val underlying = set
}
}
}
| jorgeortiz85/scala-javautils | src/main/scala/org/scala_tools/javautils/s2j/RichSMutableSet.scala | Scala | apache-2.0 | 1,142 |
package x7c1.wheat.modern.database.selector
import android.database.sqlite.SQLiteDatabase
import x7c1.wheat.macros.reify.{HasConstructor, New}
import x7c1.wheat.modern.database.selector.SelectorProvidable.CanReify
import scala.language.reflectiveCalls
trait CanProvideSelector[A]{
type Selector
def createFrom(db: SQLiteDatabase): Selector
}
class SelectorProvidable[A, S: CanReify] extends CanProvideSelector[A]{
override type Selector = S
override def createFrom(db: SQLiteDatabase): S = New[S](db)
}
object SelectorProvidable {
object Implicits {
implicit class SelectorProvidableDatabase(val db: SQLiteDatabase) extends AnyVal {
def selectorOf[A](implicit x: CanProvideSelector[A]): x.Selector = {
x createFrom db
}
}
}
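  // Usage sketch (added comment; `FooRecord` and `FooSelector` are hypothetical types):
  // given an `implicit val p = new SelectorProvidable[FooRecord, FooSelector]` and an
  // import of `Implicits._`, a selector is obtained with `db.selectorOf[FooRecord]`.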
type CanReify[A] = HasConstructor[SQLiteDatabase => A]
}
| x7c1/Linen | wheat-modern/src/main/scala/x7c1/wheat/modern/database/selector/CanProvideSelector.scala | Scala | mit | 829 |
package looty
package views.loot
import looty.model.{ComputedItem, LootContainerId}
import org.scalajs.jquery.JQuery
//////////////////////////////////////////////////////////////
// Copyright (c) 2014 Ben Jackman, Jeff Gomberg
// All Rights Reserved
// please contact [email protected] or [email protected]
// for licensing inquiries
// Created by bjackman @ 8/24/14 6:28 PM
//////////////////////////////////////////////////////////////
object Container {
val visCls = "visible-loot-container"
val invisCls = "invisible-loot-container"
}
class Container(val id: LootContainerId, html: JQuery, initialVisible: Boolean, refreshFn: () => Unit) {
import Container._
private var _items = Vector.empty[ComputedItem]
def items = _items
def setItems(items: Vector[ComputedItem]) {
html.removeClass("loading")
_items = items
}
def refresh() {
html.addClass("loading")
refreshFn()
}
private var listeners = Vector.empty[Boolean => Unit]
private var _visible = initialVisible
private def refreshHtml() {
if (visible) {
html.addClass(visCls).removeClass(invisCls)
} else {
html.addClass(invisCls).removeClass(visCls)
}
}
private def changed() {
refreshHtml()
listeners.foreach(_(_visible))
}
def visible = _visible
def hide() {
_visible = false
changed()
}
def show() {
_visible = true
changed()
}
def toggle() {
_visible = !_visible
changed()
}
def onChange(f: Boolean => Unit) {
listeners :+= f
}
}
class Containers {
var _containers = Vector.empty[Container]
var _allMap = Map.empty[LootContainerId, Container]
def all = _containers
def addContainer(c: Container) {
_containers :+= c
_allMap += c.id -> c
c.onChange(v => conChanged(c))
}
def get(id: LootContainerId): Option[Container] = _allMap.get(id)
private def conChanged(c: Container) {
changed(c)
}
private var listeners = Vector.empty[Container => Unit]
def onChange(f: (Container) => Unit) {
listeners :+= f
}
private def changed(c: Container) {
listeners.foreach(_(c))
}
} | benjaminjackman/looty | looty/src/main/scala/looty/views/loot/Container.scala | Scala | gpl-2.0 | 2,127 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.impl.storage.mapdb
import slamdata.Predef._
import quasar.impl.storage.{IsPrefix, PrefixStore}
import scala.collection.JavaConverters._
import java.util.{Map => JMap}
import cats.effect.{Blocker, ContextShift, Sync}
import cats.syntax.functor._
import fs2.Stream
import org.mapdb.{BTreeMap, DB, Serializer}
import org.mapdb.serializer.{GroupSerializer, SerializerArrayTuple}
import shapeless._
import shapeless.ops.hlist._
import shapeless.ops.traversable._
final class MapDbPrefixStore[F[_]: Sync: ContextShift, K <: HList, V] private (
db: DB,
store: BTreeMap[Array[AnyRef], V],
blocker: Blocker)(
implicit
kToArray: ToTraversable.Aux[K, Array, AnyRef],
kFromTraversable: FromTraversable[K])
extends PrefixStore[F, K, V] {
type Constraint[P <: HList] = PrefixStore.ToArray[P]
def prefixedEntries[P <: HList](p: P)(
implicit
pfx: IsPrefix[P, K],
pToArray: ToTraversable.Aux[P, Array, AnyRef])
: Stream[F, (K, V)] =
entriesStream(store.prefixSubMap(pToArray(p)))
def deletePrefixed[P <: HList](p: P)(
implicit
pfx: IsPrefix[P, K],
pToArray: ToTraversable.Aux[P, Array, AnyRef])
: F[Unit] =
blocker.delay[F, Unit] {
store.prefixSubMap(pToArray(p)).clear()
db.commit()
}
val entries: Stream[F, (K, V)] =
entriesStream(store)
def lookup(k: K): F[Option[V]] =
blocker.delay(Option(store.get(kToArray(k))))
def insert(k: K, v: V): F[Unit] =
blocker.delay[F, Unit] {
store.put(kToArray(k), v)
db.commit()
}
def delete(k: K): F[Boolean] =
blocker.delay(Option(store.remove(kToArray(k))).nonEmpty)
////
private def entriesStream(m: JMap[Array[AnyRef], V]): Stream[F, (K, V)] =
Stream.eval(Sync[F].delay(m.entrySet.iterator))
.flatMap(it => Stream.fromIterator[F](it.asScala))
.evalMap(e => mkKey(e.getKey).tupleRight(e.getValue))
.translate(blocker.blockOnK[F])
private def mkKey(parts: Array[AnyRef]): F[K] =
kFromTraversable(parts) match {
case Some(k) =>
Sync[F].pure(k)
case None =>
Sync[F].raiseError[K](new RuntimeException(
s"Unable to decode key from '${parts.mkString("[", ", ", "]")}'"))
}
}
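// Hedged usage sketch (added for illustration; not part of the original source).
// Keys are HLists, and any prefix of the key HList can be used to enumerate or
// delete entries. For a store whose key has the shape String :: String :: HNil,
// usage might look roughly like the following (the string values and `payload`
// are placeholders, and the required ToTraversable / IsPrefix instances are
// assumed to resolve for these shapes):
//
//   store.insert("tenant-a" :: "job-1" :: HNil, payload)  // put one entry
//   store.prefixedEntries("tenant-a" :: HNil)             // Stream[F, (K, V)] under the prefix
//   store.deletePrefixed("tenant-a" :: HNil)              // drop everything under the prefix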
object MapDbPrefixStore {
// If instantiation fails due to lack of FromTraversable, ensure
// scala.Predef.classOf is in scope.
def apply[F[_]]: PartiallyApplied[F] =
new PartiallyApplied[F]
final class PartiallyApplied[F[_]] {
def apply[SS <: HList, V, K <: HList, S[X] <: Serializer[X]](
name: String,
db: DB,
keySerializer: SS,
valueSerializer: GroupSerializer[V],
blocker: Blocker)(
implicit
sync: Sync[F],
contextShift: ContextShift[F],
ssComapped: Comapped.Aux[SS, S, K],
ssToList: ToTraversable.Aux[SS, List, S[_]],
kToArray: ToTraversable.Aux[K, Array, AnyRef],
kFromTraversable: FromTraversable[K])
: F[PrefixStore.Legacy[F, K, V]] =
blocker.delay[F, PrefixStore.Legacy[F, K, V]] {
val store =
db.treeMap(name)
.keySerializer(new SerializerArrayTuple(ssToList(keySerializer): _*))
.valueSerializer(valueSerializer)
.createOrOpen()
new MapDbPrefixStore[F, K, V](db, store, blocker)
}
}
}
| quasar-analytics/quasar | impl/src/main/scala/quasar/impl/storage/mapdb/MapDbPrefixStore.scala | Scala | apache-2.0 | 3,939 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.v1
import org.apache.spark.sql.{AnalysisException, Row, SaveMode}
import org.apache.spark.sql.execution.command
/**
* This base suite contains unified tests for the `SHOW PARTITIONS` command that check V1
* table catalogs. The tests that cannot run for all V1 catalogs are located in more
* specific test suites:
*
* - V1 In-Memory catalog: `org.apache.spark.sql.execution.command.v1.ShowPartitionsSuite`
* - V1 Hive External catalog: `org.apache.spark.sql.hive.execution.command.ShowPartitionsSuite`
*/
trait ShowPartitionsSuiteBase extends command.ShowPartitionsSuiteBase {
test("show everything in the default database") {
val table = "dateTable"
withTable(table) {
createDateTable(table)
runShowPartitionsSql(
s"show partitions default.$table",
Row("year=2015/month=1") ::
Row("year=2015/month=2") ::
Row("year=2016/month=2") ::
Row("year=2016/month=3") :: Nil)
}
}
// The test fails for V2 Table Catalogs with the exception:
// org.apache.spark.sql.AnalysisException: CREATE VIEW is only supported with v1 tables.
test("show partitions of a view") {
val table = "dateTable"
withTable(table) {
createDateTable(table)
val view = "view1"
withView(view) {
sql(s"CREATE VIEW $view as select * from $table")
val errMsg = intercept[AnalysisException] {
sql(s"SHOW PARTITIONS $view")
}.getMessage
assert(errMsg.contains("'SHOW PARTITIONS' expects a table"))
}
}
}
test("show partitions of a temporary view") {
val viewName = "test_view"
withTempView(viewName) {
spark.range(10).createTempView(viewName)
val errMsg = intercept[AnalysisException] {
sql(s"SHOW PARTITIONS $viewName")
}.getMessage
assert(errMsg.contains("'SHOW PARTITIONS' expects a table"))
}
}
test("SPARK-33591: null as a partition value") {
val t = "part_table"
withTable(t) {
sql(s"CREATE TABLE $t (col1 INT, p1 STRING) $defaultUsing PARTITIONED BY (p1)")
sql(s"INSERT INTO TABLE $t PARTITION (p1 = null) SELECT 0")
checkAnswer(sql(s"SHOW PARTITIONS $t"), Row("p1=__HIVE_DEFAULT_PARTITION__"))
checkAnswer(
sql(s"SHOW PARTITIONS $t PARTITION (p1 = null)"),
Row("p1=__HIVE_DEFAULT_PARTITION__"))
}
}
}
/**
* The class contains tests for the `SHOW PARTITIONS` command to check V1 In-Memory table catalog.
*/
class ShowPartitionsSuite extends ShowPartitionsSuiteBase with CommandSuiteBase {
// The test is placed here because it fails with `USING HIVE`:
// org.apache.spark.sql.AnalysisException:
// Hive data source can only be used with tables, you can't use it with CREATE TEMP VIEW USING
test("issue exceptions on the temporary view") {
val viewName = "test_view"
withTempView(viewName) {
sql(s"""
|CREATE TEMPORARY VIEW $viewName (c1 INT, c2 STRING)
|$defaultUsing""".stripMargin)
val errMsg = intercept[AnalysisException] {
sql(s"SHOW PARTITIONS $viewName")
}.getMessage
assert(errMsg.contains("'SHOW PARTITIONS' expects a table"))
}
}
test("show partitions from a datasource") {
import testImplicits._
withTable("part_datasrc") {
val df = (1 to 3).map(i => (i, s"val_$i", i * 2)).toDF("a", "b", "c")
df.write
.partitionBy("a")
.format("parquet")
.mode(SaveMode.Overwrite)
.saveAsTable("part_datasrc")
assert(sql("SHOW PARTITIONS part_datasrc").count() == 3)
}
}
test("SPARK-33904: null and empty string as partition values") {
withNamespaceAndTable("ns", "tbl") { t =>
createNullPartTable(t, "parquet")
runShowPartitionsSql(
s"SHOW PARTITIONS $t",
Row("part=__HIVE_DEFAULT_PARTITION__") :: Nil)
checkAnswer(spark.table(t), Row(0, null) :: Row(1, null) :: Nil)
}
}
}
| shaneknapp/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowPartitionsSuite.scala | Scala | apache-2.0 | 4,748 |
package com.hackathon
import org.joda.time.DateTime
case class IssueType(issueType: Long, name: String)
object IssueType {
def apply(issueType: Long): IssueType = issueType match {
case 1 => IssueType(1, "Glass on road")
case 2 => IssueType(2, "Construction")
case 3 => IssueType(3, "Car on bike lane")
case 4 => IssueType(4, "Bike crash")
case _ => IssueType(0, "Unknown")
}
def apply: IssueType = apply(0)
}
object Severity {
def apply(severity: Long): String = severity match {
case 1 => "orange"
case 2 => "red"
case _ => "unknown"
}
}
case class Issue(
id: String,
timestamp: Long,
issueType: Long,
issueName: String,
severity: Long,
longitude: Double,
latitude: Double,
creator: String
)
case class DistancedIssue(
count: Int,
id: String,
`type`: Long,
severity: Long,
latitude: Double,
longitude: Double,
distance: Double,
angle: Double
)
case class TimestampedIssue(timestamp: Long, id: String, severity: Long, `type`: Long, latitude: Double, longitude: Double)
case class IssuesSince(count: Long, issues: List[TimestampedIssue])
object Issue {
def apply(latitude: Double, longitude: Double, severity: Long, creator: String): Issue = Issue(
"",
DateTime.now.getMillis,
0,
IssueType(0).name,
severity,
longitude,
latitude,
creator
)
} | norefle/hackathon-locations-backend | src/main/scala/com/hackathon/Issue.scala | Scala | mit | 1,469 |
package ch.ltouroumov.modularmachines.common.blocks
import ch.ltouroumov.modularmachines.Settings
import ch.ltouroumov.modularmachines.ModularMachines
import ch.ltouroumov.modularmachines.common.texture.ConnectedTextureHandler
import net.minecraft.block.Block
import net.minecraft.block.material.Material
import net.minecraft.client.renderer.texture.IIconRegister
import net.minecraft.util.IIcon
import net.minecraft.world.IBlockAccess
import net.minecraftforge.common.util.ForgeDirection
class MachineGlass extends Block(Material.glass) {
setHardness(1.0F)
setStepSound(Block.soundTypeGlass)
setBlockName("MachineGlass")
setCreativeTab(ModularMachines.tabModularMachines)
val textureHandler = new ConnectedTextureHandler(Settings.assetName("Window_Side"), block => block.isInstanceOf[MachineGlass])
override def registerBlockIcons(register: IIconRegister) =
textureHandler.loadTextures(register)
override def getIcon(side:Int, meta:Int): IIcon =
textureHandler.getTexture(side)
override def getIcon(world: IBlockAccess, x: Int, y: Int, z:Int, side: Int): IIcon =
textureHandler.getTexture(world, x, y, z, side)
override def shouldSideBeRendered(world: IBlockAccess, x: Int, y: Int, z: Int, side: Int): Boolean =
textureHandler.shouldRenderSide(world, x, y, z, side)
override def isOpaqueCube = false
}
| ltouroumov/modular-machines | src/main/scala/ch/ltouroumov/modularmachines/common/blocks/MachineGlass.scala | Scala | gpl-2.0 | 1,348 |
package scala.quoted
/** Exception thrown when an Expr or Type is used outside of the scope where it is valid */
class ScopeException(msg: String) extends Exception(msg)
| som-snytt/dotty | library/src/scala/quoted/ScopeException.scala | Scala | apache-2.0 | 169 |
package pl.touk.nussknacker.ui.definition
import pl.touk.nussknacker.engine.ModelData
import pl.touk.nussknacker.engine.api.{FragmentSpecificData, MetaData}
import pl.touk.nussknacker.engine.api.async.{DefaultAsyncInterpretationValue, DefaultAsyncInterpretationValueDeterminer}
import pl.touk.nussknacker.engine.api.component.{ComponentGroupName, SingleComponentConfig}
import pl.touk.nussknacker.engine.api.definition.{Parameter, RawParameterEditor}
import pl.touk.nussknacker.engine.api.deployment.DeploymentManager
import pl.touk.nussknacker.engine.api.component.{AdditionalPropertyConfig, ParameterConfig}
import pl.touk.nussknacker.engine.api.typed.typing.{Typed, Unknown}
import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess
import pl.touk.nussknacker.engine.canonicalgraph.canonicalnode.FlatNode
import pl.touk.nussknacker.engine.component.ComponentsUiConfigExtractor
import pl.touk.nussknacker.engine.definition.DefinitionExtractor.ObjectDefinition
import pl.touk.nussknacker.engine.definition.ProcessDefinitionExtractor.ProcessDefinition
import pl.touk.nussknacker.engine.definition.TypeInfos
import pl.touk.nussknacker.engine.definition.TypeInfos.{ClazzDefinition, MethodInfo}
import pl.touk.nussknacker.engine.definition.parameter.ParameterData
import pl.touk.nussknacker.engine.definition.parameter.defaults.{DefaultValueDeterminerChain, DefaultValueDeterminerParameters}
import pl.touk.nussknacker.engine.definition.parameter.editor.EditorExtractor
import pl.touk.nussknacker.engine.definition.parameter.validator.{ValidatorExtractorParameters, ValidatorsExtractor}
import pl.touk.nussknacker.engine.graph.node.SubprocessInputDefinition
import pl.touk.nussknacker.engine.graph.node.SubprocessInputDefinition.SubprocessParameter
import pl.touk.nussknacker.engine.util.Implicits.RichScalaMap
import pl.touk.nussknacker.restmodel.definition._
import pl.touk.nussknacker.ui.component.ComponentDefinitionPreparer
import pl.touk.nussknacker.ui.config.ComponentsGroupMappingConfigExtractor
import pl.touk.nussknacker.ui.definition.additionalproperty.{AdditionalPropertyValidatorDeterminerChain, UiAdditionalPropertyEditorDeterminer}
import pl.touk.nussknacker.ui.process.ProcessCategoryService
import pl.touk.nussknacker.ui.process.subprocess.SubprocessDetails
import pl.touk.nussknacker.ui.security.api.LoggedUser
object UIProcessObjectsFactory {
import net.ceedubs.ficus.Ficus._
import pl.touk.nussknacker.engine.util.config.FicusReaders._
def prepareUIProcessObjects(modelDataForType: ModelData,
deploymentManager: DeploymentManager,
user: LoggedUser,
subprocessesDetails: Set[SubprocessDetails],
isSubprocess: Boolean,
processCategoryService: ProcessCategoryService,
processingType: String): UIProcessObjects = {
val processConfig = modelDataForType.processConfig
val chosenProcessDefinition: ProcessDefinition[ObjectDefinition] = modelDataForType.processDefinition
val fixedComponentsUiConfig = ComponentsUiConfigExtractor.extract(processConfig)
//FIXME: how to handle dynamic configuration of subprocesses??
val subprocessInputs = fetchSubprocessInputs(subprocessesDetails, modelDataForType.modelClassLoader.classLoader, fixedComponentsUiConfig)
val uiProcessDefinition = createUIProcessDefinition(chosenProcessDefinition, subprocessInputs, modelDataForType.typeDefinitions.map(prepareClazzDefinition), processCategoryService)
val customTransformerAdditionalData = chosenProcessDefinition.customStreamTransformers.mapValuesNow(_._2)
val dynamicComponentsConfig = uiProcessDefinition.allDefinitions.mapValues(_.componentConfig)
val subprocessesComponentsConfig = subprocessInputs.mapValues(_.componentConfig)
    // We append fixedComponentsUiConfig because the configuration of default components (filters, switches, etc.) is not present in dynamicComponentsConfig...
    // Maybe we could also put them in uiProcessDefinition.allDefinitions?
val finalComponentsConfig = ComponentDefinitionPreparer.combineComponentsConfig(subprocessesComponentsConfig, fixedComponentsUiConfig, dynamicComponentsConfig)
val componentsGroupMapping = ComponentsGroupMappingConfigExtractor.extract(processConfig)
val additionalPropertiesConfig = processConfig
.getOrElse[Map[String, AdditionalPropertyConfig]]("additionalPropertiesConfig", Map.empty)
      .filter(_ => !isSubprocess) // FIXME: a separate additionalPropertiesConfig should be introduced for fragments; for now we skip it
.mapValues(createUIAdditionalPropertyConfig)
val defaultUseAsyncInterpretationFromConfig = processConfig.as[Option[Boolean]]("asyncExecutionConfig.defaultUseAsyncInterpretation")
val defaultAsyncInterpretation: DefaultAsyncInterpretationValue = DefaultAsyncInterpretationValueDeterminer.determine(defaultUseAsyncInterpretationFromConfig)
UIProcessObjects(
componentGroups = ComponentDefinitionPreparer.prepareComponentsGroupList(
user = user,
processDefinition = uiProcessDefinition,
isSubprocess = isSubprocess,
componentsConfig = finalComponentsConfig,
componentsGroupMapping = componentsGroupMapping,
processCategoryService = processCategoryService,
customTransformerAdditionalData = customTransformerAdditionalData,
processingType
),
processDefinition = uiProcessDefinition,
componentsConfig = finalComponentsConfig,
additionalPropertiesConfig = additionalPropertiesConfig,
edgesForNodes = ComponentDefinitionPreparer.prepareEdgeTypes(
processDefinition = chosenProcessDefinition,
isSubprocess = isSubprocess,
subprocessesDetails = subprocessesDetails
),
customActions = deploymentManager.customActions.map(UICustomAction(_)),
defaultAsyncInterpretation = defaultAsyncInterpretation.value)
}
private def prepareClazzDefinition(definition: ClazzDefinition): UIClazzDefinition = {
def toUIBasicParam(p: TypeInfos.Parameter): UIBasicParameter = UIBasicParameter(p.name, p.refClazz)
// TODO: present all overloaded methods on FE
def toUIMethod(methods: List[MethodInfo]): UIMethodInfo = {
val m = methods.maxBy(_.parameters.size)
UIMethodInfo(m.parameters.map(toUIBasicParam), m.refClazz, m.description, m.varArgs)
}
val methodsWithHighestArity = definition.methods.mapValues(toUIMethod)
val staticMethodsWithHighestArity = definition.staticMethods.mapValues(toUIMethod)
UIClazzDefinition(definition.clazzName, methodsWithHighestArity, staticMethodsWithHighestArity)
}
private def fetchSubprocessInputs(subprocessesDetails: Set[SubprocessDetails],
classLoader: ClassLoader,
fixedComponentsConfig: Map[String, SingleComponentConfig]): Map[String, ObjectDefinition] = {
val subprocessInputs = subprocessesDetails.collect {
case SubprocessDetails(CanonicalProcess(MetaData(id, FragmentSpecificData(docsUrl), _, _), FlatNode(SubprocessInputDefinition(_, parameters, _)) :: _, _), category) =>
val config = fixedComponentsConfig.getOrElse(id, SingleComponentConfig.zero).copy(docsUrl = docsUrl)
val typedParameters = parameters.map(extractSubprocessParam(classLoader, config))
(id, new ObjectDefinition(typedParameters, Typed[java.util.Map[String, Any]], Some(List(category)), config))
}.toMap
subprocessInputs
}
private def extractSubprocessParam(classLoader: ClassLoader, componentConfig: SingleComponentConfig)(p: SubprocessParameter): Parameter = {
val runtimeClass = p.typ.toRuntimeClass(classLoader)
    // TODO: currently, if we cannot parse the parameter class, we assume it is Unknown
val typ = runtimeClass.map(Typed(_)).getOrElse(Unknown)
val config = componentConfig.params.flatMap(_.get(p.name)).getOrElse(ParameterConfig.empty)
val parameterData = ParameterData(typ, Nil)
val extractedEditor = EditorExtractor.extract(parameterData, config)
Parameter(
name = p.name,
typ = typ,
editor = extractedEditor,
validators = ValidatorsExtractor.extract(ValidatorExtractorParameters(parameterData, isOptional = true, config, extractedEditor)),
// TODO: ability to pick default value from gui
defaultValue = DefaultValueDeterminerChain.determineParameterDefaultValue(DefaultValueDeterminerParameters(parameterData, isOptional = true, config, extractedEditor)),
additionalVariables = Map.empty,
variablesToHide = Set.empty,
branchParam = false,
isLazyParameter = false,
scalaOptionParameter = false,
javaOptionalParameter = false)
}
def createUIObjectDefinition(objectDefinition: ObjectDefinition, processCategoryService: ProcessCategoryService): UIObjectDefinition = {
UIObjectDefinition(
parameters = objectDefinition.parameters.map(param => createUIParameter(param)),
returnType = if (objectDefinition.hasNoReturn) None else Some(objectDefinition.returnType),
categories = objectDefinition.categories.getOrElse(processCategoryService.getAllCategories),
componentConfig = objectDefinition.componentConfig
)
}
def createUIProcessDefinition(processDefinition: ProcessDefinition[ObjectDefinition],
subprocessInputs: Map[String, ObjectDefinition],
types: Set[UIClazzDefinition],
processCategoryService: ProcessCategoryService): UIProcessDefinition = {
def createUIObjectDef(objDef: ObjectDefinition) = createUIObjectDefinition(objDef, processCategoryService)
val uiProcessDefinition = UIProcessDefinition(
services = processDefinition.services.mapValues(createUIObjectDef),
sourceFactories = processDefinition.sourceFactories.mapValues(createUIObjectDef),
sinkFactories = processDefinition.sinkFactories.mapValues(createUIObjectDef),
subprocessInputs = subprocessInputs.mapValues(createUIObjectDef),
customStreamTransformers = processDefinition.customStreamTransformers.mapValues(e => createUIObjectDef(e._1)),
signalsWithTransformers = processDefinition.signalsWithTransformers.mapValues(e => createUIObjectDef(e._1)),
globalVariables = processDefinition.expressionConfig.globalVariables.mapValues(createUIObjectDef),
typesInformation = types
)
uiProcessDefinition
}
def createUIParameter(parameter: Parameter): UIParameter = {
UIParameter(name = parameter.name, typ = parameter.typ, editor = parameter.editor.getOrElse(RawParameterEditor), validators = parameter.validators, defaultValue = parameter.defaultValue.getOrElse(""),
additionalVariables = parameter.additionalVariables.mapValuesNow(_.typingResult), variablesToHide = parameter.variablesToHide, branchParam = parameter.branchParam)
}
def createUIAdditionalPropertyConfig(config: AdditionalPropertyConfig): UiAdditionalPropertyConfig = {
val editor = UiAdditionalPropertyEditorDeterminer.determine(config)
val determinedValidators = AdditionalPropertyValidatorDeterminerChain(config).determine()
UiAdditionalPropertyConfig(config.defaultValue, editor, determinedValidators, config.label)
}
}
object SortedComponentGroup {
def apply(name: ComponentGroupName, components: List[ComponentTemplate]): ComponentGroup =
ComponentGroup(name, components.sortBy(_.label.toLowerCase))
}
| TouK/nussknacker | ui/server/src/main/scala/pl/touk/nussknacker/ui/definition/UIProcessObjectsFactory.scala | Scala | apache-2.0 | 11,548 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression
import scala.util.Random
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.feature.Instance
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.{DenseVector, Vector, Vectors}
import org.apache.spark.ml.param.{ParamMap, ParamsSuite}
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.util.{LinearDataGenerator, MLlibTestSparkContext}
import org.apache.spark.sql.{DataFrame, Row}
class LinearRegressionSuite
extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
import testImplicits._
private val seed: Int = 42
@transient var datasetWithDenseFeature: DataFrame = _
@transient var datasetWithStrongNoise: DataFrame = _
@transient var datasetWithDenseFeatureWithoutIntercept: DataFrame = _
@transient var datasetWithSparseFeature: DataFrame = _
@transient var datasetWithWeight: DataFrame = _
@transient var datasetWithWeightConstantLabel: DataFrame = _
@transient var datasetWithWeightZeroLabel: DataFrame = _
override def beforeAll(): Unit = {
super.beforeAll()
datasetWithDenseFeature = sc.parallelize(LinearDataGenerator.generateLinearInput(
intercept = 6.3, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, eps = 0.1), 2).map(_.asML).toDF()
datasetWithStrongNoise = sc.parallelize(LinearDataGenerator.generateLinearInput(
intercept = 6.3, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 100, seed, eps = 5.0), 2).map(_.asML).toDF()
/*
datasetWithDenseFeatureWithoutIntercept is not needed for correctness testing
but is useful for illustrating training model without intercept
*/
datasetWithDenseFeatureWithoutIntercept = sc.parallelize(
LinearDataGenerator.generateLinearInput(
intercept = 0.0, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, eps = 0.1), 2).map(_.asML).toDF()
val r = new Random(seed)
// When feature size is larger than 4096, normal optimizer is chosen
// as the solver of linear regression in the case of "auto" mode.
val featureSize = 4100
datasetWithSparseFeature = sc.parallelize(LinearDataGenerator.generateLinearInput(
intercept = 0.0, weights = Seq.fill(featureSize)(r.nextDouble()).toArray,
xMean = Seq.fill(featureSize)(r.nextDouble()).toArray,
xVariance = Seq.fill(featureSize)(r.nextDouble()).toArray, nPoints = 200,
seed, eps = 0.1, sparsity = 0.7), 2).map(_.asML).toDF()
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b <- c(17, 19, 23, 29)
w <- c(1, 2, 3, 4)
df <- as.data.frame(cbind(A, b))
*/
datasetWithWeight = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(3.0, 13.0))
), 2).toDF()
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b.const <- c(17, 17, 17, 17)
w <- c(1, 2, 3, 4)
df.const.label <- as.data.frame(cbind(A, b.const))
*/
datasetWithWeightConstantLabel = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(17.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(17.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(17.0, 4.0, Vectors.dense(3.0, 13.0))
), 2).toDF()
datasetWithWeightZeroLabel = sc.parallelize(Seq(
Instance(0.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(0.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(0.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(0.0, 4.0, Vectors.dense(3.0, 13.0))
), 2).toDF()
}
/**
* Enable the ignored test to export the dataset into CSV format,
* so we can validate the training accuracy compared with R's glmnet package.
*/
ignore("export test data into CSV format") {
datasetWithDenseFeature.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithDenseFeature")
datasetWithDenseFeatureWithoutIntercept.rdd.map {
case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/LinearRegressionSuite/datasetWithDenseFeatureWithoutIntercept")
datasetWithSparseFeature.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithSparseFeature")
}
test("params") {
ParamsSuite.checkParams(new LinearRegression)
val model = new LinearRegressionModel("linearReg", Vectors.dense(0.0), 0.0)
ParamsSuite.checkParams(model)
}
test("linear regression: default params") {
val lir = new LinearRegression
assert(lir.getLabelCol === "label")
assert(lir.getFeaturesCol === "features")
assert(lir.getPredictionCol === "prediction")
assert(lir.getRegParam === 0.0)
assert(lir.getElasticNetParam === 0.0)
assert(lir.getFitIntercept)
assert(lir.getStandardization)
assert(lir.getSolver == "auto")
val model = lir.fit(datasetWithDenseFeature)
MLTestingUtils.checkCopyAndUids(lir, model)
assert(model.hasSummary)
val copiedModel = model.copy(ParamMap.empty)
assert(copiedModel.hasSummary)
model.setSummary(None)
assert(!model.hasSummary)
model.transform(datasetWithDenseFeature)
.select("label", "prediction")
.collect()
assert(model.getFeaturesCol === "features")
assert(model.getPredictionCol === "prediction")
assert(model.intercept !== 0.0)
assert(model.hasParent)
val numFeatures = datasetWithDenseFeature.select("features").first().getAs[Vector](0).size
assert(model.numFeatures === numFeatures)
}
test("linear regression handles singular matrices") {
// check for both constant columns with intercept (zero std) and collinear
val singularDataConstantColumn = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(1.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(1.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(1.0, 13.0))
), 2).toDF()
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver).setFitIntercept(true)
val model = trainer.fit(singularDataConstantColumn)
// to make it clear that WLS did not solve analytically
intercept[UnsupportedOperationException] {
model.summary.coefficientStandardErrors
}
assert(model.summary.objectiveHistory !== Array(0.0))
}
val singularDataCollinearFeatures = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(10.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(14.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(22.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(26.0, 13.0))
), 2).toDF()
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver).setFitIntercept(true)
val model = trainer.fit(singularDataCollinearFeatures)
intercept[UnsupportedOperationException] {
model.summary.coefficientStandardErrors
}
assert(model.summary.objectiveHistory !== Array(0.0))
}
}
test("linear regression with intercept without regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = new LinearRegression().setSolver(solver)
// The result should be the same regardless of standardization without regularization
val trainer2 = (new LinearRegression).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE, stringsAsFactors=FALSE)
features <- as.matrix(data.frame(as.numeric(data$V2), as.numeric(data$V3)))
label <- as.numeric(data$V1)
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0, lambda = 0))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.298698
as.numeric.data.V2. 4.700706
as.numeric.data.V3. 7.199082
*/
val interceptR = 6.298698
val coefficientsR = Vectors.dense(4.700706, 7.199082)
assert(model1.intercept ~== interceptR relTol 1E-3)
assert(model1.coefficients ~= coefficientsR relTol 1E-3)
assert(model2.intercept ~== interceptR relTol 1E-3)
assert(model2.coefficients ~= coefficientsR relTol 1E-3)
model1.transform(datasetWithDenseFeature).select("features", "prediction").collect().foreach {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept without regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setFitIntercept(false).setSolver(solver)
// Without regularization the results should be the same
val trainer2 = (new LinearRegression).setFitIntercept(false).setStandardization(false)
.setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val modelWithoutIntercept1 = trainer1.fit(datasetWithDenseFeatureWithoutIntercept)
val model2 = trainer2.fit(datasetWithDenseFeature)
val modelWithoutIntercept2 = trainer2.fit(datasetWithDenseFeatureWithoutIntercept)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0, lambda = 0,
intercept = FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data.V2. 6.973403
as.numeric.data.V3. 5.284370
*/
val coefficientsR = Vectors.dense(6.973403, 5.284370)
assert(model1.intercept ~== 0 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR relTol 1E-2)
assert(model2.intercept ~== 0 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR relTol 1E-2)
/*
Then again with the data with no intercept:
> coefficientsWithoutIntercept
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data3.V2. 4.70011
as.numeric.data3.V3. 7.19943
*/
val coefficientsWithoutInterceptR = Vectors.dense(4.70011, 7.19943)
assert(modelWithoutIntercept1.intercept ~== 0 absTol 1E-3)
assert(modelWithoutIntercept1.coefficients ~= coefficientsWithoutInterceptR relTol 1E-3)
assert(modelWithoutIntercept2.intercept ~== 0 absTol 1E-3)
assert(modelWithoutIntercept2.coefficients ~= coefficientsWithoutInterceptR relTol 1E-3)
}
}
test("linear regression with intercept with L1 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setSolver(solver).setStandardization(false)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian",
alpha = 1.0, lambda = 0.57 ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.242284
as.numeric.d1.V2. 4.019605
as.numeric.d1.V3. 6.679538
*/
val interceptR1 = 6.242284
val coefficientsR1 = Vectors.dense(4.019605, 6.679538)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 1.0,
lambda = 0.57, standardize=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.416948
as.numeric.data.V2. 3.893869
as.numeric.data.V3. 6.724286
*/
val interceptR2 = 6.416948
val coefficientsR2 = Vectors.dense(3.893869, 6.724286)
assert(model2.intercept ~== interceptR2 relTol 1E-3)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-3)
model1.transform(datasetWithDenseFeature).select("features", "prediction")
.collect().foreach {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept with L1 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setFitIntercept(false).setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setFitIntercept(false).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 1.0,
lambda = 0.57, intercept=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data.V2. 6.272927
as.numeric.data.V3. 4.782604
*/
val interceptR1 = 0.0
val coefficientsR1 = Vectors.dense(6.272927, 4.782604)
assert(model1.intercept ~== interceptR1 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 1.0,
lambda = 0.57, intercept=FALSE, standardize=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data.V2. 6.207817
as.numeric.data.V3. 4.775780
*/
val interceptR2 = 0.0
val coefficientsR2 = Vectors.dense(6.207817, 4.775780)
assert(model2.intercept ~== interceptR2 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
model1.transform(datasetWithDenseFeature).select("features", "prediction")
.collect().foreach {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression with intercept with L2 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 5.260103
as.numeric.d1.V2. 3.725522
as.numeric.d1.V3. 5.711203
*/
val interceptR1 = 5.260103
val coefficientsR1 = Vectors.dense(3.725522, 5.711203)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3,
standardize=FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 5.790885
as.numeric.d1.V2. 3.432373
as.numeric.d1.V3. 5.919196
*/
val interceptR2 = 5.790885
val coefficientsR2 = Vectors.dense(3.432373, 5.919196)
assert(model2.intercept ~== interceptR2 relTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
model1.transform(datasetWithDenseFeature).select("features", "prediction").collect().foreach {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept with L2 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setFitIntercept(false).setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setFitIntercept(false).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3,
intercept = FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.493430
as.numeric.d1.V3. 4.223082
*/
val interceptR1 = 0.0
val coefficientsR1 = Vectors.dense(5.493430, 4.223082)
assert(model1.intercept ~== interceptR1 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3,
intercept = FALSE, standardize=FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.244324
as.numeric.d1.V3. 4.203106
*/
val interceptR2 = 0.0
val coefficientsR2 = Vectors.dense(5.244324, 4.203106)
assert(model2.intercept ~== interceptR2 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
model1.transform(datasetWithDenseFeature).select("features", "prediction").collect().foreach {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression with intercept with ElasticNet regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3,
lambda = 1.6 ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 5.689855
as.numeric.d1.V2. 3.661181
as.numeric.d1.V3. 6.000274
*/
val interceptR1 = 5.689855
val coefficientsR1 = Vectors.dense(3.661181, 6.000274)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3, lambda = 1.6
standardize=FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.113890
as.numeric.d1.V2. 3.407021
as.numeric.d1.V3. 6.152512
*/
val interceptR2 = 6.113890
val coefficientsR2 = Vectors.dense(3.407021, 6.152512)
assert(model2.intercept ~== interceptR2 relTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
model1.transform(datasetWithDenseFeature).select("features", "prediction")
.collect().foreach {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept with ElasticNet regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setFitIntercept(false).setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setFitIntercept(false).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3,
lambda = 1.6, intercept=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.643748
as.numeric.d1.V3. 4.331519
*/
val interceptR1 = 0.0
val coefficientsR1 = Vectors.dense(5.643748, 4.331519)
assert(model1.intercept ~== interceptR1 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3,
lambda = 1.6, intercept=FALSE, standardize=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.455902
as.numeric.d1.V3. 4.312266
*/
val interceptR2 = 0.0
val coefficientsR2 = Vectors.dense(5.455902, 4.312266)
assert(model2.intercept ~== interceptR2 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
model1.transform(datasetWithDenseFeature).select("features", "prediction")
.collect().foreach {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression model with constant label") {
/*
R code:
for (formula in c(b.const ~ . -1, b.const ~ .)) {
model <- lm(formula, data=df.const.label, weights=w)
print(as.vector(coef(model)))
}
[1] -9.221298 3.394343
[1] 17 0 0
*/
val expected = Seq(
Vectors.dense(0.0, -9.221298, 3.394343),
Vectors.dense(17.0, 0.0, 0.0))
Seq("auto", "l-bfgs", "normal").foreach { solver =>
var idx = 0
for (fitIntercept <- Seq(false, true)) {
val model1 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setWeightCol("weight")
.setPredictionCol("myPrediction")
.setSolver(solver)
.fit(datasetWithWeightConstantLabel)
val actual1 = Vectors.dense(model1.intercept, model1.coefficients(0),
model1.coefficients(1))
assert(actual1 ~== expected(idx) absTol 1e-4)
// Schema of summary.predictions should be a superset of the input dataset
assert((datasetWithWeightConstantLabel.schema.fieldNames.toSet + model1.getPredictionCol)
.subsetOf(model1.summary.predictions.schema.fieldNames.toSet))
val model2 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setWeightCol("weight")
.setPredictionCol("myPrediction")
.setSolver(solver)
.fit(datasetWithWeightZeroLabel)
val actual2 = Vectors.dense(model2.intercept, model2.coefficients(0),
model2.coefficients(1))
assert(actual2 ~== Vectors.dense(0.0, 0.0, 0.0) absTol 1e-4)
// Schema of summary.predictions should be a superset of the input dataset
assert((datasetWithWeightZeroLabel.schema.fieldNames.toSet + model2.getPredictionCol)
.subsetOf(model2.summary.predictions.schema.fieldNames.toSet))
idx += 1
}
}
}
test("regularized linear regression through origin with constant label") {
// The problem is ill-defined if fitIntercept=false, regParam is non-zero.
// An exception is thrown in this case.
Seq("auto", "l-bfgs", "normal").foreach { solver =>
for (standardization <- Seq(false, true)) {
val model = new LinearRegression().setFitIntercept(false)
.setRegParam(0.1).setStandardization(standardization).setSolver(solver)
intercept[IllegalArgumentException] {
model.fit(datasetWithWeightConstantLabel)
}
}
}
}
test("linear regression with l-bfgs when training is not needed") {
// When label is constant, l-bfgs solver returns results without training.
// There are two possibilities: If the label is non-zero but constant,
    // and fitIntercept is true, then the model returns yMean as the intercept without training.
// If label is all zeros, then all coefficients are zero regardless of fitIntercept, so
// no training is needed.
for (fitIntercept <- Seq(false, true)) {
for (standardization <- Seq(false, true)) {
val model1 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setStandardization(standardization)
.setWeightCol("weight")
.setSolver("l-bfgs")
.fit(datasetWithWeightConstantLabel)
if (fitIntercept) {
assert(model1.summary.objectiveHistory(0) ~== 0.0 absTol 1e-4)
}
val model2 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setWeightCol("weight")
.setSolver("l-bfgs")
.fit(datasetWithWeightZeroLabel)
assert(model2.summary.objectiveHistory(0) ~== 0.0 absTol 1e-4)
}
}
}
test("linear regression model training summary") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver).setPredictionCol("myPrediction")
val model = trainer.fit(datasetWithDenseFeature)
val trainerNoPredictionCol = trainer.setPredictionCol("")
val modelNoPredictionCol = trainerNoPredictionCol.fit(datasetWithDenseFeature)
// Training results for the model should be available
assert(model.hasSummary)
assert(modelNoPredictionCol.hasSummary)
// Schema should be a superset of the input dataset
assert((datasetWithDenseFeature.schema.fieldNames.toSet + model.getPredictionCol).subsetOf(
model.summary.predictions.schema.fieldNames.toSet))
// Validate that we re-insert a prediction column for evaluation
val modelNoPredictionColFieldNames
= modelNoPredictionCol.summary.predictions.schema.fieldNames
assert(datasetWithDenseFeature.schema.fieldNames.toSet.subsetOf(
modelNoPredictionColFieldNames.toSet))
assert(modelNoPredictionColFieldNames.exists(s => s.startsWith("prediction_")))
// Residuals in [[LinearRegressionResults]] should equal those manually computed
datasetWithDenseFeature.select("features", "label")
.rdd
.map { case Row(features: DenseVector, label: Double) =>
val prediction =
features(0) * model.coefficients(0) + features(1) * model.coefficients(1) +
model.intercept
label - prediction
}
.zip(model.summary.residuals.rdd.map(_.getDouble(0)))
.collect()
.foreach { case (manualResidual: Double, resultResidual: Double) =>
assert(manualResidual ~== resultResidual relTol 1E-5)
}
/*
# Use the following R code to generate model training results.
# path/part-00000 is the file generated by running LinearDataGenerator.generateLinearInput
# as described before the beforeAll() method.
d1 <- read.csv("path/part-00000", header=FALSE, stringsAsFactors=FALSE)
fit <- glm(V1 ~ V2 + V3, data = d1, family = "gaussian")
      f1 <- data.frame(as.numeric(d1$V2), as.numeric(d1$V3))
      names(f1)[1] = c("V2")
      names(f1)[2] = c("V3")
predictions <- predict(fit, newdata=f1)
l1 <- as.numeric(d1$V1)
residuals <- l1 - predictions
> mean(residuals^2) # MSE
[1] 0.00985449
> mean(abs(residuals)) # MAD
[1] 0.07961668
> cor(predictions, l1)^2 # r^2
[1] 0.9998737
> summary(fit)
Call:
glm(formula = V1 ~ V2 + V3, family = "gaussian", data = d1)
Deviance Residuals:
Min 1Q Median 3Q Max
-0.47082 -0.06797 0.00002 0.06725 0.34635
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 6.3022157 0.0018600 3388 <2e-16 ***
V2 4.6982442 0.0011805 3980 <2e-16 ***
V3 7.1994344 0.0009044 7961 <2e-16 ***
---
....
*/
assert(model.summary.meanSquaredError ~== 0.00985449 relTol 1E-4)
assert(model.summary.meanAbsoluteError ~== 0.07961668 relTol 1E-4)
assert(model.summary.r2 ~== 0.9998737 relTol 1E-4)
// Normal solver uses "WeightedLeastSquares". If no regularization is applied or only L2
// regularization is applied, this algorithm uses a direct solver and does not generate an
// objective history because it does not run through iterations.
if (solver == "l-bfgs") {
// Objective function should be monotonically decreasing for linear regression
assert(
model.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
} else {
        // These assertions confirm that the normal solver was used here.
assert(model.summary.objectiveHistory.length == 1)
assert(model.summary.objectiveHistory(0) == 0.0)
val devianceResidualsR = Array(-0.47082, 0.34635)
val seCoefR = Array(0.0011805, 0.0009044, 0.0018600)
val tValsR = Array(3980, 7961, 3388)
val pValsR = Array(0, 0, 0)
model.summary.devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-4) }
model.summary.coefficientStandardErrors.zip(seCoefR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-4) }
model.summary.tValues.map(_.round).zip(tValsR).foreach{ x => assert(x._1 === x._2) }
model.summary.pValues.map(_.round).zip(pValsR).foreach{ x => assert(x._1 === x._2) }
}
}
}
test("linear regression model testset evaluation summary") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver)
val model = trainer.fit(datasetWithDenseFeature)
// Evaluating on training dataset should yield results summary equal to training summary
val testSummary = model.evaluate(datasetWithDenseFeature)
assert(model.summary.meanSquaredError ~== testSummary.meanSquaredError relTol 1E-5)
assert(model.summary.r2 ~== testSummary.r2 relTol 1E-5)
model.summary.residuals.select("residuals").collect()
.zip(testSummary.residuals.select("residuals").collect())
.forall { case (Row(r1: Double), Row(r2: Double)) => r1 ~== r2 relTol 1E-5 }
}
}
test("linear regression with weighted samples") {
val sqlContext = spark.sqlContext
import sqlContext.implicits._
val numClasses = 0
def modelEquals(m1: LinearRegressionModel, m2: LinearRegressionModel): Unit = {
assert(m1.coefficients ~== m2.coefficients relTol 0.01)
assert(m1.intercept ~== m2.intercept relTol 0.01)
}
val testParams = Seq(
// (elasticNetParam, regParam, fitIntercept, standardization)
(0.0, 0.21, true, true),
(0.0, 0.21, true, false),
(0.0, 0.21, false, false),
(1.0, 0.21, true, true)
)
for (solver <- Seq("auto", "l-bfgs", "normal");
(elasticNetParam, regParam, fitIntercept, standardization) <- testParams) {
val estimator = new LinearRegression()
.setFitIntercept(fitIntercept)
.setStandardization(standardization)
.setRegParam(regParam)
.setElasticNetParam(elasticNetParam)
MLTestingUtils.testArbitrarilyScaledWeights[LinearRegressionModel, LinearRegression](
datasetWithStrongNoise.as[LabeledPoint], estimator, modelEquals)
MLTestingUtils.testOutliersWithSmallWeights[LinearRegressionModel, LinearRegression](
datasetWithStrongNoise.as[LabeledPoint], estimator, numClasses, modelEquals,
outlierRatio = 3)
MLTestingUtils.testOversamplingVsWeighting[LinearRegressionModel, LinearRegression](
datasetWithStrongNoise.as[LabeledPoint], estimator, modelEquals, seed)
}
}
test("linear regression model with l-bfgs with big feature datasets") {
val trainer = new LinearRegression().setSolver("auto")
val model = trainer.fit(datasetWithSparseFeature)
// Training results for the model should be available
assert(model.hasSummary)
// When LBFGS is used as optimizer, objective history can be restored.
assert(
model.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
}
test("linear regression summary with weighted samples and intercept by normal solver") {
/*
R code:
model <- glm(formula = "b ~ .", data = df, weights = w)
summary(model)
Call:
glm(formula = "b ~ .", data = df, weights = w)
Deviance Residuals:
1 2 3 4
1.920 -1.358 -1.109 0.960
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 18.080 9.608 1.882 0.311
V1 6.080 5.556 1.094 0.471
V2 -0.600 1.960 -0.306 0.811
(Dispersion parameter for gaussian family taken to be 7.68)
Null deviance: 202.00 on 3 degrees of freedom
Residual deviance: 7.68 on 1 degrees of freedom
AIC: 18.783
Number of Fisher Scoring iterations: 2
*/
val model = new LinearRegression()
.setWeightCol("weight")
.setSolver("normal")
.fit(datasetWithWeight)
val coefficientsR = Vectors.dense(Array(6.080, -0.600))
val interceptR = 18.080
val devianceResidualsR = Array(-1.358, 1.920)
val seCoefR = Array(5.556, 1.960, 9.608)
val tValsR = Array(1.094, -0.306, 1.882)
val pValsR = Array(0.471, 0.811, 0.311)
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
model.summary.devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
val modelWithL1 = new LinearRegression()
.setWeightCol("weight")
.setSolver("normal")
.setRegParam(0.5)
.setElasticNetParam(1.0)
.fit(datasetWithWeight)
assert(modelWithL1.summary.objectiveHistory !== Array(0.0))
assert(
modelWithL1.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
}
test("linear regression summary with weighted samples and w/o intercept by normal solver") {
/*
R code:
model <- glm(formula = "b ~ . -1", data = df, weights = w)
summary(model)
Call:
glm(formula = "b ~ . -1", data = df, weights = w)
Deviance Residuals:
1 2 3 4
1.950 2.344 -4.600 2.103
Coefficients:
Estimate Std. Error t value Pr(>|t|)
V1 -3.7271 2.9032 -1.284 0.3279
V2 3.0100 0.6022 4.998 0.0378 *
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
(Dispersion parameter for gaussian family taken to be 17.4376)
Null deviance: 5962.000 on 4 degrees of freedom
Residual deviance: 34.875 on 2 degrees of freedom
AIC: 22.835
Number of Fisher Scoring iterations: 2
*/
val model = new LinearRegression()
.setWeightCol("weight")
.setSolver("normal")
.setFitIntercept(false)
.fit(datasetWithWeight)
val coefficientsR = Vectors.dense(Array(-3.7271, 3.0100))
val interceptR = 0.0
val devianceResidualsR = Array(-4.600, 2.344)
val seCoefR = Array(2.9032, 0.6022)
val tValsR = Array(-1.284, 4.998)
val pValsR = Array(0.3279, 0.0378)
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept === interceptR)
model.summary.devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
}
test("read/write") {
def checkModelData(model: LinearRegressionModel, model2: LinearRegressionModel): Unit = {
assert(model.intercept === model2.intercept)
assert(model.coefficients === model2.coefficients)
}
val lr = new LinearRegression()
testEstimatorAndModelReadWrite(lr, datasetWithWeight, LinearRegressionSuite.allParamSettings,
LinearRegressionSuite.allParamSettings, checkModelData)
}
test("should support all NumericType labels and weights, and not support other types") {
for (solver <- Seq("auto", "l-bfgs", "normal")) {
val lr = new LinearRegression().setMaxIter(1).setSolver(solver)
MLTestingUtils.checkNumericTypes[LinearRegressionModel, LinearRegression](
lr, spark, isClassification = false) { (expected, actual) =>
assert(expected.intercept === actual.intercept)
assert(expected.coefficients === actual.coefficients)
}
}
}
}
object LinearRegressionSuite {
/**
* Mapping from all Params to valid settings which differ from the defaults.
* This is useful for tests which need to exercise all Params, such as save/load.
* This excludes input columns to simplify some tests.
*/
val allParamSettings: Map[String, Any] = Map(
"predictionCol" -> "myPrediction",
"regParam" -> 0.01,
"elasticNetParam" -> 0.1,
"maxIter" -> 2, // intentionally small
"fitIntercept" -> true,
"tol" -> 0.8,
"standardization" -> false,
"solver" -> "l-bfgs"
)
}
| minixalpha/spark | mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala | Scala | apache-2.0 | 41,611 |
package edu.gemini.qpt.core.listeners
import edu.gemini.ags.api.AgsGuideQuality._
import java.beans.PropertyChangeEvent
import edu.gemini.ags.api.AgsAnalysis
import edu.gemini.qpt.core.Marker.Severity
import edu.gemini.qpt.core.Variant
import edu.gemini.qpt.core.util.MarkerManager
import edu.gemini.spModel.gemini.gems.Canopus
import edu.gemini.spModel.gemini.gsaoi.GsaoiOdgw
import edu.gemini.spModel.guide.GuideProbeGroup
import scala.collection.JavaConverters._
class AgsAnalysisListener extends MarkerModelListener[Variant] {
import AgsAnalysisListener._
override def propertyChange(evt: PropertyChangeEvent): Unit = {
val variant = evt.getSource.asInstanceOf[Variant]
val markerManager = variant.getSchedule.getMarkerManager
markerManager.clearMarkers(this, variant)
// Iterate over the observations in the variant and determine if they should generate
// markers based on the AgsAnalysis.
for (alloc <- variant.getAllocs.asScala if !alloc.getObs.getAgsAnalysis.isEmpty) {
      // Only analyses that have a severity level and are not for the ODGW or Canopus probe groups should generate a marker.
for {
a <- alloc.getObs.getAgsAnalysis.asScala
if (a match {
case AgsAnalysis.NoGuideStarForGroup(group, _) => !ignoredProbeGroups.contains(group)
case _ => true
})
s <- severity(a)
} markerManager.addMarker(false, this, s, a.message(withProbe = true), variant, alloc)
}
}
override protected def getMarkerManager(t: Variant): MarkerManager = {
t.getSchedule.getMarkerManager
}
}
object AgsAnalysisListener {
// We want to ignore AgsAnalysis problems for ODGW and Canopus.
val ignoredProbeGroups: Set[GuideProbeGroup] = Set(GsaoiOdgw.Group.instance, Canopus.Wfs.Group.instance)
def severity(a: AgsAnalysis): Option[Severity] =
a.quality match {
case DeliversRequestedIq => None
case PossibleIqDegradation => Some(Severity.Warning)
case IqDegradation => Some(Severity.Warning)
case PossiblyUnusable => Some(Severity.Warning)
case Unusable => Some(Severity.Error)
}
} | arturog8m/ocs | bundle/edu.gemini.qpt.client/src/main/scala/edu/gemini/qpt/core/listeners/AgsAnalysisListener.scala | Scala | bsd-3-clause | 2,133 |
package utest.framework
import scala.concurrent.{ExecutionContext, Future}
/**
* An immutable tree with each node containing a value, and a `Seq` of
* children. Provides all normal `Seq` functionality as well as some tree
* specific methods.
*/
case class Tree[+T](value: T, children: Tree[T]*) {
/**
* The number of nodes in this tree.
*/
def length: Int = {
children.foldLeft(1)(_ + _.length)
}
def map[V](f: T => V): Tree[V] = {
Tree(f(value), children.map(_.map(f)): _*)
}
/**
* An iterator over the values stored on the nodes of this tree, in a depth
* first manner starting from the root.
*/
def iterator: Iterator[T] = {
Iterator(this.value) ++ children.flatMap(_.iterator)
}
def leafPaths: Iterator[List[T]] = {
if (children.isEmpty) Iterator(List(this.value))
else children.toIterator.flatMap(_.leafPaths).map(this.value :: _)
}
def toSeq: Seq[T] = iterator.toList
/**
* Returns an iterator for the values at the leaves of this tree
*/
def leaves: Iterator[T] = {
if (children.isEmpty) Iterator(this.value)
else children.toIterator.flatMap(_.leaves)
}
}
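
/**
 * Editor-added usage sketch (not part of the original utest sources): exercises the
 * Tree API defined above on a small hand-built tree to illustrate `length`, `iterator`,
 * `leaves` and `leafPaths`. The tree values are arbitrary strings chosen for the example.
 */
object TreeUsageExample {
  def demo(): Unit = {
    // root "a" with a leaf child "b" and a child "c" that has a single leaf "d"
    val t = Tree("a", Tree("b"), Tree("c", Tree("d")))
    assert(t.length == 4)                                   // four nodes in total
    assert(t.iterator.toList == List("a", "b", "c", "d"))   // depth-first from the root
    assert(t.leaves.toList == List("b", "d"))               // values at the leaves only
    assert(t.leafPaths.toList == List(List("a", "b"), List("a", "c", "d"))) // root-to-leaf paths
  }
}
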
/**
* The executable portion of a tree of tests. Each node contains an
* executable, which when run either returns a Left(result) or a
* Right(sequence) of child nodes which you can execute.
*/
class TestCallTree(inner: => Either[(() => Any, () => Unit), IndexedSeq[TestCallTree]]) {
/**
* Runs the test in this [[TestCallTree]] at the specified `path`. Called
* by the [[TestTreeSeq.run]] method and usually not called manually.
*/
def run(path: List[Int])(implicit executionContext: ExecutionContext): Any = {
path match {
case head :: tail =>
val Right(children) = inner
children(head).run(tail)
case Nil =>
val Left((res, hook)) = inner
try {
val resFuture = res() match {
case x: Future[_] => x
case notFuture => Future.successful(notFuture)
}
resFuture.map { r => hook(); r }
} catch {
case scala.util.control.NonFatal(e) => hook(); throw e
}
}
}
} | cuzfrog/scala_sbt_template | macros/src/main/scala/utest/framework/Tree.scala | Scala | apache-2.0 | 2,156 |
package controllers
import play.api.mvc._
import play.api.libs.json._
/**
 * Controller used by the integration tests: `echo` reflects the incoming request back as JSON,
 * and `slave` builds a response from a JSON description of what to return.
*/
object TestController extends Controller {
object Model {
import play.api.libs.functional.syntax._
import org.apache.commons.codec.binary.Base64
import play.api.data.validation.ValidationError
case class Echo(method: String,
version: String,
body: Option[Array[Byte]],
headers: Map[String, Seq[String]],
session: Map[String, String],
flash: Map[String, String],
remoteAddress: String,
queryString: Map[String, Seq[String]],
uri: String,
path: String)
case class ToReturn(status: Int = 200,
body: Option[Array[Byte]] = None,
headers: Map[String, String] = Map(),
cookies: List[Cookie] = List(),
session: Map[String, String] = Map(),
flash: Map[String, String] = Map())
implicit val byteArrayWrites = new Writes[Array[Byte]] {
def writes(o: Array[Byte]) = JsString(new String((Base64.encodeBase64(o))))
}
implicit val byteArrayReads = new Reads[Array[Byte]] {
def reads(json: JsValue) = json match {
case JsString(value) => JsSuccess(Base64.decodeBase64(value.getBytes))
case _ => JsError(Seq(JsPath() -> Seq(ValidationError("validate.error.expected.jsstring"))))
}
}
implicit val cookieReads = Json.reads[Cookie]
implicit val cookieWrites = Json.writes[Cookie]
implicit val echoReads = Json.reads[Echo]
implicit val echoWrites = Json.writes[Echo]
implicit val toReturnReads = Json.reads[ToReturn]
implicit val toReturnWrites = Json.writes[ToReturn]
}
import Model._
def echo = Action(parse.raw) { request =>
import request._
Ok(Json.toJson(Echo(method, version, body.asBytes(), headers.toMap, request.session.data, request.flash.data, remoteAddress,
queryString, uri, path)))
}
def slave = Action(parse.json) { request =>
Json.fromJson[ToReturn](request.body).fold({ errors =>
BadRequest(errors.toString())
}, { toReturn =>
toReturn.body.map(body => Status(toReturn.status)(body)).getOrElse(Status(toReturn.status))
.withHeaders(toReturn.headers.toSeq:_*)
.withCookies(toReturn.cookies:_*)
.withSession(toReturn.session.foldLeft(request.session)((s, item) => s + item))
.flashing(toReturn.flash.toSeq:_*)
})
}
}
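
/**
 * Editor-added sketch (not part of the original test app): round-trips a `ToReturn`
 * through the JSON formats defined in `TestController.Model` above. The sample status
 * and header values are made up for illustration.
 */
object TestControllerModelExample {
  import TestController.Model._

  def demo(): Unit = {
    val toReturn = ToReturn(status = 201, headers = Map("X-Test" -> "yes"))
    val json = Json.toJson(toReturn)
    // Reading the JSON back should yield a result with the same status code.
    assert(Json.fromJson[ToReturn](json).asOpt.exists(_.status == 201))
  }
}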
| michaelahlers/team-awesome-wedding | vendor/play-2.2.1/framework/test/integrationtest-scala/app/controllers/TestController.scala | Scala | mit | 2,608 |
package org.bitcoins.core.protocol.ln.node
import org.bitcoins.core.crypto.ECPublicKey
import org.bitcoins.core.protocol.NetworkElement
import org.bitcoins.core.util.Factory
import scodec.bits.ByteVector
/**
* `NodeId` is simply a wrapper for
* [[org.bitcoins.core.crypto.ECPublicKey ECPublicKey]].
* This public key needs to be a
* 33 byte compressed secp256k1 public key.
*/
case class NodeId(pubKey: ECPublicKey) extends NetworkElement {
require(
pubKey.isCompressed,
s"Cannot create a nodeId from a public key that was not compressed ${pubKey.hex}")
override def toString: String = pubKey.hex
override def bytes: ByteVector = pubKey.bytes
}
object NodeId extends Factory[NodeId] {
def fromPubKey(pubKey: ECPublicKey): NodeId = {
NodeId(pubKey)
}
override def fromBytes(bytes: ByteVector): NodeId = {
val pubKey = ECPublicKey.fromBytes(bytes)
fromPubKey(pubKey)
}
}
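
/**
 * Editor-added usage sketch (not part of bitcoin-s): shows the intended round-trip
 * between a compressed [[ECPublicKey]] and a [[NodeId]] using only the members defined
 * above. The key is supplied by the caller, so no key material is assumed here.
 */
object NodeIdExample {

  def roundTrip(pubKey: ECPublicKey): NodeId = {
    require(pubKey.isCompressed, "node ids wrap 33-byte compressed keys")
    val nodeId = NodeId.fromPubKey(pubKey)
    // serialization and parsing are inverses of each other
    assert(NodeId.fromBytes(nodeId.bytes) == nodeId)
    nodeId
  }
}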
| bitcoin-s/bitcoin-s-core | core/src/main/scala/org/bitcoins/core/protocol/ln/node/NodeId.scala | Scala | mit | 920 |
/*
* Copyright 2016 Alexey Kardapoltsev
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.kardapoltsev.astparser.parser.http
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class HttpLexerSpec extends AnyWordSpec with Matchers {
import HttpLexer._
private val lexer = new HttpLexer
private def scan(input: String): List[Token] = {
lexer.scan(input)
}
"HttpLexer" should {
"parse GET route" in {
scan("GET /api/users/") shouldBe List(
Method("GET"),
Slash(),
Lexeme("api"),
Slash(),
Lexeme("users"),
Slash()
)
}
"parse route params" in {
scan("GET /api/{userId}/") shouldBe List(
Method("GET"),
Slash(),
Lexeme("api"),
Slash(),
LeftBrace(),
Lexeme("userId"),
RightBrace(),
Slash()
)
}
"parse escaped route params" in {
scan("GET /api/{`type`}/") shouldBe List(
Method("GET"),
Slash(),
Lexeme("api"),
Slash(),
LeftBrace(),
Lexeme("type"),
RightBrace(),
Slash()
)
}
"parse query params" in {
scan("GET /api?{param1}&{param2}") shouldBe List(
Method("GET"),
Slash(),
Lexeme("api"),
QuestionMark(),
LeftBrace(),
Lexeme("param1"),
RightBrace(),
Ampersand(),
LeftBrace(),
Lexeme("param2"),
RightBrace()
)
}
"parse escaped query params" in {
scan("GET /api?{`call`}") shouldBe List(
Method("GET"),
Slash(),
Lexeme("api"),
QuestionMark(),
LeftBrace(),
Lexeme("call"),
RightBrace()
)
}
"parse CACHED directive" in {
scan("CACHED GET /user") shouldBe List(
CachedDirective(),
Method("GET"),
Slash(),
Lexeme("user")
)
}
}
}
| kardapoltsev/astparser | src/test/scala/com/github/kardapoltsev/astparser/parser/http/HttpLexerSpec.scala | Scala | apache-2.0 | 2,484 |
package cmwell.analytics.main
import java.nio.file.Paths
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import cmwell.analytics.data.{DataWriterFactory, IndexWithSystemFields}
import cmwell.analytics.downloader.PartitionedDownloader
import cmwell.analytics.util.{DiscoverEsTopology, FindContactPoints}
import org.apache.commons.io.FileUtils
import org.apache.log4j.LogManager
import org.rogach.scallop.{ScallopConf, ScallopOption}
import scala.concurrent.ExecutionContextExecutor
object DumpSystemFieldsFromEs {
def main(args: Array[String]): Unit = {
val logger = LogManager.getLogger(DumpSystemFieldsFromEs.getClass)
implicit val system: ActorSystem = ActorSystem("dump-system-fields-from-es")
implicit val executionContext: ExecutionContextExecutor = system.dispatcher
implicit val actorMaterializer: ActorMaterializer = ActorMaterializer()
try {
// Since we expect this to be run on a CM-Well node, the default parallelism is to use half the processors
      // so as to avoid starving the CM-Well node of processor resources. A higher level of parallelism might
// be possible (without interfering with CM-Well) since most of the work will actually be on the ES side.
val defaultParallelism = 1 max (Runtime.getRuntime.availableProcessors / 2)
object Opts extends ScallopConf(args) {
val readIndex: ScallopOption[String] = opt[String]("read-index", short = 'i', descr = "The name of the index to read from (default: cm_well_all)", required = false)
val parallelism: ScallopOption[Int] = opt[Int]("parallelism", short = 'p', descr = "The parallelism level", default = Some(defaultParallelism))
val currentOnly: ScallopOption[Boolean] = opt[Boolean]("current-only", short = 'c', descr = "Only download current uuids", default = Some(false))
val format: ScallopOption[String] = opt[String]("format", short = 'f', descr = "The data format: either 'parquet' or 'csv'", default = Some("parquet"))
val out: ScallopOption[String] = opt[String]("out", short = 'o', descr = "The path to save the output to", required = true)
val url: ScallopOption[String] = trailArg[String]("url", descr = "A CM-Well URL", required = true)
val sourceFilter: ScallopOption[Boolean] = toggle("source-filter", noshort = true, default=Some(true), prefix = "no-",
descrNo = "Do not filter _source fields (workaround for bad index)", descrYes = "Use source filtering to reduce network traffic")
verify()
}
val esContactPoint = FindContactPoints.es(Opts.url())
val indexesOrAliasesToRead = Opts.readIndex.toOption.fold(Seq("cm_well_all"))(Seq(_))
val esTopology = DiscoverEsTopology(esContactPoint = esContactPoint, aliases = indexesOrAliasesToRead)
// Calling script should clear output directory as necessary.
val objectExtractor = IndexWithSystemFields
val dataWriterFactory = DataWriterFactory.file(format = Opts.format(), objectExtractor, outDirectory = Opts.out())
PartitionedDownloader.runDownload(
esTopology = esTopology,
parallelism = Opts.parallelism(),
currentOnly = Opts.currentOnly(),
objectExtractor = objectExtractor,
dataWriterFactory = dataWriterFactory,
sourceFilter = Opts.sourceFilter())
// The Hadoop convention is to touch the (empty) _SUCCESS file to signal successful completion.
FileUtils.touch(Paths.get(Opts.out(), "_SUCCESS").toFile)
}
catch {
case ex: Throwable =>
logger.error(ex.getMessage, ex)
System.exit(1)
}
finally {
system.terminate()
}
}
}
| bryaakov/CM-Well | tools/dataConsistencyTool/extract-index-from-es/src/main/scala/cmwell/analytics/main/DumpSystemFieldsFromEs.scala | Scala | apache-2.0 | 3,668 |
package eu.timepit.refined
import eu.timepit.refined.api.Inference
import eu.timepit.refined.generic.Equal
import eu.timepit.refined.numeric.Greater
import eu.timepit.refined.string.StartsWith
import org.scalacheck.Prop._
import org.scalacheck.Properties
class GenericInferenceSpec extends Properties("GenericInference") {
property("""Equal["abcd"] ==> StartsWith["ab"]""") = secure {
Inference[Equal[W.`"abcd"`.T], StartsWith[W.`"ab"`.T]].isValid
}
property("""Equal["abcd"] =!> StartsWith["cd"]""") = secure {
Inference[Equal[W.`"abcd"`.T], StartsWith[W.`"cd"`.T]].notValid
}
property("Equal[10] ==> Greater[5]") = secure {
Inference[Equal[W.`10`.T], Greater[W.`5`.T]].isValid
}
property("Equal[5] =!> Greater[10]") = secure {
Inference[Equal[W.`5`.T], Greater[W.`10`.T]].notValid
}
}
| fthomas/refined | modules/core/shared/src/test/scala-3.0-/eu/timepit/refined/GenericInferenceSpec.scala | Scala | mit | 825 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical.statsEstimation
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, Statistics}
import org.apache.spark.sql.internal.SQLConf
object AggregateEstimation {
import EstimationUtils._
/**
* Estimate the number of output rows based on column stats of group-by columns, and propagate
* column stats for aggregate expressions.
*/
def estimate(conf: SQLConf, agg: Aggregate): Option[Statistics] = {
val childStats = agg.child.stats(conf)
// Check if we have column stats for all group-by columns.
val colStatsExist = agg.groupingExpressions.forall { e =>
e.isInstanceOf[Attribute] && childStats.attributeStats.contains(e.asInstanceOf[Attribute])
}
if (rowCountsExist(conf, agg.child) && colStatsExist) {
// Multiply distinct counts of group-by columns. This is an upper bound, which assumes
// the data contains all combinations of distinct values of group-by columns.
var outputRows: BigInt = agg.groupingExpressions.foldLeft(BigInt(1))(
(res, expr) => res * childStats.attributeStats(expr.asInstanceOf[Attribute]).distinctCount)
outputRows = if (agg.groupingExpressions.isEmpty) {
// If there's no group-by columns, the output is a single row containing values of aggregate
// functions: aggregated results for non-empty input or initial values for empty input.
1
} else {
// Here we set another upper bound for the number of output rows: it must not be larger than
// child's number of rows.
outputRows.min(childStats.rowCount.get)
}
val outputAttrStats = getOutputMap(childStats.attributeStats, agg.output)
Some(Statistics(
sizeInBytes = getOutputSize(agg.output, outputRows, outputAttrStats),
rowCount = Some(outputRows),
attributeStats = outputAttrStats,
hints = childStats.hints))
} else {
None
}
}
}
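
/**
 * Editor-added illustration (not part of Spark): mirrors the row-count bound computed in
 * [[AggregateEstimation.estimate]] with plain numbers, independent of any Spark classes.
 * The estimate is the product of the distinct counts of the group-by columns, capped by
 * the child's row count, and 1 for a global (no group-by) aggregate.
 */
object AggregateEstimationExample {

  def estimatedOutputRows(distinctCounts: Seq[BigInt], childRowCount: BigInt): BigInt = {
    if (distinctCounts.isEmpty) BigInt(1)
    else distinctCounts.product.min(childRowCount)
  }

  // For example, grouping on two columns with 10 and 40 distinct values over 250 child rows
  // gives min(10 * 40, 250) = 250 estimated output rows.
}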
| wangyixiaohuihui/spark2-annotation | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/AggregateEstimation.scala | Scala | apache-2.0 | 2,886 |
package week7.bloxorz
/**
* A main object that can be used to execute the Bloxorz solver
*/
object Bloxorz {
/**
* A level constructed using the `InfiniteTerrain` trait which defines
* the terrain to be valid at every position.
*/
object InfiniteLevel extends Solver with InfiniteTerrain {
val startPos = Pos(1, 3)
val goal = Pos(5, 8)
}
/**
* A simple level constructed using the StringParserTerrain
*/
abstract class Level extends Solver with StringParserTerrain
object Level0 extends Level {
val level =
"""------
|--ST--
|--oo--
|--oo--
|------""".stripMargin
}
/**
* Level 1 of the official Bloxorz game
*/
object Level1 extends Level {
val level =
"""ooo-------
|oSoooo----
|ooooooooo-
|-ooooooooo
|-----ooToo
|------ooo-""".stripMargin
}
  /** Entry point that prints the solutions for the levels above. */
def main(args: Array[String]): Unit = {
println(InfiniteLevel.solution)
println(Level0.solution)
println(Level1.solution)
}
}
| zapstar/funprog | fp-scala/src/week7/bloxorz/Bloxorz.scala | Scala | mit | 1,056 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.TestData._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.TestSQLContext
import org.apache.spark.sql.test.TestSQLContext.implicits._
import org.apache.spark.sql.types.DecimalType
class DataFrameAggregateSuite extends QueryTest {
test("groupBy") {
checkAnswer(
testData2.groupBy("a").agg(sum($"b")),
Seq(Row(1, 3), Row(2, 3), Row(3, 3))
)
checkAnswer(
testData2.groupBy("a").agg(sum($"b").as("totB")).agg(sum('totB)),
Row(9)
)
checkAnswer(
testData2.groupBy("a").agg(count("*")),
Row(1, 2) :: Row(2, 2) :: Row(3, 2) :: Nil
)
checkAnswer(
testData2.groupBy("a").agg(Map("*" -> "count")),
Row(1, 2) :: Row(2, 2) :: Row(3, 2) :: Nil
)
checkAnswer(
testData2.groupBy("a").agg(Map("b" -> "sum")),
Row(1, 3) :: Row(2, 3) :: Row(3, 3) :: Nil
)
val df1 = Seq(("a", 1, 0, "b"), ("b", 2, 4, "c"), ("a", 2, 3, "d"))
.toDF("key", "value1", "value2", "rest")
checkAnswer(
df1.groupBy("key").min(),
df1.groupBy("key").min("value1", "value2").collect()
)
checkAnswer(
df1.groupBy("key").min("value2"),
Seq(Row("a", 0), Row("b", 4))
)
}
test("spark.sql.retainGroupColumns config") {
checkAnswer(
testData2.groupBy("a").agg(sum($"b")),
Seq(Row(1, 3), Row(2, 3), Row(3, 3))
)
TestSQLContext.conf.setConf("spark.sql.retainGroupColumns", "false")
checkAnswer(
testData2.groupBy("a").agg(sum($"b")),
Seq(Row(3), Row(3), Row(3))
)
TestSQLContext.conf.setConf("spark.sql.retainGroupColumns", "true")
}
test("agg without groups") {
checkAnswer(
testData2.agg(sum('b)),
Row(9)
)
}
test("average") {
checkAnswer(
testData2.agg(avg('a)),
Row(2.0))
// Also check mean
checkAnswer(
testData2.agg(mean('a)),
Row(2.0))
checkAnswer(
testData2.agg(avg('a), sumDistinct('a)), // non-partial
Row(2.0, 6.0) :: Nil)
checkAnswer(
decimalData.agg(avg('a)),
Row(new java.math.BigDecimal(2.0)))
checkAnswer(
decimalData.agg(avg('a), sumDistinct('a)), // non-partial
Row(new java.math.BigDecimal(2.0), new java.math.BigDecimal(6)) :: Nil)
checkAnswer(
decimalData.agg(avg('a cast DecimalType(10, 2))),
Row(new java.math.BigDecimal(2.0)))
// non-partial
checkAnswer(
decimalData.agg(avg('a cast DecimalType(10, 2)), sumDistinct('a cast DecimalType(10, 2))),
Row(new java.math.BigDecimal(2.0), new java.math.BigDecimal(6)) :: Nil)
}
test("null average") {
checkAnswer(
testData3.agg(avg('b)),
Row(2.0))
checkAnswer(
testData3.agg(avg('b), countDistinct('b)),
Row(2.0, 1))
checkAnswer(
testData3.agg(avg('b), sumDistinct('b)), // non-partial
Row(2.0, 2.0))
}
test("zero average") {
val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b")
checkAnswer(
emptyTableData.agg(avg('a)),
Row(null))
checkAnswer(
emptyTableData.agg(avg('a), sumDistinct('b)), // non-partial
Row(null, null))
}
test("count") {
assert(testData2.count() === testData2.map(_ => 1).count())
checkAnswer(
testData2.agg(count('a), sumDistinct('a)), // non-partial
Row(6, 6.0))
}
test("null count") {
checkAnswer(
testData3.groupBy('a).agg(count('b)),
Seq(Row(1, 0), Row(2, 1))
)
checkAnswer(
testData3.groupBy('a).agg(count('a + 'b)),
Seq(Row(1, 0), Row(2, 1))
)
checkAnswer(
testData3.agg(count('a), count('b), count(lit(1)), countDistinct('a), countDistinct('b)),
Row(2, 1, 2, 2, 1)
)
checkAnswer(
testData3.agg(count('b), countDistinct('b), sumDistinct('b)), // non-partial
Row(1, 1, 2)
)
}
test("zero count") {
val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b")
assert(emptyTableData.count() === 0)
checkAnswer(
emptyTableData.agg(count('a), sumDistinct('a)), // non-partial
Row(0, null))
}
test("zero sum") {
val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b")
checkAnswer(
emptyTableData.agg(sum('a)),
Row(null))
}
test("zero sum distinct") {
val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b")
checkAnswer(
emptyTableData.agg(sumDistinct('a)),
Row(null))
}
}
| andrewor14/iolap | sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala | Scala | apache-2.0 | 5,253 |
package juan.ddd.proto.contract.routing
import juan.ddd.proto.domain.routing._
trait UserRepository {
def nextIdentity: UserId
def userOfId(id: UserId): Option[User]
def remove(user: User): Unit
def save(user: User): Unit
}
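
/**
 * Editor-added sketch (not part of the original sources): a minimal in-memory
 * implementation of the contract above, useful for tests. Because the User/UserId API is
 * not shown here, identity generation and id extraction are injected instead of assumed.
 */
class InMemoryUserRepository(newId: () => UserId, idOf: User => UserId) extends UserRepository {
  private val users = scala.collection.mutable.Map.empty[UserId, User]

  def nextIdentity: UserId = newId()
  def userOfId(id: UserId): Option[User] = users.get(id)
  def remove(user: User): Unit = users -= idOf(user)
  def save(user: User): Unit = users += (idOf(user) -> user)
}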
| yoskhdia/ddd-proto | src/main/scala/juan/ddd/proto/contract/routing/UserRepository.scala | Scala | mit | 238 |
package org.jetbrains.plugins.scala
package codeInspection.typeChecking
import com.intellij.codeInspection.LocalInspectionTool
import org.jetbrains.plugins.scala.codeInspection.ScalaLightInspectionFixtureTestAdapter
/**
* Nikolay.Tropin
* 9/26/13
*/
class ComparingUnrelatedTypesInspectionTest extends ScalaLightInspectionFixtureTestAdapter {
protected def classOfInspection: Class[_ <: LocalInspectionTool] = classOf[ComparingUnrelatedTypesInspection]
protected val annotation: String = ComparingUnrelatedTypesInspection.inspectionName
def testWeakConformance() {
val text1 = s"""val a = 0
|val b: Short = 1
|${START}b == a$END"""
val text2 = s"""val a = 0
|val b = 1.0
|${START}b != a$END"""
val text3 = s"""val a = 0.0
|val b: Byte = 100
|${START}a == b$END"""
val text4 = s"${START}1 == 1.0$END"
checkTextHasNoErrors(text1)
checkTextHasNoErrors(text2)
checkTextHasNoErrors(text3)
checkTextHasNoErrors(text4)
}
def testValueTypes() {
val text1 = s"""val a = true
|val b = 1
|${START}b == a$END"""
val text2 = s"""val a = true
|val b = 0.0
|${START}a != b$END"""
val text3 = s"${START}true != 0$END"
val text4: String = s"${START}1.isInstanceOf[Boolean]$END"
checkTextHasError(text1)
checkTextHasError(text2)
checkTextHasError(text3)
checkTextHasError(text4)
}
def testString() {
val text1 = s"""val a = "a"
|val b = Array('a')
|${START}b == a$END"""
val text2 = s"""val a = "0"
|val b = 0
|${START}a == b$END"""
val text3 = s"""val s = "s"
|${START}s == 's'$END"""
val text4 = s"""val a = "a"
|val b: CharSequence = null
|${START}b != a$END"""
checkTextHasError(text1)
checkTextHasError(text2)
checkTextHasError(text3)
checkTextHasNoErrors(text4)
}
def testInheritors() {
val text1 = s"""val a = scala.collection.Iterable(1)
|val b = List(0)
|${START}b == a$END"""
val text2 = s"""case class A(i: Int)
|final class B extends A(1)
|val a: A = A(0)
|val b: B = new B
|${START}a == b$END"""
val text3 = """trait A
|object B extends A
|B.isInstanceOf[A]"""
checkTextHasNoErrors(text1)
checkTextHasNoErrors(text2)
checkTextHasNoErrors(text3)
}
def testFinal() {
val text1 = s"""case class A(i: Int)
|class B extends A(1)
|val a: A = A(0)
|val b: B = new B
|${START}a == b$END"""
val text2 = s"""final class A extends Serializable
|final class B extends Serializable
|val a: A = new A
|val b: B = new B
|${START}a == b$END"""
val text3 = s"""final class A extends Serializable
|final class B extends Serializable
|val a: A = new A
|${START}a.isInstanceOf[B]$END"""
checkTextHasNoErrors(text1)
checkTextHasError(text2)
checkTextHasError(text3)
}
def testTraits() {
val text1 = s"""trait A
|trait B
|val a: A = _
|val b: B = _
|${START}a == b$END"""
checkTextHasNoErrors(text1)
}
def testObject() {
val text1 = s"""trait A
|object B extends A
|val a: A = _
|${START}a == B$END"""
val text2 = s"""trait A
|object B extends A
|class C extends A
|val c = new C
|${START}c == B$END"""
val text3 = s"""trait A
|object B extends A
|class C extends A
|val c: A = new C
|${START}c != B$END"""
checkTextHasNoErrors(text1)
checkTextHasError(text2)
checkTextHasNoErrors(text3)
}
def testBoxedTypes() {
val text1 = """val i = new java.lang.Integer(0)
|i == 100"""
val text2 = """val b = new java.lang.Boolean(false)
|b equals true"""
val text3 = "def test(i: Integer) = if (i == null) \\"foo\\" else \\"bar\\""
checkTextHasNoErrors(text1)
checkTextHasNoErrors(text2)
checkTextHasNoErrors(text3)
}
def testExistential(): Unit = {
checkTextHasNoErrors("Seq(1).isInstanceOf[List[_])")
checkTextHasError(s"${START}Some(1).isInstanceOf[List[_]]$END")
checkTextHasNoErrors("def foo(x: Some[_]) { x == Some(1) }")
checkTextHasError(s"def foo(x: Some[_]) { ${START}x == Seq(1)$END }")
}
def testNumeric(): Unit = {
checkTextHasNoErrors("BigInt(1) == 1")
checkTextHasNoErrors("BigInt(1) == 1L")
checkTextHasNoErrors("BigInt(1) == new java.lang.Integer(1)")
checkTextHasError(s"${START}BigInt(1) == true$END")
checkTextHasError(s"${START}BigInt(1) == 1.toString$END")
}
}
| LPTK/intellij-scala | test/org/jetbrains/plugins/scala/codeInspection/typeChecking/ComparingUnrelatedTypesInspectionTest.scala | Scala | apache-2.0 | 5,190 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package forms
import common.Validation._
import models._
import play.api.data.Forms._
import play.api.data._
import utils.Constants._
object CurrentPensionsForm {
def currentPensionsForm = Form(
mapping(
"currentPensionsAmt" -> optional(bigDecimal)
.verifying("pla.base.errors.errorQuestion", currentPensionsAmt => currentPensionsAmt.isDefined)
.verifying("pla.base.errors.errorNegative", currentPensionsAmt => isPositive(currentPensionsAmt.getOrElse(0)))
.verifying("pla.base.errors.errorDecimalPlaces", currentPensionsAmt => isMaxTwoDecimalPlaces(currentPensionsAmt.getOrElse(0)))
.verifying("pla.base.errors.errorMaximum", currentPensionsAmt => isLessThanDouble(currentPensionsAmt.getOrElse(BigDecimal(0)).toDouble, npsMaxCurrency))
)(CurrentPensionsModel.apply)(CurrentPensionsModel.unapply)
)
}
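
/**
 * Editor-added usage sketch (not part of the original service): shows how the form above
 * behaves when bound to raw request data. The sample amounts are made up and assume that
 * npsMaxCurrency is larger than the values used here and that the validation helpers
 * behave as their names suggest.
 */
object CurrentPensionsFormExample {

  def demo(): Unit = {
    val ok = CurrentPensionsForm.currentPensionsForm.bind(Map("currentPensionsAmt" -> "1000.50"))
    assert(!ok.hasErrors) // defined, positive, two decimal places, below the maximum

    val negative = CurrentPensionsForm.currentPensionsForm.bind(Map("currentPensionsAmt" -> "-5"))
    assert(negative.hasErrors) // rejected by the pla.base.errors.errorNegative constraint
  }
}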
| hmrc/pensions-lifetime-allowance-frontend | app/forms/CurrentPensionsForm.scala | Scala | apache-2.0 | 1,458 |
// See LICENSE.txt for license details.
package problems
import chisel3._
import chisel3.util._
// Problem:
//
// Create a composition (chain) of two filters:
//
// SingleFilter - indicates that input is single decimal digit
// (i.e. is less or equal to 9)
//
// EvenFilter - indicates that input is even number
//
abstract class Filter[T <: Data](dtype: T) extends Module {
val io = IO(new Bundle {
val in = Input(Valid(dtype))
val out = Output(Valid(dtype))
})
}
class PredicateFilter[T <: Data](dtype: T, f: T => Bool) extends Filter(dtype) {
io.out.valid := io.in.valid && f(io.in.bits)
io.out.bits := io.in.bits
}
object SingleFilter {
def apply[T <: UInt](dtype: T) =
// Change function argument of Predicate filter below ----------
Module(new PredicateFilter(dtype, (x: T) => false.B))
// Change function argument of Predicate filter above ----------
}
object EvenFilter {
def apply[T <: UInt](dtype: T) =
// Change function argument of Predicate filter below ----------
Module(new PredicateFilter(dtype, (x: T) => false.B))
// Change function argument of Predicate filter above ----------
}
class SingleEvenFilter[T <: UInt](dtype: T) extends Filter(dtype) {
// Implement composition below ----------
io.out <> io.in
// Implement composition above ----------
}
| timtian090/Playground | chiselTutorial/src/main/scala/problems/SingleEvenFilter.scala | Scala | mit | 1,332 |
/**
* This file is part of mycollab-services.
*
* mycollab-services is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* mycollab-services is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mycollab-services. If not, see <http://www.gnu.org/licenses/>.
*/
package com.esofthead.mycollab.module.project.esb
import com.esofthead.mycollab.module.project.domain.Project
/**
* @author MyCollab Ltd
* @since 5.1.0
*/
class DeleteProjectEvent(val projects: Array[Project], val accountId: Integer) {}
| uniteddiversity/mycollab | mycollab-services/src/main/scala/com/esofthead/mycollab/module/project/esb/DeleteProjectEvent.scala | Scala | agpl-3.0 | 957 |
package org.jetbrains.plugins.scala
package codeInsight
package intention
package types
import org.jetbrains.plugins.scala.codeInsight.intentions.ScalaIntentionTestBase
import org.jetbrains.plugins.scala.debugger.{ScalaVersion, Scala_2_12}
class ToggleTypeAnnotationIntentionTest extends ScalaIntentionTestBase {
override def familyName: String = ToggleTypeAnnotation.FamilyName
override implicit val version: ScalaVersion = Scala_2_12
def testCollectionFactorySimplification(): Unit = doTest(
"val v = Seq.empty[String]",
"val v: Seq[String] = Seq.empty"
)
def testCollectionFactoryNoSimplification(): Unit = doTest(
"val v = Seq.empty[String].to[Seq]",
"val v: Seq[String] = Seq.empty[String].to[Seq]"
)
def testOptionFactorySimplification(): Unit = doTest(
"val v = Option.empty[String]",
"val v: Option[String] = Option.empty"
)
def testOptionFactoryNoSimplification(): Unit = doTest(
"val v = Option.empty[String].to[Option]",
"val v: Option[String] = Option.empty[String].to[Option]"
)
def testCompoundType(): Unit = doTest(
"""
|val foo = new Runnable {
| def helper(): Unit = ???
|
| override def run(): Unit = ???
|}""".stripMargin,
"""
|val foo: Runnable = new Runnable {
| def helper(): Unit = ???
|
| override def run(): Unit = ???
|}""".stripMargin
)
def testCompoundTypeWithTypeMember(): Unit = doTest(
s"""
|trait Foo {
| type X
|}
|
|val f${caretTag}oo = new Foo {
| override type X = Int
|
| def helper(x: X): Unit = ???
|}
""".stripMargin,
s"""
|trait Foo {
| type X
|}
|
|val f${caretTag}oo: Foo {
| type X = Int
|} = new Foo {
| override type X = Int
|
| def helper(x: X): Unit = ???
|}
""".stripMargin
)
def testInfixType(): Unit = doTest(
s"""
|trait A
|
|trait B
|
|def foo(): =:=[A, <:<[B, =:=[=:=[B, B], A]]] = ???
|val ba${caretTag}r = foo()
""".stripMargin,
s"""
|trait A
|
|trait B
|
|def foo(): =:=[A, <:<[B, =:=[=:=[B, B], A]]] = ???
|val ba${caretTag}r: A =:= (B <:< (B =:= B =:= A)) = foo()
""".stripMargin
)
def testInfixDifferentAssociativity(): Unit = doTest(
s"""
|trait +[A, B]
|
|trait ::[A, B]
|
|trait A
|
|def foo(): ::[+[A, +[::[A, A], A]], +[A, ::[A, A]]] = ???
|val ba${caretTag}r = foo()
""".stripMargin,
s"""
|trait +[A, B]
|
|trait ::[A, B]
|
|trait A
|
|def foo(): ::[+[A, +[::[A, A], A]], +[A, ::[A, A]]] = ???
|val ba${caretTag}r: (A + ((A :: A) + A)) :: (A + (A :: A)) = foo()
""".stripMargin
)
def testShowAsInfixAnnotation(): Unit = doTest(
s"""
|import scala.annotation.showAsInfix
|
|@showAsInfix class Map[A, B]
|
|def foo(): Map[Int, Map[Int, String]] = ???
|val b${caretTag}ar = foo()
""".stripMargin,
s"""
|import scala.annotation.showAsInfix
|
|@showAsInfix class Map[A, B]
|
|def foo(): Map[Int, Map[Int, String]] = ???
|val b${caretTag}ar: Int Map (Int Map String) = foo()
""".stripMargin
)
}
| jastice/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/codeInsight/intention/types/ToggleTypeAnnotationIntentionTest.scala | Scala | apache-2.0 | 3,434 |
package io.protoless.tests.samples
case object Colors extends Enumeration {
type Color = Value
val Black, White, Green = Value
}
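
/**
 * Editor-added usage sketch (not part of protoless): standard scala.Enumeration access
 * patterns for the values defined above.
 */
object ColorsExample {
  def demo(): Unit = {
    assert(Colors.values.size == 3)
    assert(Colors.withName("Green") == Colors.Green)
    assert(Colors.Black.id == 0) // ids follow declaration order
  }
}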
| julien-lafont/protoless | modules/testing/src/main/scala/io/protoless/tests/samples/Colors.scala | Scala | apache-2.0 | 134 |
import stainless.lang._
import stainless.annotation._
object SimpleImperative {
@mutable abstract class A {
def f(): Unit
}
def proc(a: A) = {
a.f()
}
case class B(var x: BigInt) extends A {
def f(): Unit = {
x = x + 1
}
}
def theorem() = {
val a = B(0)
proc(a)
assert(a.x == 0) // should be 1
}
}
| epfl-lara/stainless | frontends/benchmarks/imperative/invalid/SimpleImperative.scala | Scala | apache-2.0 | 353 |
package chiselutils.utils
import Chisel._
object MemShiftRegister {
def apply[ T <: Data ]( in : T, n : Int, en : Bool = Bool(true) ) : T = {
val memSR = Module( new MemShiftRegister( in, n ) )
memSR.io.en := en
memSR.io.in := in
memSR.io.out
}
}
class MemShiftRegister[ T <: Data ]( genType : T, n : Int ) extends Module {
val io = new Bundle {
val in = genType.cloneType.asInput
val en = Bool( INPUT )
val out = genType.cloneType.asOutput
}
if ( n <= 2 ) {
val reg1 = Reg( genType )
val reg2 = Reg( genType )
// use this as en is implemented differently in a shift register
// in ShiftRegister en is just on input
when ( io.en ) {
reg1 := io.in
reg2 := reg1
}
io.out := {
if ( n == 2 )
reg2
else if ( n == 1 )
reg1
else
io.in
}
} else {
val myMem = Mem( n - 1, genType )
// put a register at the front and back
val regTop = Reg( genType )
val regBot = Reg( genType )
val cntr = Counter( io.en, n - 1 )
val incrAddr = cntr._1 + UInt(1)
val readAddr = UInt( width = incrAddr.getWidth )
readAddr := incrAddr
when ( cntr._1 === UInt( n - 2 ) ) {
readAddr := UInt( 0 )
}
when ( io.en ) {
myMem.write( cntr._1, regTop )
regTop := io.in
regBot := myMem( readAddr )
}
io.out := regBot
}
}
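
/** Editor-added usage sketch (not part of the original library): a minimal module that
  * delays an 8-bit input by 16 cycles via the companion apply above. The width and depth
  * are arbitrary example values.
  */
class MemShiftRegisterExample extends Module {
  val io = new Bundle {
    val in = UInt( INPUT, 8 )
    val en = Bool( INPUT )
    val out = UInt( OUTPUT, 8 )
  }
  io.out := MemShiftRegister( io.in, 16, io.en )
}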
| da-steve101/chisel-utils | src/main/scala/chiselutils/utils/MemShiftRegister.scala | Scala | lgpl-3.0 | 1,397 |
package nak.classify
import nak.nnet.{NNObjective, NeuralNetwork}
import breeze.util.{Encoder, Index}
import breeze.linalg._
import nak.data.Example
import breeze.numerics._
import breeze.optimize.FirstOrderMinimizer.OptParams
/**
 * A NeuralNetwork classifier uses a neural network to get unnormalized log probabilities
 * for the scores of the classifier. These are used to predict labels.
* @author dlwh
*/
class NNetClassifier[L, T](nnet: NeuralNetwork,
inputEncoder: T=>DenseVector[Double],
labelIndex: Index[L]) extends Classifier[L, T] {
/**For the observation, return the score for each label that has a nonzero
* score.
*/
def scores(o: T): Counter[L, Double] = {
Encoder.fromIndex(labelIndex).decode(nnet(inputEncoder(o)))
}
}
object NNetClassifier {
class CounterTrainer[L, T](opt: OptParams = OptParams(),layersIn:Array[Int] = Array(100)) extends Classifier.Trainer[L, Counter[T, Double]] {
type MyClassifier = NNetClassifier[L, Counter[T, Double]]
def train(data: Iterable[Example[L, Counter[T, Double]]]) = {
val labels = Index[L]()
data foreach { labels index _.label}
val featureIndex = Index[T]()
for(d <- data; f <- d.features.keysIterator) featureIndex.index(f)
val fEncoder = Encoder.fromIndex(featureIndex)
val processedData = data.toArray.par.map { d =>
fEncoder.encodeDense(d.features) -> labels(d.label)
}
// log loss error function log( input(gold)/(sum of all outputs))
def errorFun(input: DenseVector[Double], label: Int) = {
val sm = softmax(input)
val obj = sm - input(label)
val deriv = exp(input - sm)
deriv(label) -= 1
obj -> deriv
}
val layers = Array(featureIndex.size) ++ layersIn ++ Array(labels.size)
val obj = new NNObjective(processedData.toIndexedSeq, errorFun, layers)
val guess = obj.initialWeightVector
val weights = opt.minimize(obj,guess)
new NNetClassifier(obj.extract(weights), {fEncoder.encodeDense(_:Counter[T, Double], true)}, labels)
}
}
}
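
/**
 * Editor-added sketch (not part of nak): trains an [[NNetClassifier]] over counter-based
 * features with the trainer defined above. The label/feature types and the hidden-layer
 * size are illustrative; the training data is supplied by the caller, so nothing is
 * assumed about how `Example`s are constructed.
 */
object NNetClassifierExample {

  def train(data: Iterable[Example[String, Counter[String, Double]]]): NNetClassifier[String, Counter[String, Double]] = {
    val trainer = new NNetClassifier.CounterTrainer[String, String](layersIn = Array(50))
    trainer.train(data)
  }
}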
| scalanlp/nak | src/main/scala/nak/classify/NNetClassifier.scala | Scala | apache-2.0 | 2,121 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.events._
class SpecSpec extends Spec with SharedHelpers with GivenWhenThen {
describe("A Spec") {
it("should return the test names in registration order from testNames") {
val a = new Spec {
it("should test this") {}
it("should test that") {}
}
expect(List("should test this", "should test that")) {
a.testNames.iterator.toList
}
val b = new Spec {}
expect(List[String]()) {
b.testNames.iterator.toList
}
val c = new Spec {
it("should test that") {}
it("should test this") {}
}
expect(List("should test that", "should test this")) {
c.testNames.iterator.toList
}
val d = new Spec {
describe("A Tester") {
it("should test that") {}
it("should test this") {}
}
}
expect(List("A Tester should test that", "A Tester should test this")) {
d.testNames.iterator.toList
}
val e = new Spec {
describe("A Tester") {
it("should test this") {}
it("should test that") {}
}
}
expect(List("A Tester should test this", "A Tester should test that")) {
e.testNames.iterator.toList
}
}
it("should throw DuplicateTestNameException if a duplicate test name registration is attempted") {
intercept[DuplicateTestNameException] {
new Spec {
it("should test this") {}
it("should test this") {}
}
}
intercept[DuplicateTestNameException] {
new Spec {
it("should test this") {}
ignore("should test this") {}
}
}
intercept[DuplicateTestNameException] {
new Spec {
ignore("should test this") {}
ignore("should test this") {}
}
}
intercept[DuplicateTestNameException] {
new Spec {
ignore("should test this") {}
it("should test this") {}
}
}
}
it("should invoke withFixture from runTest") {
val a = new Spec {
var withFixtureWasInvoked = false
var testWasInvoked = false
override def withFixture(test: NoArgTest) {
withFixtureWasInvoked = true
super.withFixture(test)
}
it("should do something") {
testWasInvoked = true
}
}
a.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker())
assert(a.withFixtureWasInvoked)
assert(a.testWasInvoked)
}
it("should pass the correct test name in the NoArgTest passed to withFixture") {
val a = new Spec {
var correctTestNameWasPassed = false
override def withFixture(test: NoArgTest) {
correctTestNameWasPassed = test.name == "should do something"
super.withFixture(test)
}
it("should do something") {}
}
a.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker())
assert(a.correctTestNameWasPassed)
}
it("should pass the correct config map in the NoArgTest passed to withFixture") {
val a = new Spec {
var correctConfigMapWasPassed = false
override def withFixture(test: NoArgTest) {
correctConfigMapWasPassed = (test.configMap == Map("hi" -> 7))
super.withFixture(test)
}
it("should do something") {}
}
a.run(None, SilentReporter, new Stopper {}, Filter(), Map("hi" -> 7), None, new Tracker())
assert(a.correctConfigMapWasPassed)
}
describe("(with info calls)") {
class InfoInsideTestSpec extends Spec {
val msg = "hi there, dude"
val testName = "test name"
it(testName) {
info(msg)
}
}
// In a Spec, any InfoProvided's fired during the test should be cached and sent out after the test has
      // succeeded or failed. This makes the report look nicer, because the info is tucked under the "specifier"
// text for that test.
it("should, when the info appears in the code of a successful test, report the info after the TestSucceeded") {
val spec = new InfoInsideTestSpec
val (infoProvidedIndex, testStartingIndex, testSucceededIndex) =
getIndexesForInformerEventOrderTests(spec, spec.testName, spec.msg)
assert(testSucceededIndex < infoProvidedIndex)
}
class InfoBeforeTestSpec extends Spec {
val msg = "hi there, dude"
val testName = "test name"
info(msg)
it(testName) {}
}
it("should, when the info appears in the body before a test, report the info before the test") {
val spec = new InfoBeforeTestSpec
val (infoProvidedIndex, testStartingIndex, testSucceededIndex) =
getIndexesForInformerEventOrderTests(spec, spec.testName, spec.msg)
assert(infoProvidedIndex < testStartingIndex)
assert(testStartingIndex < testSucceededIndex)
}
it("should, when the info appears in the body after a test, report the info after the test runs") {
val msg = "hi there, dude"
val testName = "test name"
class MySpec extends Spec {
it(testName) {}
info(msg)
}
val (infoProvidedIndex, testStartingIndex, testSucceededIndex) =
getIndexesForInformerEventOrderTests(new MySpec, testName, msg)
assert(testStartingIndex < testSucceededIndex)
assert(testSucceededIndex < infoProvidedIndex)
}
it("should throw an IllegalStateException when info is called by a method invoked after the suite has been executed") {
class MySpec extends Spec {
callInfo() // This should work fine
def callInfo() {
info("howdy")
}
it("howdy also") {
callInfo() // This should work fine
}
}
val spec = new MySpec
val myRep = new EventRecordingReporter
spec.run(None, myRep, new Stopper {}, Filter(), Map(), None, new Tracker)
intercept[IllegalStateException] {
spec.callInfo()
}
}
it("should send an InfoProvided with an IndentedText formatter with level 1 when called outside a test") {
val spec = new InfoBeforeTestSpec
val indentedText = getIndentedTextFromInfoProvided(spec)
assert(indentedText === IndentedText("+ " + spec.msg, spec.msg, 0))
}
it("should send an InfoProvided with an IndentedText formatter with level 2 when called within a test") {
val spec = new InfoInsideTestSpec
val indentedText = getIndentedTextFromInfoProvided(spec)
assert(indentedText === IndentedText(" + " + spec.msg, spec.msg, 1))
}
}
describe("(when a nesting rule has been violated)") {
it("should, if they call a describe from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends Spec {
it("should blow up") {
describe("in the wrong place, at the wrong time") {
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a describe with a nested it from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends Spec {
it("should blow up") {
describe("in the wrong place, at the wrong time") {
it("should never run") {
assert(1 === 1)
}
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a nested it from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends Spec {
it("should blow up") {
it("should never run") {
assert(1 === 1)
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a nested it with tags from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends Spec {
it("should blow up") {
it("should never run", mytags.SlowAsMolasses) {
assert(1 === 1)
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a describe with a nested ignore from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends Spec {
it("should blow up") {
describe("in the wrong place, at the wrong time") {
ignore("should never run") {
assert(1 === 1)
}
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a nested ignore from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends Spec {
it("should blow up") {
ignore("should never run") {
assert(1 === 1)
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a nested ignore with tags from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends Spec {
it("should blow up") {
ignore("should never run", mytags.SlowAsMolasses) {
assert(1 === 1)
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
}
it("should run tests registered via the 'it should behave like' syntax") {
trait SharedSpecTests { this: Spec =>
def nonEmptyStack(s: String)(i: Int) {
it("should be that I am shared") {}
}
}
class MySpec extends Spec with SharedSpecTests {
it should behave like nonEmptyStack("hi")(1)
}
val suite = new MySpec
val reporter = new EventRecordingReporter
suite.run(None, reporter, new Stopper {}, Filter(), Map(), None, new Tracker)
val indexedList = reporter.eventsReceived
val testStartingOption = indexedList.find(_.isInstanceOf[TestStarting])
assert(testStartingOption.isDefined)
assert(testStartingOption.get.asInstanceOf[TestStarting].testName === "should be that I am shared")
}
it("should throw NullPointerException if a null test tag is provided") {
// it
intercept[NullPointerException] {
new Spec {
it("hi", null) {}
}
}
val caught = intercept[NullPointerException] {
new Spec {
it("hi", mytags.SlowAsMolasses, null) {}
}
}
assert(caught.getMessage === "a test tag was null")
intercept[NullPointerException] {
new Spec {
it("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) {}
}
}
// ignore
intercept[NullPointerException] {
new Spec {
ignore("hi", null) {}
}
}
val caught2 = intercept[NullPointerException] {
new Spec {
ignore("hi", mytags.SlowAsMolasses, null) {}
}
}
assert(caught2.getMessage === "a test tag was null")
intercept[NullPointerException] {
new Spec {
ignore("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) {}
}
}
}
class TestWasCalledSuite extends Spec {
var theTestThisCalled = false
var theTestThatCalled = false
it("should run this") { theTestThisCalled = true }
it("should run that, maybe") { theTestThatCalled = true }
}
it("should execute all tests when run is called with testName None") {
val b = new TestWasCalledSuite
b.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(b.theTestThisCalled)
assert(b.theTestThatCalled)
}
it("should execute one test when run is called with a defined testName") {
val a = new TestWasCalledSuite
a.run(Some("should run this"), SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(a.theTestThisCalled)
assert(!a.theTestThatCalled)
}
it("should report as ignored, and not run, tests marked ignored") {
val a = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
it("test this") { theTestThisCalled = true }
it("test that") { theTestThatCalled = true }
}
val repA = new TestIgnoredTrackingReporter
a.run(None, repA, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(!repA.testIgnoredReceived)
assert(a.theTestThisCalled)
assert(a.theTestThatCalled)
val b = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this") { theTestThisCalled = true }
it("test that") { theTestThatCalled = true }
}
val repB = new TestIgnoredTrackingReporter
b.run(None, repB, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(repB.testIgnoredReceived)
assert(repB.lastEvent.isDefined)
assert(repB.lastEvent.get.testName endsWith "test this")
assert(!b.theTestThisCalled)
assert(b.theTestThatCalled)
val c = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
it("test this") { theTestThisCalled = true }
ignore("test that") { theTestThatCalled = true }
}
val repC = new TestIgnoredTrackingReporter
c.run(None, repC, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(repC.testIgnoredReceived)
assert(repC.lastEvent.isDefined)
assert(repC.lastEvent.get.testName endsWith "test that", repC.lastEvent.get.testName)
assert(c.theTestThisCalled)
assert(!c.theTestThatCalled)
// The order I want is order of appearance in the file.
// Will try and implement that tomorrow. Subtypes will be able to change the order.
val d = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this") { theTestThisCalled = true }
ignore("test that") { theTestThatCalled = true }
}
val repD = new TestIgnoredTrackingReporter
d.run(None, repD, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(repD.testIgnoredReceived)
assert(repD.lastEvent.isDefined)
assert(repD.lastEvent.get.testName endsWith "test that") // last because should be in order of appearance
assert(!d.theTestThisCalled)
assert(!d.theTestThatCalled)
}
it("should ignore a test marked as ignored if run is invoked with that testName") {
// If I provide a specific testName to run, then it should ignore an Ignore on that test
// method and actually invoke it.
val e = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this") { theTestThisCalled = true }
it("test that") { theTestThatCalled = true }
}
val repE = new TestIgnoredTrackingReporter
e.run(Some("test this"), repE, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(repE.testIgnoredReceived)
assert(!e.theTestThisCalled)
assert(!e.theTestThatCalled)
}
it("should run only those tests selected by the tags to include and exclude sets") {
// Nothing is excluded
val a = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
it("test this", mytags.SlowAsMolasses) { theTestThisCalled = true }
it("test that") { theTestThatCalled = true }
}
val repA = new TestIgnoredTrackingReporter
a.run(None, repA, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(!repA.testIgnoredReceived)
assert(a.theTestThisCalled)
assert(a.theTestThatCalled)
// SlowAsMolasses is included, one test should be excluded
val b = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
it("test this", mytags.SlowAsMolasses) { theTestThisCalled = true }
it("test that") { theTestThatCalled = true }
}
val repB = new TestIgnoredTrackingReporter
b.run(None, repB, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), Map(), None, new Tracker)
assert(!repB.testIgnoredReceived)
assert(b.theTestThisCalled)
assert(!b.theTestThatCalled)
// SlowAsMolasses is included, and both tests should be included
val c = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
it("test this", mytags.SlowAsMolasses) { theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true }
}
val repC = new TestIgnoredTrackingReporter
      c.run(None, repC, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), Map(), None, new Tracker)
assert(!repC.testIgnoredReceived)
assert(c.theTestThisCalled)
assert(c.theTestThatCalled)
// SlowAsMolasses is included. both tests should be included but one ignored
val d = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this", mytags.SlowAsMolasses) { theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true }
}
val repD = new TestIgnoredTrackingReporter
d.run(None, repD, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.Ignore")), Map(), None, new Tracker)
assert(repD.testIgnoredReceived)
assert(!d.theTestThisCalled)
assert(d.theTestThatCalled)
// SlowAsMolasses included, FastAsLight excluded
val e = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
it("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true }
it("test the other") { theTestTheOtherCalled = true }
}
val repE = new TestIgnoredTrackingReporter
e.run(None, repE, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
Map(), None, new Tracker)
assert(!repE.testIgnoredReceived)
assert(!e.theTestThisCalled)
assert(e.theTestThatCalled)
assert(!e.theTestTheOtherCalled)
// An Ignored test that was both included and excluded should not generate a TestIgnored event
val f = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true }
it("test the other") { theTestTheOtherCalled = true }
}
val repF = new TestIgnoredTrackingReporter
f.run(None, repF, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
Map(), None, new Tracker)
assert(!repF.testIgnoredReceived)
assert(!f.theTestThisCalled)
assert(f.theTestThatCalled)
assert(!f.theTestTheOtherCalled)
// An Ignored test that was not included should not generate a TestIgnored event
val g = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
it("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true }
ignore("test the other") { theTestTheOtherCalled = true }
}
val repG = new TestIgnoredTrackingReporter
g.run(None, repG, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
Map(), None, new Tracker)
assert(!repG.testIgnoredReceived)
assert(!g.theTestThisCalled)
assert(g.theTestThatCalled)
assert(!g.theTestTheOtherCalled)
// No tagsToInclude set, FastAsLight excluded
val h = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
it("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true }
it("test the other") { theTestTheOtherCalled = true }
}
val repH = new TestIgnoredTrackingReporter
h.run(None, repH, new Stopper {}, Filter(None, Set("org.scalatest.FastAsLight")), Map(), None, new Tracker)
assert(!repH.testIgnoredReceived)
assert(!h.theTestThisCalled)
assert(h.theTestThatCalled)
assert(h.theTestTheOtherCalled)
// No tagsToInclude set, mytags.SlowAsMolasses excluded
val i = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
it("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true }
it("test the other") { theTestTheOtherCalled = true }
}
val repI = new TestIgnoredTrackingReporter
i.run(None, repI, new Stopper {}, Filter(None, Set("org.scalatest.SlowAsMolasses")), Map(), None, new Tracker)
assert(!repI.testIgnoredReceived)
assert(!i.theTestThisCalled)
assert(!i.theTestThatCalled)
assert(i.theTestTheOtherCalled)
// No tagsToInclude set, mytags.SlowAsMolasses excluded, TestIgnored should not be received on excluded ones
val j = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true }
ignore("test that", mytags.SlowAsMolasses) { theTestThatCalled = true }
it("test the other") { theTestTheOtherCalled = true }
}
val repJ = new TestIgnoredTrackingReporter
j.run(None, repJ, new Stopper {}, Filter(None, Set("org.scalatest.SlowAsMolasses")), Map(), None, new Tracker)
assert(!repI.testIgnoredReceived)
assert(!j.theTestThisCalled)
assert(!j.theTestThatCalled)
assert(j.theTestTheOtherCalled)
// Same as previous, except Ignore specifically mentioned in excludes set
val k = new Spec {
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true }
ignore("test that", mytags.SlowAsMolasses) { theTestThatCalled = true }
ignore("test the other") { theTestTheOtherCalled = true }
}
val repK = new TestIgnoredTrackingReporter
k.run(None, repK, new Stopper {}, Filter(None, Set("org.scalatest.SlowAsMolasses", "org.scalatest.Ignore")), Map(), None, new Tracker)
assert(repK.testIgnoredReceived)
assert(!k.theTestThisCalled)
assert(!k.theTestThatCalled)
assert(!k.theTestTheOtherCalled)
}
it("should return the correct test count from its expectedTestCount method") {
val a = new Spec {
it("test this") {}
it("test that") {}
}
assert(a.expectedTestCount(Filter()) === 2)
val b = new Spec {
ignore("test this") {}
it("test that") {}
}
assert(b.expectedTestCount(Filter()) === 1)
val c = new Spec {
it("test this", mytags.FastAsLight) {}
it("test that") {}
}
assert(c.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
assert(c.expectedTestCount(Filter(None, Set("org.scalatest.FastAsLight"))) === 1)
val d = new Spec {
it("test this", mytags.FastAsLight, mytags.SlowAsMolasses) {}
it("test that", mytags.SlowAsMolasses) {}
it("test the other thing") {}
}
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1)
assert(d.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 1)
assert(d.expectedTestCount(Filter()) === 3)
val e = new Spec {
it("test this", mytags.FastAsLight, mytags.SlowAsMolasses) {}
it("test that", mytags.SlowAsMolasses) {}
ignore("test the other thing") {}
}
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1)
assert(e.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 0)
assert(e.expectedTestCount(Filter()) === 2)
val f = new Suites(a, b, c, d, e)
assert(f.expectedTestCount(Filter()) === 10)
}
it("should generate a TestPending message when the test body is (pending)") {
val a = new Spec {
it("should do this") (pending)
it("should do that") {
assert(2 + 2 === 4)
}
it("should do something else") {
assert(2 + 2 === 4)
pending
}
}
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val tp = rep.testPendingEventsReceived
assert(tp.size === 2)
}
it("should generate a test failure if a Throwable, or an Error other than direct Error subtypes " +
"known in JDK 1.5, excluding AssertionError") {
val a = new Spec {
it("throws AssertionError") { throw new AssertionError }
it("throws plain old Error") { throw new Error }
it("throws Throwable") { throw new Throwable }
}
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val tf = rep.testFailedEventsReceived
assert(tf.size === 3)
}
it("should propagate out Errors that are direct subtypes of Error in JDK 1.5, other than " +
"AssertionError, causing Suites and Runs to abort.") {
val a = new Spec {
it("throws AssertionError") { throw new OutOfMemoryError }
}
intercept[OutOfMemoryError] {
a.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker())
}
}
it("should send InfoProvided events with aboutAPendingTest set to true for info " +
"calls made from a test that is pending") {
val a = new Spec with GivenWhenThen {
it("should do something else") {
given("two integers")
when("one is subracted from the other")
then("the result is the difference between the two numbers")
pending
}
}
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val ip = rep.infoProvidedEventsReceived
assert(ip.size === 3)
for (event <- ip) {
assert(event.aboutAPendingTest.isDefined && event.aboutAPendingTest.get)
}
}
it("should send InfoProvided events with aboutAPendingTest set to false for info " +
"calls made from a test that is not pending") {
val a = new Spec with GivenWhenThen {
it("should do something else") {
given("two integers")
when("one is subracted from the other")
then("the result is the difference between the two numbers")
assert(1 + 1 === 2)
}
}
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val ip = rep.infoProvidedEventsReceived
assert(ip.size === 3)
for (event <- ip) {
assert(event.aboutAPendingTest.isDefined && !event.aboutAPendingTest.get)
}
}
}
}
| yyuu/scalatest | src/test/scala/org/scalatest/SpecSpec.scala | Scala | apache-2.0 | 29,294 |
package persistence.entities
import utils.Profile
case class Supplier(id: Option[Int], name: String, desc: String)
case class SimpleSupplier(name: String, desc: String)
trait Suppliers extends Profile {
import profile.api._
class Suppliers(tag: Tag) extends Table[Supplier](tag, "SUPPLIERS") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
    def name = column[String]("name")
    def desc = column[String]("desc")
def * = (id.?, name, desc) <> (Supplier.tupled, Supplier.unapply)
}
val suppliers = TableQuery[Suppliers]
} | edvorkin/simple-docker-scala-app | src/main/scala/persistence/entities/Suppliers.scala | Scala | apache-2.0 | 556 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.services.eventlog
import scala.xml.{Node => XNode, _}
import net.liftweb.common._
import net.liftweb.common.Box._
import net.liftweb.util.Helpers.tryo
import com.normation.cfclerk.domain.TechniqueVersion
import com.normation.cfclerk.domain.TechniqueName
import com.normation.utils.Control.sequence
import com.normation.rudder.domain.policies._
import com.normation.rudder.domain.nodes._
import com.normation.rudder.services.queries.CmdbQueryParser
import com.normation.rudder.domain.queries.Query
import com.normation.inventory.domain.NodeId
import org.joda.time.format.ISODateTimeFormat
import com.normation.rudder.services.marshalling._
import com.normation.rudder.batch.SuccessStatus
import com.normation.rudder.batch.ErrorStatus
import com.normation.rudder.domain.servers.NodeConfiguration
import com.normation.rudder.services.marshalling.DeploymentStatusUnserialisation
import com.normation.rudder.batch.CurrentDeploymentStatus
import com.normation.inventory.domain.AgentType
import com.normation.rudder.domain.eventlog._
import com.normation.cfclerk.domain.TechniqueId
import com.normation.rudder.repository.GitPath
import com.normation.rudder.repository.GitCommitId
import com.normation.rudder.repository.GitArchiveId
import org.eclipse.jgit.lib.PersonIdent
import com.normation.rudder.domain.Constants
import com.normation.rudder.services.marshalling.TestFileFormat
import com.normation.eventlog.EventLog
import org.joda.time.DateTime
import com.normation.rudder.domain.workflows.ConfigurationChangeRequest
import com.normation.rudder.domain.workflows.ConfigurationChangeRequest
import com.normation.rudder.domain.workflows.ChangeRequestId
import com.normation.rudder.domain.workflows.ChangeRequestInfo
import com.normation.rudder.domain.workflows.WorkflowStepChange
import com.normation.rudder.domain.workflows.WorkflowNodeId
import com.normation.rudder.domain.workflows.WorkflowStepChange
import com.normation.eventlog.ModificationId
/**
 * A service that helps map event log details to their structured data model.
 * Details should always be in the format: <entry>{more details here}</entry>
 */
trait EventLogDetailsService {
  /**
   * From a given NodeSeq, retrieve the real details, which should be in an <entry/> tag.
   * The result, if Full, is trimmed so that pattern matching can be done without
   * having to deal with empty text nodes.
   */
def getEntryContent(xml:NodeSeq) : Box[Elem]
///// rule /////
def getRuleAddDetails(xml:NodeSeq) : Box[AddRuleDiff]
def getRuleDeleteDetails(xml:NodeSeq) : Box[DeleteRuleDiff]
def getRuleModifyDetails(xml:NodeSeq) : Box[ModifyRuleDiff]
///// directive /////
def getDirectiveAddDetails(xml:NodeSeq) : Box[(AddDirectiveDiff, SectionVal)]
def getDirectiveDeleteDetails(xml:NodeSeq) : Box[(DeleteDirectiveDiff, SectionVal)]
def getDirectiveModifyDetails(xml:NodeSeq) : Box[ModifyDirectiveDiff]
///// node group /////
def getNodeGroupAddDetails(xml:NodeSeq) : Box[AddNodeGroupDiff]
def getNodeGroupDeleteDetails(xml:NodeSeq) : Box[DeleteNodeGroupDiff]
def getNodeGroupModifyDetails(xml:NodeSeq) : Box[ModifyNodeGroupDiff]
///// node /////
def getAcceptNodeLogDetails(xml:NodeSeq) : Box[InventoryLogDetails]
def getRefuseNodeLogDetails(xml:NodeSeq) : Box[InventoryLogDetails]
def getDeleteNodeLogDetails(xml:NodeSeq) : Box[InventoryLogDetails]
///// other /////
def getDeploymentStatusDetails(xml:NodeSeq) : Box[CurrentDeploymentStatus]
def getUpdatePolicyServerDetails(xml:NodeSeq) : Box[AuthorizedNetworkModification]
def getTechniqueLibraryReloadDetails(xml:NodeSeq) : Box[Seq[TechniqueId]]
def getTechniqueModifyDetails(xml: NodeSeq): Box[ModifyTechniqueDiff]
def getTechniqueDeleteDetails(xml:NodeSeq) : Box[DeleteTechniqueDiff]
///// archiving & restoration /////
def getNewArchiveDetails[T <: ExportEventLog](xml:NodeSeq, archive:T) : Box[GitArchiveId]
def getRestoreArchiveDetails[T <: ImportEventLog](xml:NodeSeq, archive:T) : Box[GitCommitId]
def getRollbackDetails(xml:NodeSeq) : Box[RollbackInfo]
def getChangeRequestDetails(xml:NodeSeq) : Box[ChangeRequestDiff]
def getWorkflotStepChange(xml:NodeSeq) : Box[WorkflowStepChange]
}
/**
* Details should always be in the format: <entry>{more details here}</entry>
*/
class EventLogDetailsServiceImpl(
cmdbQueryParser : CmdbQueryParser
, piUnserialiser : DirectiveUnserialisation
, groupUnserialiser : NodeGroupUnserialisation
, crUnserialiser : RuleUnserialisation
, techniqueUnserialiser : ActiveTechniqueUnserialisation
, deploymentStatusUnserialisation : DeploymentStatusUnserialisation
) extends EventLogDetailsService {
  /**
   * A utility method that parses a <X><from>...</from><to>...</to></X>
   * element and extracts it into a SimpleDiff.
   */
private[this] def getFromTo[T](opt:Option[NodeSeq], f:NodeSeq => Box[T] ) : Box[Option[SimpleDiff[T]]] = {
opt match {
case None => Full(None)
case Some(x) =>
for {
fromS <- (x \ "from").headOption ?~! ("Missing required tag 'from'")
toS <- (x \ "to").headOption ?~! ("Missing required tag 'to'")
from <- f(fromS)
to <- f(toS)
} yield {
Some(SimpleDiff(from, to))
}
}
}
/**
* Special case of getFromTo for strings.
*/
private[this] def getFromToString(opt:Option[NodeSeq]) = getFromTo[String](opt,(s:NodeSeq) => Full(s.text))
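  // For instance, the helpers above are expected to behave like this (an illustrative
  // sketch, not part of the original service contract):
  //   getFromToString(Some(<name><from>old</from><to>new</to></name>))
  //     == Full(Some(SimpleDiff("old", "new")))
  //   getFromToString(None) == Full(None)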
def getEntryContent(xml:NodeSeq) : Box[Elem] = {
if(xml.size == 1) {
val node = Utility.trim(xml.head)
node match {
case e:Elem => Full(e)
case _ => Failure("Given node is not an XML element: " + node.toString)
}
} else {
Failure("Bad details format. We were expected an unique node <entry/>, and we get: " + xml.toString)
}
}
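  // Illustrative behaviour of getEntryContent, following directly from the checks above:
  //   getEntryContent(<entry><rule/></entry>)  // Full(<entry><rule/></entry>), trimmed
  //   getEntryContent(<entry/> ++ <entry/>)    // Failure: exactly one <entry/> node is expected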
/**
* Version 2:
<rule changeType="add" fileFormat="2">
<id>{rule.id.value}</id>
<name>{rule.name}</name>
<serial>{rule.serial}</serial>
<target>{ rule.target.map( _.target).getOrElse("") }</target>
<directiveIds>{
rule.directiveIds.map { id => <id>{id.value}</id> }
}</directiveIds>
<shortDescription>{rule.shortDescription}</shortDescription>
<longDescription>{rule.longDescription}</longDescription>
<isEnabled>{rule.isEnabledStatus}</isEnabled>
<isSystem>{rule.isSystem}</isSystem>
</rule>
*/
override def getRuleAddDetails(xml:NodeSeq) : Box[AddRuleDiff] = {
getRuleFromXML(xml, "add").map { rule =>
AddRuleDiff(rule)
}
}
/**
* Version 2:
<rule changeType="delete" fileFormat="2">
<id>{rule.id.value}</id>
<name>{rule.name}</name>
<serial>{rule.serial}</serial>
<target>{ rule.target.map( _.target).getOrElse("") }</target>
<directiveIds>{
rule.directiveIds.map { id => <id>{id.value}</id> }
}</directiveIds>
<shortDescription>{rule.shortDescription}</shortDescription>
<longDescription>{rule.longDescription}</longDescription>
<isEnabled>{rule.isEnabledStatus}</isEnabled>
<isSystem>{rule.isSystem}</isSystem>
</rule>
*/
override def getRuleDeleteDetails(xml:NodeSeq) : Box[DeleteRuleDiff] = {
getRuleFromXML(xml, "delete").map { rule =>
DeleteRuleDiff(rule)
}
}
/**
* <rule changeType="modify">
<id>012f3064-d392-43a3-bec9-b0f75950a7ea</id>
<displayName>cr1</displayName>
<name><from>cr1</from><to>cr1-x</to></name>
<target><from>....</from><to>....</to></target>
<directiveIds><from><id>...</id><id>...</id></from><to><id>...</id></to></directiveIds>
<shortDescription><from>...</from><to>...</to></shortDescription>
<longDescription><from>...</from><to>...</to></longDescription>
</rule>
*/
override def getRuleModifyDetails(xml:NodeSeq) : Box[ModifyRuleDiff] = {
for {
entry <- getEntryContent(xml)
rule <- (entry \ "rule").headOption ?~! ("Entry type is not rule : " + entry)
changeTypeAddOk <- {
if(rule.attribute("changeType").map( _.text ) == Some("modify"))
Full("OK")
else
Failure("Rule attribute does not have changeType=modify: " + entry)
}
fileFormatOk <- TestFileFormat(rule)
id <- (rule \ "id").headOption.map( _.text ) ?~!
("Missing attribute 'id' in entry type rule : " + entry)
displayName <- (rule \ "displayName").headOption.map( _.text ) ?~!
("Missing attribute 'displayName' in entry type rule : " + entry)
name <- getFromToString((rule \ "name").headOption)
serial <- getFromTo[Int]((rule \ "serial").headOption,
{ x => tryo(x.text.toInt) } )
targets <- getFromTo[Set[RuleTarget]]((rule \ "targets").headOption,
{ x:NodeSeq =>
Full((x \ "target").toSet.flatMap{ y: NodeSeq =>
RuleTarget.unser(y.text )})
})
shortDescription <- getFromToString((rule \ "shortDescription").headOption)
longDescription <- getFromToString((rule \ "longDescription").headOption)
isEnabled <- getFromTo[Boolean]((rule \ "isEnabled").headOption,
{ s => tryo { s.text.toBoolean } } )
isSystem <- getFromTo[Boolean]((rule \ "isSystem").headOption,
{ s => tryo { s.text.toBoolean } } )
directiveIds <- getFromTo[Set[DirectiveId]]((rule \ "directiveIds").headOption,
{ x:NodeSeq =>
Full((x \ "id").toSet.map( (y:NodeSeq) =>
DirectiveId( y.text )))
} )
} yield {
ModifyRuleDiff(
id = RuleId(id)
, name = displayName
, modName = name
, modSerial = serial
, modTarget = targets
, modDirectiveIds = directiveIds
, modShortDescription = shortDescription
, modLongDescription = longDescription
, modIsActivatedStatus = isEnabled
, modIsSystem = isSystem
)
}
}
/**
* Map XML into a rule
*/
private[this] def getRuleFromXML(xml:NodeSeq, changeType:String) : Box[Rule] = {
for {
entry <- getEntryContent(xml)
crXml <- (entry \ "rule").headOption ?~! ("Entry type is not a rule: " + entry)
changeTypeAddOk <- {
if(crXml.attribute("changeType").map( _.text ) == Some(changeType)) Full("OK")
else Failure("Rule attribute does not have changeType=%s: ".format(changeType) + entry)
}
rule <- crUnserialiser.unserialise(crXml)
} yield {
rule
}
}
///// directives /////
/**
* Map XML into a directive
*/
private[this] def getDirectiveFromXML(xml:NodeSeq, changeType:String) : Box[(TechniqueName, Directive, SectionVal)] = {
for {
entry <- getEntryContent(xml)
piXml <- (entry \ "directive").headOption ?~! ("Entry type is not a directive: " + entry)
changeTypeAddOk <- {
if(piXml.attribute("changeType").map( _.text ) == Some(changeType)) Full("OK")
else Failure("Directive attribute does not have changeType=%s: ".format(changeType) + entry)
}
ptPiSectionVals <- piUnserialiser.unserialise(piXml)
} yield {
ptPiSectionVals
}
}
def getDirectiveAddDetails(xml:NodeSeq) : Box[(AddDirectiveDiff, SectionVal)] = {
getDirectiveFromXML(xml, "add").map { case (ptName, directive,sectionVal) =>
(AddDirectiveDiff(ptName, directive),sectionVal)
}
}
def getDirectiveDeleteDetails(xml:NodeSeq) : Box[(DeleteDirectiveDiff, SectionVal)] = {
getDirectiveFromXML(xml, "delete").map { case(ptName, directive,sectionVal) =>
(DeleteDirectiveDiff(ptName, directive),sectionVal)
}
}
def getDirectiveModifyDetails(xml:NodeSeq) : Box[ModifyDirectiveDiff] = {
for {
entry <- getEntryContent(xml)
directive <- (entry \ "directive").headOption ?~! ("Entry type is not directive : " + entry)
changeTypeAddOk <- {
if(directive.attribute("changeType").map( _.text ) == Some("modify")) Full("OK")
else Failure("Directive attribute does not have changeType=modify: " + entry)
}
fileFormatOk <- TestFileFormat(directive)
id <- (directive \ "id").headOption.map( _.text ) ?~! ("Missing attribute 'id' in entry type directive : " + entry)
ptName <- (directive \ "techniqueName").headOption.map( _.text ) ?~! ("Missing attribute 'techniqueName' in entry type directive : " + entry)
displayName <- (directive \ "displayName").headOption.map( _.text ) ?~! ("Missing attribute 'displayName' in entry type directive : " + entry)
name <- getFromToString((directive \ "name").headOption)
techniqueVersion <- getFromTo[TechniqueVersion]((directive \ "techniqueVersion").headOption, {v =>
tryo(TechniqueVersion(v.text))
} )
parameters <- getFromTo[SectionVal]((directive \ "parameters").headOption, {parameter =>
piUnserialiser.parseSectionVal(parameter)
})
shortDescription <- getFromToString((directive \ "shortDescription").headOption)
longDescription <- getFromToString((directive \ "longDescription").headOption)
priority <- getFromTo[Int]((directive \ "priority").headOption, { x => tryo(x.text.toInt) } )
isEnabled <- getFromTo[Boolean]((directive \ "isEnabled").headOption, { s => tryo { s.text.toBoolean } } )
isSystem <- getFromTo[Boolean]((directive \ "isSystem").headOption, { s => tryo { s.text.toBoolean } } )
} yield {
ModifyDirectiveDiff(
techniqueName = TechniqueName(ptName)
, id = DirectiveId(id)
, name = displayName
, modName = name
, modTechniqueVersion = techniqueVersion
, modParameters = parameters
, modShortDescription = shortDescription
, modLongDescription = longDescription
, modPriority = priority
, modIsActivated = isEnabled
, modIsSystem = isSystem
)
}
}
///// node group /////
override def getNodeGroupAddDetails(xml:NodeSeq) : Box[AddNodeGroupDiff] = {
getNodeGroupFromXML(xml, "add").map { group =>
AddNodeGroupDiff(group)
}
}
override def getNodeGroupDeleteDetails(xml:NodeSeq) : Box[DeleteNodeGroupDiff] = {
getNodeGroupFromXML(xml, "delete").map { group =>
DeleteNodeGroupDiff(group)
}
}
override def getNodeGroupModifyDetails(xml:NodeSeq) : Box[ModifyNodeGroupDiff] = {
for {
entry <- getEntryContent(xml)
group <- (entry \ "nodeGroup").headOption ?~! ("Entry type is not nodeGroup : " + entry)
changeTypeAddOk <- {
if(group.attribute("changeType").map( _.text ) == Some("modify")) Full("OK")
else Failure("NodeGroup attribute does not have changeType=modify: " + entry)
}
fileFormatOk <- TestFileFormat(group)
id <- (group \ "id").headOption.map( _.text ) ?~! ("Missing attribute 'id' in entry type nodeGroup : " + entry)
displayName <- (group \ "displayName").headOption.map( _.text ) ?~! ("Missing attribute 'displayName' in entry type nodeGroup : " + entry)
name <- getFromToString((group \ "name").headOption)
description <- getFromToString((group \ "description").headOption)
query <- getFromTo[Option[Query]]((group \ "query").headOption, {s =>
//check for <from><none></none></from> or the same with <to>, <none/>, etc
if( (s \ "none").isEmpty) cmdbQueryParser(s.text).map( Some(_) )
else Full(None)
} )
isDynamic <- getFromTo[Boolean]((group \ "isDynamic").headOption, { s => tryo { s.text.toBoolean } } )
serverList <- getFromTo[Set[NodeId]]((group \ "nodeIds").headOption, { x:NodeSeq =>
Full((x \ "id").toSet.map( (y:NodeSeq) => NodeId( y.text ) ))
} )
isEnabled <- getFromTo[Boolean]((group \ "isEnabled").headOption, { s => tryo { s.text.toBoolean } } )
isSystem <- getFromTo[Boolean]((group \ "isSystem").headOption, { s => tryo { s.text.toBoolean } } )
} yield {
ModifyNodeGroupDiff(
id = NodeGroupId(id)
, name = displayName
, modName = name
, modDescription = description
, modQuery = query
, modIsDynamic = isDynamic
, modNodeList = serverList
, modIsActivated = isEnabled
, modIsSystem = isSystem
)
}
}
/**
* Map XML into a node group
*/
private[this] def getNodeGroupFromXML(xml:NodeSeq, changeType:String) : Box[NodeGroup] = {
for {
entry <- getEntryContent(xml)
groupXml <- (entry \ "nodeGroup").headOption ?~! ("Entry type is not a nodeGroup: " + entry)
changeTypeAddOk <- {
if(groupXml.attribute("changeType").map( _.text ) == Some(changeType)) Full("OK")
else Failure("nodeGroup attribute does not have changeType=%s: ".format(changeType) + entry)
}
group <- groupUnserialiser.unserialise(groupXml)
} yield {
group
}
}
def getAcceptNodeLogDetails(xml:NodeSeq) : Box[InventoryLogDetails] = {
getInventoryLogDetails(xml, "accept")
}
def getRefuseNodeLogDetails(xml:NodeSeq) : Box[InventoryLogDetails] = {
getInventoryLogDetails(xml, "refuse")
}
/**
* Get inventory details
*/
private[this] def getInventoryLogDetails(xml:NodeSeq, action:String) : Box[InventoryLogDetails] = {
for {
entry <- getEntryContent(xml)
details <- (entry \ "node").headOption ?~! ("Entry type is not a node: " + entry)
actionOk <- {
if(details.attribute("action").map( _.text ) == Some(action)) Full("OK")
else Failure("node attribute does not have action=%s: ".format(action) + entry)
}
fileFormatOk <- TestFileFormat(details)
nodeId <- (details \ "id").headOption.map( _.text ) ?~! ("Missing attribute 'id' in entry type node: " + entry)
version <- (details \ "inventoryVersion").headOption.map( _.text ) ?~! ("Missing attribute 'inventoryVersion' in entry type node : " + entry)
hostname <- (details \ "hostname").headOption.map( _.text ) ?~! ("Missing attribute 'hostname' in entry type node : " + entry)
os <- (details \ "fullOsName").headOption.map( _.text ) ?~! ("Missing attribute 'fullOsName' in entry type node : " + entry)
actorIp <- (details \ "actorIp").headOption.map( _.text ) ?~! ("Missing attribute 'actorIp' in entry type node : " + entry)
} yield {
InventoryLogDetails(
nodeId = NodeId(nodeId)
, inventoryVersion = ISODateTimeFormat.dateTimeParser.parseDateTime(version)
, hostname = hostname
, fullOsName = os
, actorIp = actorIp
)
}
}
def getDeleteNodeLogDetails(xml:NodeSeq) : Box[InventoryLogDetails] = {
getInventoryLogDetails(xml, "delete")
}
/**
* Get node details
*/
private[this] def getNodeLogDetails(xml:NodeSeq, action:String) : Box[NodeLogDetails] = {
for {
entry <- getEntryContent(xml)
details <- (entry \ "node").headOption ?~! ("Entry type is not a node: " + entry)
actionOk <- {
if(details.attribute("action").map( _.text ) == Some(action)) Full("OK")
else Failure("node attribute does not have action=%s: ".format(action) + entry)
}
fileFormatOk <- TestFileFormat(details)
nodeId <- (details \ "id").headOption.map( _.text ) ?~! ("Missing attribute 'id' in entry type node: " + entry)
name <- (details \ "name").headOption.map( _.text ) ?~! ("Missing attribute 'name' in entry type node : " + entry)
hostname <- (details \ "hostname").headOption.map( _.text ) ?~! ("Missing attribute 'hostname' in entry type node : " + entry)
description <- (details \ "description").headOption.map( _.text ) ?~! ("Missing attribute 'description' in entry type node : " + entry)
ips <- (details \ "ips").headOption.map { case x:NodeSeq =>
(x \ "ip").toSeq.map( (y:NodeSeq) => y.text )
}?~! ("Missing attribute 'ips' in entry type node : " + entry)
machineType <- (details \ "machineType").headOption.map( _.text ) ?~! ("Missing attribute 'machineType' in entry type node : " + entry)
os <- (details \ "os").headOption.map( _.text ) ?~! ("Missing attribute 'os' in entry type node : " + entry)
      osVersion <- (details \ "osVersion").headOption.map( _.text ) ?~! ("Missing attribute 'osVersion' in entry type node : " + entry)
      servicePack = (details \ "servicePack").headOption.map( _.text )
boxedAgentsName<- (details \ "agentsName").headOption.map {
case x:NodeSeq =>
(x \ "agentName").toSeq.map( (y:NodeSeq) => AgentType.fromValue(y.text) )
} ?~! ("Missing attribute 'agentsName' in entry type node : " + entry)
inventoryDate <- (details \ "inventoryDate").headOption.map( _.text ) ?~! ("Missing attribute 'inventoryDate' in entry type node : " + entry)
publicKey <- (details \ "publicKey").headOption.map( _.text ) ?~! ("Missing attribute 'publicKey' in entry type node : " + entry)
policyServerId <- (details \ "policyServerId").headOption.map( _.text ) ?~! ("Missing attribute 'policyServerId' in entry type node : " + entry)
localAdministratorAccountName <- (details \ "localAdministratorAccountName").headOption.map( _.text ) ?~! ("Missing attribute 'localAdministratorAccountName' in entry type node : " + entry)
creationDate <- (details \ "creationDate").headOption.map( _.text ) ?~! ("Missing attribute 'creationDate' in entry type node : " + entry)
isBroken <- (details \ "isBroken").headOption.map(_.text.toBoolean )
isSystem <- (details \ "isSystem").headOption.map(_.text.toBoolean )
isPolicyServer <- (details \ "isPolicyServer").headOption.map(_.text.toBoolean )
} yield {
val agentsNames = com.normation.utils.Control.boxSequence[AgentType](boxedAgentsName)
NodeLogDetails(node = NodeInfo(
id = NodeId(nodeId)
, name = name
, description = description
, hostname = hostname
, machineType
, os
, osVersion
, servicePack
, ips = ips.toList
, inventoryDate = ISODateTimeFormat.dateTimeParser.parseDateTime(inventoryDate)
, publicKey = publicKey
, agentsName = agentsNames.openOr(Seq())
, policyServerId = NodeId(policyServerId)
, localAdministratorAccountName = localAdministratorAccountName
, creationDate = ISODateTimeFormat.dateTimeParser.parseDateTime(creationDate)
, isBroken = isBroken
, isSystem = isSystem
, isPolicyServer = isPolicyServer
))
}
}
def getDeploymentStatusDetails(xml:NodeSeq) : Box[CurrentDeploymentStatus] = {
for {
entry <- getEntryContent(xml)
details <- (entry \ "deploymentStatus").headOption ?~! ("Entry type is not a deploymentStatus: " + entry)
deploymentStatus <- deploymentStatusUnserialisation.unserialise(details)
} yield {
deploymentStatus
}
}
/**
* <changeAuthorizedNetworks fileFormat="2">
* <oldAuthorizedNetworks>
<net>XXXXX</net>
<net>SSSSS</net>
</oldAuthorizedNetworks>
<newAuthorizedNetworks>
<net>XXXXX</net>
<net>SSSSS</net>
<net>PPPPP</net>
</newAuthorizedNetworks>
</changeAuthorizedNetworks>
*/
def getUpdatePolicyServerDetails(xml:NodeSeq) : Box[AuthorizedNetworkModification] = {
for {
entry <- getEntryContent(xml)
details <- (entry \ "changeAuthorizedNetworks").headOption ?~! ("Entry type is not a changeAuthorizedNetworks: " + entry)
fileFormatOk <- TestFileFormat(details)
oldsXml <- (entry \\ "oldAuthorizedNetworks").headOption ?~! ("Missing attribute 'oldAuthorizedNetworks' in entry: " + entry)
newsXml <- (entry \\ "newAuthorizedNetworks").headOption ?~! ("Missing attribute 'newAuthorizedNetworks' in entry: " + entry)
} yield {
AuthorizedNetworkModification(
oldNetworks = (oldsXml \ "net").map( _.text )
, newNetworks = (newsXml \ "net").map( _.text )
)
}
}
/**
* <techniqueReloaded fileFormat="2">
<modifiedTechnique>
<name>{name.value}</name>
<version>{version.toString}</version>
</modifiedTechnique>
<modifiedTechnique>
<name>{name.value}</name>
<version>{version.toString}</version>
</modifiedTechnique>
....
</techniqueReloaded>
*/
def getTechniqueLibraryReloadDetails(xml:NodeSeq) : Box[Seq[TechniqueId]] = {
for {
entry <- getEntryContent(xml)
details <- (entry \ "reloadTechniqueLibrary").headOption ?~! ("Entry type is not a techniqueReloaded: " + entry)
fileFormatOk <- TestFileFormat(details)
activeTechniqueIds <- sequence((details \ "modifiedTechnique")) { technique =>
for {
name <- (technique \ "name").headOption.map( _.text ) ?~! ("Missing attribute 'name' in entry type techniqueReloaded : " + entry)
version <- (technique \ "version").headOption.map( _.text ) ?~! ("Missing attribute 'version' in entry type techniqueReloaded : " + entry)
v <- tryo { TechniqueVersion(version) }
} yield {
TechniqueId(TechniqueName(name),v)
}
}
} yield {
activeTechniqueIds
}
}
def getTechniqueModifyDetails(xml: NodeSeq): Box[ModifyTechniqueDiff] = {
for {
entry <- getEntryContent(xml)
technique <- (entry \ "activeTechnique").headOption ?~!
("Entry type is not a technique: " + entry)
id <- (technique \ "id").headOption.map( _.text ) ?~!
("Missing attribute 'id' in entry type technique : " + entry)
displayName <- (technique \ "techniqueName").headOption.map( _.text ) ?~!
("Missing attribute 'displayName' in entry type rule : " + entry)
isEnabled <- getFromTo[Boolean]((technique \ "isEnabled").headOption,
{ s => tryo { s.text.toBoolean } } )
fileFormatOk <- TestFileFormat(technique)
} yield {
ModifyTechniqueDiff(
id = ActiveTechniqueId(id)
, name = TechniqueName(displayName)
, modIsEnabled = isEnabled
)
}
}
override def getTechniqueDeleteDetails(xml:NodeSeq) : Box[DeleteTechniqueDiff] = {
getTechniqueFromXML(xml, "delete").map { technique =>
DeleteTechniqueDiff(technique)
}
}
/**
* Map XML into a technique
*/
private[this] def getTechniqueFromXML(xml:NodeSeq, changeType:String) : Box[ActiveTechnique] = {
for {
entry <- getEntryContent(xml)
techniqueXml <- (entry \ "activeTechnique").headOption ?~! ("Entry type is not a technique: " + entry)
changeTypeAddOk <- if(techniqueXml.attribute("changeType").map( _.text ) == Some(changeType))
Full("OK")
else
Failure("Technique attribute does not have changeType=%s: ".format(changeType) + entry)
technique <- techniqueUnserialiser.unserialise(techniqueXml)
} yield {
technique
}
}
def getNewArchiveDetails[T <: ExportEventLog](xml:NodeSeq, archive:T) : Box[GitArchiveId] = {
def getCommitInfo(xml:NodeSeq, tagName:String) = {
for {
entry <- getEntryContent(xml)
details <- (entry \ tagName).headOption ?~! ("Entry type is not a '%s': %s".format(tagName, entry))
fileFormatOk <- TestFileFormat(details)
path <- (details \ "path").headOption.map( _.text ) ?~! ("Missing attribute 'path' in entry: " + xml)
commitId <- (details \ "commit").headOption.map( _.text ) ?~! ("Missing attribute 'commit' in entry: " + xml)
name <- (details \ "commiterName").headOption.map( _.text ) ?~! ("Missing attribute 'commiterName' in entry: " + xml)
email <- (details \ "commiterEmail").headOption.map( _.text ) ?~! ("Missing attribute 'commiterEmail' in entry: " + xml)
} yield {
GitArchiveId(GitPath(path), GitCommitId(commitId), new PersonIdent(name, email))
}
}
archive match {
case x:ExportGroupsArchive => getCommitInfo(xml, ExportGroupsArchive.tagName)
case x:ExportTechniqueLibraryArchive => getCommitInfo(xml, ExportTechniqueLibraryArchive.tagName)
case x:ExportRulesArchive => getCommitInfo(xml, ExportRulesArchive.tagName)
case x:ExportFullArchive => getCommitInfo(xml, ExportFullArchive.tagName)
}
}
def getRestoreArchiveDetails[T <: ImportEventLog](xml:NodeSeq, archive:T) : Box[GitCommitId] = {
def getCommitInfo(xml:NodeSeq, tagName:String) = {
for {
entry <- getEntryContent(xml)
details <- (entry \ tagName).headOption ?~! ("Entry type is not a '%s': %s".format(tagName, entry))
fileFormatOk <- TestFileFormat(details)
commitId <- (details \ "commit").headOption.map( _.text ) ?~! ("Missing attribute 'commit' in entry: " + xml)
} yield {
GitCommitId(commitId)
}
}
archive match {
case x:ImportGroupsArchive => getCommitInfo(xml, ImportGroupsArchive.tagName)
case x:ImportTechniqueLibraryArchive => getCommitInfo(xml, ImportTechniqueLibraryArchive.tagName)
case x:ImportRulesArchive => getCommitInfo(xml, ImportRulesArchive.tagName)
case x:ImportFullArchive => getCommitInfo(xml, ImportFullArchive.tagName)
}
}
def getChangeRequestDetails(xml:NodeSeq) : Box[ChangeRequestDiff] = {
for {
entry <- getEntryContent(xml)
changeRequest <- (entry \ "changeRequest").headOption ?~! s"Entry type is not a 'changeRequest': ${entry}"
kind <- (changeRequest \ "@changeType").headOption.map(_.text) ?~! s"diff is not a valid changeRequest diff: ${changeRequest}"
crId <- (changeRequest \ "id").headOption.map(id => ChangeRequestId(id.text.toInt)) ?~! s"change request does not have any Id: ${changeRequest}"
modId = (changeRequest \ "modId").headOption.map(modId => ModificationId(modId.text))
name <- (changeRequest \ "name").headOption.map(_.text) ?~! s"change request does not have any name: ${changeRequest}"
description <- (changeRequest \ "description").headOption.map(_.text) ?~! s"change request does not have any description: ${changeRequest}"
diffName <- getFromToString((changeRequest \ "diffName").headOption)
diffDesc <- getFromToString((changeRequest \ "diffDescription").headOption)
} yield {
val changeRequest = ConfigurationChangeRequest(crId,modId,ChangeRequestInfo(name,description),Map(),Map(),Map())
kind match {
case "add" => AddChangeRequestDiff(changeRequest)
case "delete" => DeleteChangeRequestDiff(changeRequest)
case "modify" => ModifyToChangeRequestDiff(changeRequest,diffName,diffDesc)
}
}
}
def getWorkflotStepChange(xml:NodeSeq) : Box[WorkflowStepChange] = {
for {
entry <- getEntryContent(xml)
      workflowStep <- (entry \ "workflowStep").headOption ?~! s"Entry type is not a 'workflowStep': ${entry}"
crId <- (workflowStep \ "changeRequestId").headOption.map(id => ChangeRequestId(id.text.toInt)) ?~! s"Workflow event does not target any change request: ${workflowStep}"
from <- (workflowStep \ "from").headOption.map(from => WorkflowNodeId(from.text)) ?~! s"Workflow event does not have any from step: ${workflowStep}"
to <- (workflowStep \ "to").headOption.map(to => WorkflowNodeId(to.text)) ?~! s"workflow step does not have any to step: ${workflowStep}"
} yield {
WorkflowStepChange(crId,from,to)
}
}
def getRollbackDetails(xml:NodeSeq) : Box[RollbackInfo] = {
def getEvents(xml:NodeSeq)= {
for{
event <- xml
eventlogs <- event.child
entry <- eventlogs \ "rollbackedEvent"
id <- (entry \ "id").headOption.map(_.text.toInt) ?~! ("Entry type is not a 'rollback': %s".format(entry))
evtType <-(entry \ "type").headOption.map(_.text) ?~! ("Entry type is not a 'rollback': %s".format(entry))
author <-(entry \ "author").headOption.map(_.text) ?~! ("Entry type is not a 'rollback': %s".format(entry))
date <-(entry \ "date").headOption.map(_.text) ?~! ("Entry type is not a 'rollback': %s".format(entry))
} yield {
RollbackedEvent(id,date,evtType,author)
}
}
val rollbackInfo = for{
event <- xml
eventlogs <- event.child
entry <- (eventlogs \ "main").headOption
id <- (entry \ "id").headOption.map(_.text.toInt) ?~! ("Entry type is not a 'rollback': %s".format(entry))
evtType <-(entry \ "type").headOption.map(_.text) ?~! ("Entry type is not a 'rollback': %s".format(entry))
author <-(entry \ "author").headOption.map(_.text) ?~! ("Entry type is not a 'rollback': %s".format(entry))
date <-(entry \ "date").headOption.map(_.text) ?~! ("Entry type is not a 'rollback': %s".format(entry))
rollbackType <-(entry \ "rollbackType").headOption.map(_.text) ?~! ("Entry type is not a 'rollback': %s".format(entry))
} yield {
val target = RollbackedEvent(id,date,evtType,author)
RollbackInfo(target,rollbackType,getEvents(xml))
}
rollbackInfo.headOption
}
}
case class RollbackInfo(
target : RollbackedEvent
, rollbackType : String
, rollbacked : Seq[RollbackedEvent]
)
case class RollbackedEvent(
id : Int
, date : String
, eventType : String
, author : String
)
| jooooooon/rudder | rudder-core/src/main/scala/com/normation/rudder/services/eventlog/EventLogDetailsService.scala | Scala | agpl-3.0 | 37,377 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package reflect
package internal
import scala.annotation.switch
trait Constants extends api.Constants {
self: SymbolTable =>
import definitions._
final val NoTag = 0
final val UnitTag = 1
final val BooleanTag = 2
final val ByteTag = 3
final val ShortTag = 4
final val CharTag = 5
final val IntTag = 6
final val LongTag = 7
final val FloatTag = 8
final val DoubleTag = 9
final val StringTag = 10
final val NullTag = 11
final val ClazzTag = 12
// For supporting java enumerations inside java annotations (see ClassfileParser)
final val EnumTag = 13
case class Constant(value: Any) extends ConstantApi {
import java.lang.Double.doubleToRawLongBits
import java.lang.Float.floatToRawIntBits
val tag: Int = value match {
case null => NullTag
case x: Unit => UnitTag
case x: Boolean => BooleanTag
case x: Byte => ByteTag
case x: Short => ShortTag
case x: Int => IntTag
case x: Long => LongTag
case x: Float => FloatTag
case x: Double => DoubleTag
case x: String => StringTag
case x: Char => CharTag
case x: Type => ClazzTag
case x: Symbol => EnumTag
case _ => throw new Error("bad constant value: " + value + " of class " + value.getClass)
}
def isByteRange: Boolean = isIntRange && Byte.MinValue <= intValue && intValue <= Byte.MaxValue
def isShortRange: Boolean = isIntRange && Short.MinValue <= intValue && intValue <= Short.MaxValue
def isCharRange: Boolean = isIntRange && Char.MinValue <= intValue && intValue <= Char.MaxValue
def isIntRange: Boolean = ByteTag <= tag && tag <= IntTag
def isLongRange: Boolean = ByteTag <= tag && tag <= LongTag
def isFloatRange: Boolean = ByteTag <= tag && tag <= FloatTag
def isNumeric: Boolean = ByteTag <= tag && tag <= DoubleTag
def isNonUnitAnyVal = BooleanTag <= tag && tag <= DoubleTag
def isSuitableLiteralType = BooleanTag <= tag && tag <= NullTag
def isAnyVal = UnitTag <= tag && tag <= DoubleTag
def tpe: Type = tag match {
case UnitTag => UnitTpe
case BooleanTag => BooleanTpe
case ByteTag => ByteTpe
case ShortTag => ShortTpe
case CharTag => CharTpe
case IntTag => IntTpe
case LongTag => LongTpe
case FloatTag => FloatTpe
case DoubleTag => DoubleTpe
case StringTag => StringTpe
case NullTag => NullTpe
case ClazzTag => ClassType(typeValue)
case EnumTag => EnumType(symbolValue)
}
/** We need the equals method to take account of tags as well as values.
*/
// !!! In what circumstance could `equalHashValue == that.equalHashValue && tag != that.tag` be true?
override def equals(other: Any): Boolean = other match {
case that: Constant =>
this.tag == that.tag && {
//
// Consider two `NaN`s to be identical, despite non-equality
// Consider -0d to be distinct from 0d, despite equality
//
// We use the raw versions (i.e. `floatToRawIntBits` rather than `floatToIntBits`)
// to avoid treating different encodings of `NaN` as the same constant.
// You probably can't express different `NaN` varieties as compile time
// constants in regular Scala code, but it is conceivable that you could
// conjure them with a macro.
//
this.tag match {
case NullTag =>
true
case FloatTag =>
floatToRawIntBits(value.asInstanceOf[Float]) == floatToRawIntBits(that.value.asInstanceOf[Float])
case DoubleTag =>
doubleToRawLongBits(value.asInstanceOf[Double]) == doubleToRawLongBits(that.value.asInstanceOf[Double])
case _ =>
this.value.equals(that.value)
}
}
case _ => false
}
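    // A few consequences of the tag-aware, bit-level comparison above (illustrative only):
    //   Constant(0) == Constant(0L)                 // false: IntTag differs from LongTag even though 0 == 0L
    //   Constant(Float.NaN) == Constant(Float.NaN)  // true: identical raw bit patterns compare equal
    //   Constant(0.0) == Constant(-0.0)             // false: the raw bits of 0.0 and -0.0 differ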
def isNaN = value match {
case f: Float => java.lang.Float.isNaN(f)
case d: Double => java.lang.Double.isNaN(d)
case _ => false
}
def booleanValue: Boolean =
if (tag == BooleanTag) value.asInstanceOf[Boolean]
else throw new Error("value " + value + " is not a boolean")
def byteValue: Byte = tag match {
case ByteTag => value.asInstanceOf[Byte]
case ShortTag => value.asInstanceOf[Short].toByte
case CharTag => value.asInstanceOf[Char].toByte
case IntTag => value.asInstanceOf[Int].toByte
case LongTag => value.asInstanceOf[Long].toByte
case FloatTag => value.asInstanceOf[Float].toByte
case DoubleTag => value.asInstanceOf[Double].toByte
case _ => throw new Error("value " + value + " is not a Byte")
}
def shortValue: Short = tag match {
case ByteTag => value.asInstanceOf[Byte].toShort
case ShortTag => value.asInstanceOf[Short]
case CharTag => value.asInstanceOf[Char].toShort
case IntTag => value.asInstanceOf[Int].toShort
case LongTag => value.asInstanceOf[Long].toShort
case FloatTag => value.asInstanceOf[Float].toShort
case DoubleTag => value.asInstanceOf[Double].toShort
case _ => throw new Error("value " + value + " is not a Short")
}
def charValue: Char = tag match {
case ByteTag => value.asInstanceOf[Byte].toChar
case ShortTag => value.asInstanceOf[Short].toChar
case CharTag => value.asInstanceOf[Char]
case IntTag => value.asInstanceOf[Int].toChar
case LongTag => value.asInstanceOf[Long].toChar
case FloatTag => value.asInstanceOf[Float].toChar
case DoubleTag => value.asInstanceOf[Double].toChar
case _ => throw new Error("value " + value + " is not a Char")
}
def intValue: Int = tag match {
case ByteTag => value.asInstanceOf[Byte].toInt
case ShortTag => value.asInstanceOf[Short].toInt
case CharTag => value.asInstanceOf[Char].toInt
case IntTag => value.asInstanceOf[Int]
case LongTag => value.asInstanceOf[Long].toInt
case FloatTag => value.asInstanceOf[Float].toInt
case DoubleTag => value.asInstanceOf[Double].toInt
case _ => throw new Error("value " + value + " is not an Int")
}
def longValue: Long = tag match {
case ByteTag => value.asInstanceOf[Byte].toLong
case ShortTag => value.asInstanceOf[Short].toLong
case CharTag => value.asInstanceOf[Char].toLong
case IntTag => value.asInstanceOf[Int].toLong
case LongTag => value.asInstanceOf[Long]
case FloatTag => value.asInstanceOf[Float].toLong
case DoubleTag => value.asInstanceOf[Double].toLong
case _ => throw new Error("value " + value + " is not a Long")
}
def floatValue: Float = tag match {
case ByteTag => value.asInstanceOf[Byte].toFloat
case ShortTag => value.asInstanceOf[Short].toFloat
case CharTag => value.asInstanceOf[Char].toFloat
case IntTag => value.asInstanceOf[Int].toFloat
case LongTag => value.asInstanceOf[Long].toFloat
case FloatTag => value.asInstanceOf[Float]
case DoubleTag => value.asInstanceOf[Double].toFloat
case _ => throw new Error("value " + value + " is not a Float")
}
def doubleValue: Double = tag match {
case ByteTag => value.asInstanceOf[Byte].toDouble
case ShortTag => value.asInstanceOf[Short].toDouble
case CharTag => value.asInstanceOf[Char].toDouble
case IntTag => value.asInstanceOf[Int].toDouble
case LongTag => value.asInstanceOf[Long].toDouble
case FloatTag => value.asInstanceOf[Float].toDouble
case DoubleTag => value.asInstanceOf[Double]
case _ => throw new Error("value " + value + " is not a Double")
}
/** Convert constant value to conform to given type.
*/
def convertTo(pt: Type): Constant = {
val target = pt.typeSymbol
if (target == tpe.typeSymbol)
this
else if (target == ByteClass && isByteRange)
Constant(byteValue)
else if (target == ShortClass && isShortRange)
Constant(shortValue)
else if (target == CharClass && isCharRange)
Constant(charValue)
else if (target == IntClass && isIntRange)
Constant(intValue)
else if (target == LongClass && isLongRange)
Constant(longValue)
else if (target == FloatClass && isFloatRange)
Constant(floatValue)
else if (target == DoubleClass && isNumeric)
Constant(doubleValue)
else
null
}
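    // For example, the narrowing rules above give (illustrative only):
    //   Constant(42).convertTo(ByteTpe)   == Constant(42.toByte)  // 42 fits in a Byte
    //   Constant(1000).convertTo(ByteTpe) == null                 // out of Byte range, no conversion
    //   Constant(1.5).convertTo(IntTpe)   == null                 // floating-point tags never narrow to Int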
def stringValue: String =
if (value == null) "null"
else if (tag == ClazzTag) signature(typeValue)
else value.toString()
    def escapedChar(ch: Char): String = (ch: @switch) match {
      case '\b' => "\\b"
      case '\t' => "\\t"
      case '\n' => "\\n"
      case '\f' => "\\f"
      case '\r' => "\\r"
      case '"'  => "\\\""
      case '\'' => "\\\'"
      case '\\' => "\\\\"
      case _    => if (ch.isControl) "\\u%04X".format(ch.toInt) else String.valueOf(ch)
    }
def escapedStringValue: String = {
def escape(text: String): String = text flatMap escapedChar
tag match {
case NullTag => "null"
      case StringTag => "\"" + escape(stringValue) + "\""
case ClazzTag =>
def show(tpe: Type) = "classOf[" + signature(tpe) + "]"
typeValue match {
case ErasedValueType(clazz, underlying) =>
// A note on tpe_* usage here:
//
// We've intentionally erased the type arguments to the value class so that different
// instantiations of a particular value class that erase to the same underlying type
// don't result in spurious bridges (e.g. run/t6385.scala). I don't think that matters;
// printing trees of `classOf[ValueClass[String]]` shows `classOf[ValueClass]` at phase
// erasure both before and after the use of `tpe_*` here.
show(clazz.tpe_*)
case _ => show(typeValue)
}
case CharTag => "'" + escapedChar(charValue) + "'"
case LongTag => longValue.toString() + "L"
case EnumTag => symbolValue.name.toString()
case _ => String.valueOf(value)
}
}
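    // Examples of the escaping above (illustrative only):
    //   Constant("a\nb").escapedStringValue  // "\"a\\nb\""  (wrapped in quotes, newline rendered as \n)
    //   Constant('\t').escapedStringValue    // "'\\t'"
    //   Constant(42L).escapedStringValue     // "42L"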
def typeValue: Type = value.asInstanceOf[Type]
def symbolValue: Symbol = value.asInstanceOf[Symbol]
override def hashCode: Int = {
import scala.util.hashing.MurmurHash3._
val seed = 17
var h = seed
h = mix(h, tag.##) // include tag in the hash, otherwise 0, 0d, 0L, 0f collide.
val valueHash = tag match {
case NullTag => 0
      // We could just use value.hashCode here, at the cost of a collision between different NaNs
case FloatTag => java.lang.Integer.hashCode(floatToRawIntBits(value.asInstanceOf[Float]))
case DoubleTag => java.lang.Long.hashCode(doubleToRawLongBits(value.asInstanceOf[Double]))
case _ => value.hashCode()
}
h = mix(h, valueHash)
finalizeHash(h, length = 2)
}
}
object Constant extends ConstantExtractor
implicit val ConstantTag = ClassTag[Constant](classOf[Constant])
}
| martijnhoekstra/scala | src/reflect/scala/reflect/internal/Constants.scala | Scala | apache-2.0 | 11,655 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import com.google.inject.Inject
import play.api.Configuration
import uk.gov.hmrc.http.cache.client.SessionCache
import uk.gov.hmrc.play.bootstrap.config.ServicesConfig
import uk.gov.hmrc.http.HttpClient
class AmlsSessionCache @Inject()(val configuration: Configuration,
val httpClient: HttpClient) extends ServicesConfig(configuration) with SessionCache {
override def http = httpClient
override def defaultSource = getConfString("amls-frontend.cache", "amls-frontend")
override def baseUri = baseUrl("cachable.session-cache")
override def domain = getConfString("cachable.session-cache.domain", throw new Exception(s"Could not find config 'cachable.session-cache.domain'"))
}
class BusinessCustomerSessionCache @Inject()(val configuration: Configuration,
val httpClient: HttpClient) extends ServicesConfig(configuration) with SessionCache {
override def http = httpClient
override def defaultSource: String = getConfString("cachable.session-cache.review-details.cache","business-customer-frontend")
override def baseUri = baseUrl("cachable.session-cache")
override def domain = getConfString("cachable.session-cache.domain", throw new Exception(s"Could not find config 'cachable.session-cache.domain'"))
}
| hmrc/amls-frontend | app/config/SessionCache.scala | Scala | apache-2.0 | 1,927 |
package io.udash.testing
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
trait UdashSharedTest extends AnyWordSpec with Matchers with BeforeAndAfterAll {
}
| UdashFramework/udash-core | utils/src/test/scala/io/udash/testing/UdashSharedTest.scala | Scala | apache-2.0 | 237 |
package rssreader.core
import java.time.LocalDateTime
import scala.io.Source
import rssreader.utils.tests.TestSpec
class FeedSpec extends TestSpec {
behavior of "Parsing valid RSS"
it should "correctly parse fields" in {
val result = Feed.parse(fileExampleFeed)
result shouldBe 'good
result.get should have(
'title ("Example Feed"),
'link ("http://www.example.com"),
'description ("The latest stories from ..."),
'items (List(FeedItem("Item 1", "Link 1", "Description 1", None, None, None, None))),
'pubDate (Some(LocalDateTime.of(2015, 7, 1, 15, 0))),
'image (Some(FeedImage("http://example.com/foobar.png", "Title Here", "http://example.com/foo", None, None)))
)
}
it should "ignore optional fields when empty" in {
val result = Feed.parse(fileMinimalFeed)
result shouldBe 'good
result.get should have(
'title ("Minimal Feed"),
'link ("http://www.example.com"),
'description ("The latest stories from ..."),
'items (Nil),
'pubDate (None),
'image (None)
)
}
it should "create from url pointing to xml" in {
Feed.parse(fileExampleFeed) shouldBe 'good
}
it should "create from xml string" in {
val str = Source.fromURL(fileExampleFeed).mkString
Feed.parse(str) shouldBe 'good
}
it should "parse all child items" in {
val result = Feed.parse(fileMultipleItemFeed)
result.get.items should have size 2
}
behavior of "Parsing invalid RSS"
it should "fail when empty title field" in {
val str =
"""
|<rss>
| <channel>
| <title></title>
| <link>http://www.example.com</link>
| <description>The latest stories from ...</description>
| </channel>
|</rss>
""".stripMargin
Feed.parse(str) shouldBe 'bad
}
it should "fail when empty link field" in {
val str =
"""
|<rss>
| <channel>
| <title>Example Feed</title>
| <link></link>
| <description>The latest stories from ...</description>
| </channel>
|</rss>
""".stripMargin
Feed.parse(str) shouldBe 'bad
}
it should "fail when empty description field" in {
val str =
"""
|<rss>
| <channel>
| <title>Example Feed</title>
| <link>http://www.example.com</link>
| <description></description>
| </channel>
|</rss>
""".stripMargin
Feed.parse(str) shouldBe 'bad
}
it should "fail when an item cannot be parsed" in {
val str =
"""
|<rss>
| <channel>
| <title>Example Feed</title>
| <link>http://www.example.com</link>
| <description>The latest stories from ...</description>
| <item>
| <title>Item with no link or description</title>
| </item>
| </channel>
|</rss>
""".stripMargin
val result = Feed.parse(str)
result shouldBe 'bad
result.fold(
good => fail(),
bad => bad should have size 2
)
}
it should "accumulate errors when validating" in {
val str =
"""
|<rss>
| <channel>
| <title></title>
| <link></link>
| <description></description>
| </channel>
|</rss>
""".stripMargin
val result = Feed.parse(str)
result shouldBe 'bad
result.fold(
good => fail(),
bad => bad should have size 3
)
}
}
| Dev25/RSSReader | src/test/scala/rssreader/core/FeedSpec.scala | Scala | mit | 3,528 |
package gh.test.gh2011b.payload
import gh2011b.models.GistEventPayload
import net.liftweb.json._
import org.scalatest.{Matchers, FlatSpec}
class GistEventPayloadTest extends FlatSpec with Matchers
{
"A valid GistEvent payload" must "be correctly parsed" in {
val json = parse(
"""
| {
| "desc":"",
| "name":"gist: 991643",
| "id":991643,
| "url":"https://gist.github.com/991643",
| "action":"update"
|
|}
""".stripMargin)
GistEventPayload(json) shouldBe 'defined
}
}
| mgoeminne/github_etl | src/test/scala/gh/test/gh2011b/payload/GistEventPayloadTest.scala | Scala | mit | 609 |
/**
* Created on 2011/04/16
* Copyright (c) 2010-2011, Wei-ju Wu.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Wei-ju Wu nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package org.zmpp.tads.html
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import java.io._
@RunWith(classOf[JUnitRunner])
class TokenizerSpec extends FlatSpec with ShouldMatchers {
"Tokenizer" should "emit start tokens" in {
val reader = new StringReader("<html><body onload=\\"bla\\">")
val tokenizer = new Tokenizer(reader)
tokenizer.nextToken should be (StartTag("html", Map()))
tokenizer.nextToken should be (StartTag("body", Map("onload" -> "bla")))
}
"Tokenizer" should "emit end tokens" in {
val reader = new StringReader("</body></html>")
val tokenizer = new Tokenizer(reader)
tokenizer.nextToken should be (EndTag("body"))
tokenizer.nextToken should be (EndTag("html"))
}
"Tokenizer" should "emit PCData" in {
val reader = new StringReader("some text")
val tokenizer = new Tokenizer(reader)
tokenizer.nextToken should be (PCData("some text"))
tokenizer.nextToken should be (EOF)
}
"Tokenizer" should "emit EOF" in {
val reader = new StringReader("")
val tokenizer = new Tokenizer(reader)
tokenizer.nextToken should be (EOF)
}
}
| logicmoo/zmpp2 | zmpp-htmltads/src/test/scala/org/zmpp/tads/html/TokenizerTest.scala | Scala | bsd-3-clause | 2,784 |
package com.github.jackcviers
import akka.actor.{Actor, ActorSystem, Props}
class BadShakespearean extends Actor {
def receive = {
case "Good Morning" β println("Him: Forsooth 'tis the 'morn, but mourneth for thou doest I do!")
case "You're terrible" β println("Him: Yup")
}
}
class Wood extends Actor {
def receive = {
    case _ ⇒ throw new Exception("Wood cannot hear you.")
}
}
class Printing extends Actor {
def receive = {
    case msg ⇒ println(msg)
}
}
class Mine extends Actor {
def receive = {
case "Hello" β println("Him: Hi")
case 42 β println("Him: I don't know the question. Go ask the Earth Mark II.")
case s: String β println(s"Him: You sent me a string: $s.")
case Alpha(Beta(b1, Gamma(g1)), Beta(b2, Gamma(g2))) β println(s"Him: beta1: $b1, beta2: $b2, gamma1: $g1, gamma2: $g2")
case _ β println("Him: Huh?")
}
}
object BadShakespeareanMain {
val system = ActorSystem("BadShakespearean")
val actor = system.actorOf(Props[BadShakespearean], "Shake")
val wood = system.actorOf(Props[Wood], "Wood")
val printing = system.actorOf(Props[Printing], "Printing")
val mine = system.actorOf(Props[Mine], "Mine")
def send(msg: String) {
println(s"Me: $msg")
actor ! msg
Thread.sleep(100)
}
def sendWood(msg: String) {
println(s"Me: $msg")
wood ! msg
}
def sendPrint() {
    for (a ← 1 to 10) {
printing ! a
}
}
def sendMine() {
println("Me: Hello")
mine ! "Hello"
Thread.sleep(100)
println("Me: 42")
mine ! 42
Thread.sleep(100)
println("Me: Alpha!")
mine ! Alpha(b1 = Beta(b = "A", g = Gamma(g = "Z")), b2 = Beta(b = "B", g = Gamma(g = "Y")))
Thread.sleep(100)
println("Me: Gamma(How much wood could a woodchuck chuck if a woodchuck could chuck wood.)")
mine ! Gamma(g = "How much wood could a woodchuck chuck if a woodchuck could chuck wood.")
}
def main(args: Array[String]) {
send("Good Morning")
sendPrint()
send("You're terrible")
sendWood("If a tree falls in a forest, does it make a sound?")
sendMine()
system.shutdown()
}
}
| jackcviers/learning-akka | src/main/scala/com/github/jackcviers/actors.scala | Scala | apache-2.0 | 2,138 |
package io.scalac.frees.math
object Formulas {
import freestyle._
import freestyle.implicits._
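  // Each def below assembles a FreeS program over the combined algebra; AllTheMath is
  // presumably a freestyle @module (defined elsewhere in this project) that aggregates
  // the basic and high math algebras used here.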
def `(a+b)^2`[F[_]](a: Int, b: Int)
(implicit A: AllTheMath[F]): FreeS[F, Int] = {
import A._
for {
s <- basic.add(a, b)
r <- high.power(s, 2)
} yield r
}
def `a^2+2ab+b^2`[F[_]](a: Int, b: Int)(implicit A: AllTheMath[F]) = {
import A._
for {
aa <- high.power(a, 2)
ab <- basic.multiply(a, b)
ab2 <- basic.multiply(2, ab)
bb <- high.power(b, 2)
r1 <- basic.add(aa, ab2)
r <- basic.add(r1, bb)
} yield r
}
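  // A sketch of how such a program might be run (hypothetical usage; assumes Id handlers
  // for the underlying algebras are in implicit scope elsewhere in this project):
  //   import cats.Id
  //   val squared: Id[Int] = `(a+b)^2`[AllTheMath.Op](2, 3).interpret[Id]  // 25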
}
| LGLO/freestyle-login | src/main/scala/io/scalac/frees/math/Formulas.scala | Scala | apache-2.0 | 594 |
package stealthnet.scala.network.protocol.exceptions
import stealthnet.scala.util.log.LoggingContext
/**
* Protocol exception.
*/
class ProtocolException(
// scalastyle:off null
msg: String = null,
cause: Throwable = null,
// scalastyle:on null
override val loggerContext: LoggingContext#LogContext = Nil
) extends Exception(msg, cause)
with LoggingContext
| suiryc/StealthNet | core/src/main/scala/stealthnet/scala/network/protocol/exceptions/ProtocolException.scala | Scala | gpl-3.0 | 373 |
/****************************************************************************
* *
* (C) Copyright 2014 by Peter L Jones *
* [email protected] *
* *
* This file is part of jTrapKATEditor. *
* *
* jTrapKATEditor is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 3 of the License, or *
* (at your option) any later version. *
* *
* jTrapKATEditor is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with jTrapKATEditor. If not, see http://www.gnu.org/licenses/ *
* *
* ************************************************************************ *
* ** ** *
* ** Additionally, this file is also released under the same terms as ** *
 * ** the Scala Swing library and it may be included in that software    ** *
* ** rather than jTrapKATEditor. ** *
* ** ** *
* ************************************************************************ *
* *
****************************************************************************/
package info.drealm.scala.spinner
import swing._
import javax.swing.{ JSpinner, SpinnerModel }
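// Scala Swing wrapper around javax.swing.JSpinner that optionally sets a tooltip, wires up
// an associated label, and republishes model changes as ValueChanged events.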
class Spinner(_model: SpinnerModel, _name: String = "", _tip: String = null, _label: scala.swing.Label = null) extends Component with Publisher {
def this(_model: SpinnerModel, _name: String, label: scala.swing.Label) = this(_model, _name, _label = label)
override lazy val peer = new javax.swing.JSpinner(_model) with SuperMixin
name = _name
if (_tip != null) tooltip = _tip
if (_label != null) {
// Uhhhhh, right...
_label.peer.setLabelFor(peer.asInstanceOf[java.awt.Component])
if (_label.tooltip == null) _label.tooltip = tooltip
}
def value: Any = peer.getValue
def value_=(o: Any) { peer.setValue(o) }
def model: SpinnerModel = peer.getModel()
def model_=(m: SpinnerModel) { peer.setModel(m) }
peer.addChangeListener(Swing.ChangeListener { e =>
publish(new event.ValueChanged(this))
})
} | pljones/jTrapKATEditor | src/main/scala/info/drealm/scala/spinner/Spinner.scala | Scala | gpl-3.0 | 3,302 |
package se.uu.farmbio.sg.exceptions
class ParseSmilesException (message: String = null, cause: Throwable = null) extends RuntimeException(message, cause) {
} | mcapuccini/spark-cheminformatics | sg/src/main/scala/se/uu/farmbio/sg/exceptions/ParseSmilesException.scala | Scala | apache-2.0 | 161 |
package org.moe.interpreter.guts
import org.moe.runtime._
import org.moe.ast._
trait Utils {
def getCurrentPackage (env: MoeEnvironment): MoePackage = env.getCurrentPackage.getOrElse(
throw new MoeErrors.PackageNotFound("__PACKAGE__")
)
def getCurrentClass (env: MoeEnvironment): MoeClass = env.getCurrentClass.getOrElse(
throw new MoeErrors.ClassNotFound("__CLASS__")
)
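  // Resolves `method` on the invocant's associated class and invokes it with the given
  // arguments, throwing ClassNotFound/MethodNotFound when the lookup fails.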
def callMethod(invocant: MoeObject, method: String, args: List[MoeObject], klass: String = null) =
invocant.callMethod(
invocant.getAssociatedClass.getOrElse(
throw new MoeErrors.ClassNotFound(Option(klass).getOrElse(invocant.getClassName))
).getMethod(method).getOrElse(
throw new MoeErrors.MethodNotFound("method " + method + "> missing in class " + Option(klass).getOrElse(invocant.getClassName))
),
args
)
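  // Pairs parameter names with argument values, padding missing arguments with undef and
  // ignoring surplus arguments.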
def zipVars (r: MoeRuntime, names: List[String], expressions: List[MoeObject], f: ((String, MoeObject)) => Unit): Unit = {
if (expressions.isEmpty) {
names.foreach(f(_, r.NativeObjects.getUndef))
} else if (names.isEmpty) {
()
} else {
f(names.head, expressions.headOption.getOrElse(r.NativeObjects.getUndef))
zipVars(r, names.tail, expressions.tail, f)
}
}
// Throw an exception if a variable isn't closed over at declaration time
  // This prevents variables that live in the same environment but are declared after the
  // closure from being captured and causing unexpected behavior.
def throwForUndeclaredVars(env: MoeEnvironment, signature: MoeSignature, body: StatementsNode): Unit = {
var declared: Set[String] = signature.getParams.map(_.getName).toSet
walkAST(
body,
{ ast: AST =>
ast match {
case VariableDeclarationNode(varname, _) =>
declared += varname
case VariableAccessNode(varname) =>
if (!env.has(varname) && !declared(varname) && !env.isSpecialMarker(varname)) {
throw new MoeErrors.VariableNotFound(varname)
}
case _ => Unit
}
}
)
}
  // XXX - this no longer captures all the AST nodes
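  // Recursively visits an AST node and its children, invoking callback on each node it recognises.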
def walkAST(ast: AST, callback: (AST) => Unit): Unit = {
callback(ast)
ast match {
case CompilationUnitNode(body) => walkAST(body, callback)
case ScopeNode(body) => walkAST(body, callback)
case StatementsNode(nodes) => nodes.foreach(walkAST(_, callback))
case PairLiteralNode(key, value) => {
walkAST(key, callback)
walkAST(value, callback)
}
case ArrayLiteralNode(values) => values.foreach(walkAST(_, callback))
case HashLiteralNode(map) => map.foreach(walkAST(_, callback))
case PrefixUnaryOpNode(receiver, _) => walkAST(receiver, callback)
case PostfixUnaryOpNode(receiver, _) => walkAST(receiver, callback)
case BinaryOpNode(lhs, _, rhs) => {
walkAST(lhs, callback)
walkAST(rhs, callback)
}
case ClassDeclarationNode(name, superclass, body, _, _) => walkAST(body, callback)
case PackageDeclarationNode(_, body, _, _) => walkAST(body, callback)
case SubMethodDeclarationNode(_, _, body) => walkAST(body, callback)
case MethodDeclarationNode(_, _, body) => walkAST(body, callback)
case SubroutineDeclarationNode(_, _, body, _) => walkAST(body, callback)
case AttributeAssignmentNode(name, expression) => walkAST(expression, callback)
case AttributeDeclarationNode(name, expression) => walkAST(expression, callback)
case VariableAssignmentNode(name, expression) => walkAST(expression, callback)
case VariableDeclarationNode(name, expression) => walkAST(expression, callback)
case HashElementAccessNode(hashName, keys) => keys.foreach(walkAST(_, callback))
case ArrayElementAccessNode(arrayName, indices) => indices.foreach(walkAST(_, callback))
// ^ Maybe we need to walk VariableAccessNode(arrayName)? Not sure.
case MethodCallNode(invocant, method_name, args) => {
walkAST(invocant, callback)
args.foreach(walkAST(_, callback))
}
case SubroutineCallNode(method_name, args) => {
args.foreach(walkAST(_, callback))
}
case IfNode(if_node) => {
walkAST(if_node, callback)
}
case UnlessNode(unless_node) => {
walkAST(unless_node, callback)
}
case TryNode(body, catch_nodes, finally_nodes) => {
walkAST(body, callback)
catch_nodes.foreach(walkAST(_, callback))
finally_nodes.foreach(walkAST(_, callback))
}
case CatchNode(_, _, body) => {
walkAST(body, callback)
}
case FinallyNode(body) => walkAST(body, callback)
case WhileNode(condition, body) => {
walkAST(condition, callback)
walkAST(body, callback)
}
case DoWhileNode(condition, body) => {
walkAST(body, callback)
walkAST(condition, callback)
}
case ForeachNode(topic, list, body) => {
walkAST(topic, callback)
walkAST(list, callback)
walkAST(body, callback)
}
case ForNode(init, condition, update, body) => {
walkAST(init, callback)
walkAST(condition, callback)
walkAST(update, callback)
walkAST(body, callback)
}
case _ => return
}
}
}
| MoeOrganization/moe | src/main/scala/org/moe/interpreter/guts/Utils.scala | Scala | mit | 5,381 |