code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
package controllers

import java.nio.file.Paths
import javax.inject.{Inject, Named}

import controllers.GitHubUser._
import play.api.Logger
import play.api.i18n.MessagesApi
import play.api.libs.json.Json
import play.api.libs.ws.WSClient
import play.api.mvc.{Action, Controller}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

/** Minimal projection of GitHub's `/user` payload that this controller reads. */
case class GUser(login: String, name: Option[String], avatar_url: String)

/** OAuth access-token response returned by GitHub's token endpoint. */
case class AccessToken(access_token: String)

/**
 * Implements the GitHub OAuth "web application flow": [[getLogin]] redirects the
 * browser to GitHub's authorization page and [[githubCallback]] consumes the
 * redirect back, exchanging the one-time code for a token, fetching the user
 * profile and storing it in the Play session.
 *
 * @param secret opaque value echoed through the OAuth `state` parameter as a
 *               CSRF guard — the callback rejects requests whose state differs.
 */
class GitHubUser @Inject()(val messagesApi: MessagesApi, @Named("Secret") secret: String)
                          (implicit ec: ExecutionContext, ws: WSClient) extends Controller with GitHubServer {
  private implicit val guserReads = Json.reads[GUser]
  private implicit val tokenReads = Json.reads[AccessToken]

  /** Step 1: send the user to GitHub's authorization page, carrying `secret` as `state`. */
  def getLogin = Action {
    Redirect(ws.url("https://github.com/login/oauth/authorize")
      .withQueryString(
        "client_id" -> clientId,
        "state" -> secret
      )
      .uri
      .toString)
  }

  /**
   * Step 2: GitHub redirects back here with a one-time `code` and the echoed `state`.
   * Any failure in the chain (bad state, token exchange, profile fetch, JSON parse)
   * is funneled into the single `NonFatal` recover below.
   */
  def githubCallback(code: String, state: String) = Action.async {
    def checkSecret(state: String) = {
      if (state == secret) Future.successful("Ok")
      else Future.failed(new RuntimeException("GitHub 'state' value is not recognized"))
    }

    // Exchange the one-time code for an access token. `as[AccessToken]` fails the
    // future with a descriptive JsResultException instead of the bare
    // NoSuchElementException that `.validate(...).asOpt.get` used to throw on
    // malformed JSON, so the logged error actually says what went wrong.
    def access(code: String): Future[String] =
      getToken(code).map(response => response.json.as[AccessToken].access_token)

    // Fetch the authenticated user's profile from the GitHub API.
    def fetchUser(token: String) =
      query(token, Paths.get("user")).map(response => response.json.as[GUser])

    (for {
      _ <- checkSecret(state)
      token <- access(code)
      userInfo <- fetchUser(token)
    } yield {
      Redirect(routes.DevGymApp.index).withSession(
        loginName -> userInfo.login,
        userName -> userInfo.name.getOrElse(""),
        avatarUrl -> userInfo.avatar_url)
    }) recover {
      case NonFatal(e) =>
        Logger.error("GitHub OAuth callback failed.", e)
        Redirect(routes.DevGymApp.index).flashing(flashToUser -> messagesApi(cannotLoginViaGitHub))
    }
  }
}

object GitHubUser {
  val cannotLoginViaGitHub = "cannotLoginViaGitHub"
}
DmytroOrlov/devgym
server/app/controllers/GitHubUser.scala
Scala
apache-2.0
2,137
/**
 * Copyright 2013 Gianluca Amato
 *
 * This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
 * JANDOM is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * JANDOM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
 */

package it.unich.jandom.domains.numerical.ppl

import com.google.caliper.SimpleBenchmark
import it.unich.jandom.domains.numerical._
import parma_polyhedra_library._

/**
 * This benchmark compare some operations on the interval domain. This
 * shows that PPL is very slow in Jandom, and Reflexive PPL is even
 * slower.
 *
 * Each `timeXxx(reps)` method is a Caliper benchmark: it repeats the same
 * workload — building up a box over `numvars` variables as the union of
 * `numpoints` point boxes — through a different backend (raw PPL bindings,
 * native Jandom boxes, Jandom's PPL wrappers, reflexive PPL, macro PPL)
 * so their timings can be compared.
 *
 * @author Gianluca Amato <gamato@unich.it>
 */
class BoxDoubleBenchmark extends SimpleBenchmark {
  // Problem size shared by every variant: dimension of the box and number of points unioned.
  private val numvars = 100
  private val numpoints = 10
  private val BoxDouble = BoxDoubleDomain()

  // Referencing the object forces the PPL native library to be loaded before timing starts.
  PPLInitializer

  /** Workload through the raw PPL JNI bindings, one point generator per step. */
  def timePPL(reps: Int) {
    for (iter <- 1 to reps) {
      // create an empty box
      val db = new Double_Box(numvars, Degenerate_Element.EMPTY)
      // initialize a list of linear form (one for each variable)
      val vars = new Array[Linear_Expression_Variable](numvars)
      for (v <- 0 until numvars) vars(v) = new Linear_Expression_Variable(new Variable(v))
      // initialize the linear form x_1 + ... + x_n
      val diagonal = vars.reduceRight[Linear_Expression](_.sum(_))
      val gs = new Generator_System()
      for (i <- 1 to numpoints) {
        // the point (i, i, ..., i), scaled by denominator 1
        val point = Generator.point(diagonal times (new Coefficient(i)), new Coefficient(1))
        gs.clear
        gs.add(point)
        val point_box = new Double_Box(gs)
        db.upper_bound_assign(point_box)
      }
    }
  }

  /** Same idea through raw PPL, but advancing via affine images instead of fresh generators. */
  def timePPL2(reps: Int) {
    for (iter <- 1 to reps) {
      val db = new Double_Box(numvars, Degenerate_Element.EMPTY)
      val v0 = new Variable(0)
      val vlast = new Variable(numvars - 1)
      // only first and last variable are involved in this variant
      val expr = (new Linear_Expression_Variable(v0)) sum (new Linear_Expression_Variable(vlast))
      val gs = new Generator_System()
      val point = Generator.point(expr, new Coefficient(1))
      gs.add(point)
      db.upper_bound_assign(new Double_Box(gs))
      val denominator = new Coefficient(1)
      for (i <- 1 to numpoints) {
        val dbnew = new Double_Box(db)
        dbnew.affine_image(v0, expr, denominator)
        db.upper_bound_assign(dbnew)
      }
    }
  }

  /** Pure-Scala Jandom boxes, building each point box directly from an array. */
  def timeJandomNoPPLOptimized(reps: Int) {
    for (iter <- 1 to reps) {
      var db = BoxDouble.bottom(numvars)
      for (i <- 1 to numpoints) {
        val point = Array.fill(numvars)(i.toDouble)
        db = db union BoxDouble(point)
      }
      // NOTE(review): printing inside a timed loop adds I/O cost to this variant
      // only, which skews the comparison with the other benchmarks — confirm intended.
      println(db)
    }
  }

  /** Pure-Scala Jandom boxes, building each point via repeated linear assignments. */
  def timeJandomNoPPL(reps: Int) {
    for (iter <- 1 to reps) {
      var db = BoxDouble.bottom(numvars)
      val zero = Array.fill(numvars)(0.0)
      val full = BoxDouble.top(numvars)
      for (i <- 1 to numpoints) {
        val point = (0 until numvars).foldLeft(full) { (box, v) => box.linearAssignment(v, i.toDouble) }
        db = db union point
      }
    }
  }

  /** Jandom's hand-written PPL wrapper domain, same assignment-based workload. */
  def timeJandomPPL(reps: Int) {
    val PPLBoxDouble = PPLBoxDoubleDomain()
    for (iter <- 1 to reps) {
      var db = PPLBoxDouble.bottom(numvars)
      val full = PPLBoxDouble.top(numvars)
      for (i <- 1 to numpoints) {
        val point = (0 until numvars).foldLeft(full) { (box, v) => box.linearAssignment(v, i.toDouble) }
        db = db union point
      }
    }
  }

  /** Reflection-based PPL domain (expected slowest; domain is created inside the loop). */
  def timeJandomPPLReflexive(reps: Int) {
    for (iter <- 1 to reps) {
      val domain = PPLDomain[Octagonal_Shape_double]()
      var db = domain.bottom(numvars)
      val full = domain.top(numvars)
      for (i <- 1 to numpoints) {
        val point = (0 until numvars).foldLeft(full) { (box, v) => box.linearAssignment(v, i.toDouble) }
        db = db union point
      }
    }
  }

  /** Macro-generated PPL domain, same assignment-based workload. */
  def timeJandomPPLMacro(reps: Int) {
    for (iter <- 1 to reps) {
      // we explicitly type domain in order to avoid generation of existential types.
      val domain: NumericalDomain = PPLDomainMacro[Double_Box]
      var db = domain.bottom(numvars)
      val zero = Array.fill(numvars)(0.0)
      val full = domain.top(numvars)
      for (i <- 1 to numpoints) {
        val point = (0 until numvars).foldLeft(full) { (box, v) => box.linearAssignment(v, i.toDouble) }
        db = db union point
      }
    }
  }
}
rubino22/JDBeta
extended/src/test/ppl/it/unich/jandom/domains/numerical/ppl/BoxDoubleBenchmark.scala
Scala
lgpl-3.0
4,775
/*
 * Copyright 2008 WorldWide Conferencing, LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */
package net.liftweb.http.rest

import _root_.net.liftweb._
import util._
import Helpers._
import _root_.scala.xml.{NodeSeq, Text, Elem, UnprefixedAttribute, Null, Node}

/**
 * Mix this trait into your REST service provider to convert between different
 * response and a LiftResponse.
 *
 * Every response is wrapped in the element produced by the abstract
 * [[createTag]], decorated with `success`, `operation` and `msg` attributes
 * (see [[buildResponse]]). The implicit conversions let handler methods return
 * plain Booleans, Boxes or NodeSeqs and have them lifted automatically.
 */
trait XMLApiHelper {
  // Bare success/failure with no payload and no message.
  implicit def boolToResponse(in: Boolean): LiftResponse =
    buildResponse(in, Empty, <xml:group/>)

  // Boxed Boolean: an open Box yields its value; Failure carries its message,
  // and Empty is treated as failure with no message.
  implicit def canBoolToResponse(in: Box[Boolean]): LiftResponse =
    buildResponse(in openOr false, in match {
      case Failure(msg, _, _) => Full(Text(msg))
      case _ => Empty
    }, <xml:group/>)

  // (success flag, message) pair.
  implicit def pairToResponse(in: (Boolean, String)): LiftResponse =
    buildResponse(in._1, Full(Text(in._2)), <xml:group/>)

  // Second segment of the current request path, used as the `operation`
  // attribute value; empty string when the path has fewer than two parts.
  protected def operation: Option[NodeSeq] =
    (for (req <- S.request) yield req.path.partPath match {
      case _ :: name :: _ => name
      case _ => ""
    }).map(Text)

  // A raw NodeSeq body is always a success.
  implicit def nodeSeqToResponse(in: NodeSeq): LiftResponse =
    buildResponse(true, Empty, in)

  implicit def listElemToResponse(in: Seq[Node]): LiftResponse =
    buildResponse(true, Empty, in)

  // Boxed body: Full is success with the nodes, Failure/Empty are failures
  // with an empty body.
  implicit def canNodeToResponse(in: Box[NodeSeq]): LiftResponse = in match {
    case Full(n) => buildResponse(true, Empty, n)
    case Failure(msg, _, _) => buildResponse(false, Full(Text(msg)), Text(""))
    case _ => buildResponse(false, Empty, Text(""))
  }

  implicit def putResponseInBox(in: LiftResponse): Box[LiftResponse] = Full(in)

  /**
   * The method that wraps the outer-most tag around the body
   */
  def createTag(in: NodeSeq): Elem

  /**
   * The attribute name for success
   */
  def successAttrName = "success"

  /**
   * The attribute name for operation
   */
  def operationAttrName = "operation"

  /**
   * The attribute name for any msg attribute
   */
  def msgAttrName = "msg"

  /**
   * Build the Response based on Success, an optional message
   * and the body
   */
  protected def buildResponse(success: Boolean, msg: Box[NodeSeq],
                              body: NodeSeq): LiftResponse =
    // UnprefixedAttribute drops the attribute entirely when given None/Empty,
    // so `operation` and `msg` only appear when they have a value.
    XmlResponse(createTag(body) % (successAttrName -> success) %
      (new UnprefixedAttribute(operationAttrName, operation, Null)) %
      (new UnprefixedAttribute(msgAttrName, msg, Null)))
}
andreum/liftweb
lift/src/main/scala/net/liftweb/http/rest/XMLApiHelper.scala
Scala
apache-2.0
2,859
package text.vector

import text.parser.{SentenceSplitter, Tokenizer}

import scala.collection.mutable

/**
 * @author ynupc
 *         Created on 2016/05/22
 */
object FrequencyVectorGenerator extends VectorGenerator[FrequencyVector] {
  /**
   * Builds a frequency vector for a whole text: each sentence contributes its
   * own (term, frequency) pairs, and [[getVector]] sums them per term.
   */
  override def getVectorFromText(text: String): FrequencyVector = {
    getVector(
      // Convert Seq[Seq[(String, Int)]] to Seq[(String, Int)] via
      // List[Set[(String, Int)]] (translated from the original Japanese comment).
      {
        for (sentence <- SentenceSplitter.split(text)) yield {
          getVectorFromSentence(sentence).vector.toSet
        }
      }.toList.flatten
    )
  }

  /**
   * Builds a frequency vector for a single sentence. Every token occurrence
   * counts once; duplicates are summed in [[getVector]].
   */
  override def getVectorFromSentence(sentence: String): FrequencyVector = {
    // Idiomatic replacement for the original `zipAll(Seq[Int](), "", 1)`,
    // which relied on zipAll's default element to pair each token with 1.
    getVector(Tokenizer.tokenize(Option(sentence)).map(term => (term, 1)))
  }

  /**
   * Accumulates (term, frequency) pairs into a single frequency map and wraps
   * it as a [[FrequencyVector]], optionally binarizing depending on the
   * globally configured [[VectorType]].
   */
  def getVector(terms: Seq[(String, Int)]): FrequencyVector = {
    val vector: mutable.Map[String, Int] = mutable.Map[String, Int]()

    // Sum the frequency into the accumulator, inserting on first sight.
    def add(term: String, frequency: Int): Unit = {
      if (vector contains term) {
        vector(term) += frequency
      } else {
        vector(term) = frequency
      }
    }

    terms foreach {
      case (term, frequency) =>
        add(term, frequency)
    }

    val frequencyVector: FrequencyVector = new FrequencyVector(vector)

    VectorType.get match {
      case VectorType.Binary =>
        // Binary mode: collapse every positive count to 1 (round-trip through
        // the binary representation).
        frequencyVector.
          toBinaryVector.
          toFrequencyVector
      case VectorType.Frequency | VectorType.None =>
        frequencyVector
    }
  }
}
ynupc/scalastringcourseday6
src/main/scala/text/vector/FrequencyVectorGenerator.scala
Scala
apache-2.0
1,493
/* Copyright 2009-2011 Jay Conrod
 *
 * This file is part of Tungsten.
 *
 * Tungsten is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation, either version 2 of
 * the License, or (at your option) any later version.
 *
 * Tungsten is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Tungsten. If not, see
 * <http://www.gnu.org/licenses/>.
 */

package tungsten

import scala.util.matching.Regex
import scala.util.control.NonFatal
import java.io._
import Utilities._

/**
 * Loads Tungsten modules from disk, resolving library dependencies
 * recursively, and links them into a validated program.
 */
final object Loader {
  /**
   * Loads the program module in `file` together with all transitive library
   * dependencies, links them, and validates the result.
   *
   * @throws IOException if loading, version checks, or validation fail
   */
  def loadAndLinkProgram(file: File) = {
    val modules = loadModuleAndDependencies(file, ModuleType.PROGRAM, Version.MIN, Version.MAX, Nil)
    val program = Linker.linkModules(modules, "default", ModuleType.PROGRAM, Version.MIN, None, Nil, Nil)
    val errors = program.validateProgram
    if (!errors.isEmpty) {
      throw new IOException("validation errors in program %s:\\n%s".
        format(file, errors.mkString("\\n")))
    }
    program
  }

  /**
   * Folds over `module`'s declared dependencies, loading each library (and its
   * own dependencies) unless a module of the same name is already loaded.
   * `module` itself must already be in `alreadyLoaded`.
   */
  def loadDependenciesForModule(module: Module,
                                directory: File,
                                alreadyLoaded: List[Module]): List[Module] = {
    assert(alreadyLoaded.contains(module))
    (alreadyLoaded /: module.dependencies) { (loaded, dependency) =>
      if (loaded.exists(_.name == dependency.name))
        loaded
      else {
        val libraryFile = findModuleFile(dependency, directory, module.searchPaths)
        loadModuleAndDependencies(libraryFile, ModuleType.LIBRARY,
          dependency.minVersion, dependency.maxVersion, loaded)
      }
    }
  }

  /**
   * Reads the module in `file`, checks its type and version bounds, validates
   * it, and recurses into its dependencies. Modules already present in
   * `alreadyLoaded` (by name) are not re-loaded.
   */
  def loadModuleAndDependencies(file: File,
                                ty: ModuleType,
                                minVersion: Version,
                                maxVersion: Version,
                                alreadyLoaded: List[Module]): List[Module] = {
    val module = ModuleIO.readBinary(file)
    if (alreadyLoaded.exists(_.name == module.name))
      alreadyLoaded
    else {
      if (module.ty != ty)
        throw new IOException("file %s was supposed to be a %s module".format(file.getName, ty))
      if (module.version < minVersion) {
        throw new IOException("library %s has version %s which is less than the minimum, %s".
          format(file, module.version, minVersion))
      }
      if (module.version > maxVersion) {
        throw new IOException("library %s has version %s which is greater than the maximum, %s".
          format(file, module.version, maxVersion))
      }
      val errors = module.validate
      if (!errors.isEmpty) {
        throw new IOException("validation errors in module %s:\\n%s".
          format(file, errors.mkString("\\n")))
      }
      loadDependenciesForModule(module, file.getParentFile, module :: alreadyLoaded)
    }
  }

  /**
   * Scans `searchPaths` (relative paths resolved against `directory`) for a
   * library file whose name and version satisfy `dependency`.
   *
   * @throws IOException if no matching library is found
   */
  def findModuleFile(dependency: ModuleDependency,
                     directory: File,
                     searchPaths: List[File]): File = {
    val files = searchPaths.view.flatMap { path =>
      val dir = if (!path.isAbsolute) new File(directory, path.toString) else path
      val contents = dir.listFiles
      if (contents != null) contents.view else Nil.view
    }

    def isMatchingLibrary(file: File): Boolean = {
      file.getName match {
        case LibraryName(nameStr, versionStr) => {
          try {
            val libName = symbolFromString(nameStr)
            val libVersion = parseVersion(versionStr)
            libName == dependency.name &&
              dependency.minVersion <= libVersion &&
              libVersion <= dependency.maxVersion
          } catch {
            // Was `case _ => false`, which swallowed every Throwable
            // (including fatal VM errors and control-flow throwables).
            // Only non-fatal parse failures should disqualify a candidate.
            case NonFatal(_) => false
          }
        }
        case _ => false
      }
    }

    files.find(isMatchingLibrary _) match {
      case Some(file) => file
      case None => throw new IOException("could not find library: " + dependency)
    }
  }

  // Library file names look like "<name>-<version>.wl".
  val LibraryName = new Regex("([A-Za-z0-9_$.]+)-([0-9.]+).wl")
}
jayconrod/tungsten
core/src/main/scala/tungsten/Loader.scala
Scala
gpl-2.0
4,741
package sangria.execution.deferred

import java.util.concurrent.atomic.AtomicInteger

import sangria.ast
import sangria.execution.{DeferredWithInfo, Executor}
import sangria.macros._
import sangria.schema._
import sangria.util.{FutureResultSupport, Pos}
import sangria.util.SimpleGraphQlSupport._

import scala.concurrent.{ExecutionContext, Future}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

/**
 * Exercises DeferredResolver batching: the spec counts how many times the
 * resolver is invoked (`callsCount`) and how many deferred values it receives
 * in total (`valueCount`) for various query shapes, including complexity-based
 * exclusion (`includeDeferredFromField`) and grouping (`groupDeferred`).
 * The whole suite is run once per execution context (standard and sync).
 */
class DeferredResolverSpec extends AnyWordSpec with Matchers with FutureResultSupport {
  private[this] def deferredResolver(implicit ec: ExecutionContext): Unit = {
    // The single deferred used throughout: resolves a batch of category ids.
    case class LoadCategories(ids: Seq[String]) extends Deferred[Seq[String]]

    // Recursive category type; `lazy` + thunked fields allow the self-reference.
    lazy val CategoryType: ObjectType[Unit, String] = ObjectType(
      "Category",
      () =>
        fields[Unit, String](
          Field("name", StringType, resolve = c => s"Cat ${c.value}"),
          Field("descr", StringType, resolve = c => s"Cat ${c.value} descr"),
          Field("self", CategoryType, resolve = c => c.value),
          Field("selfFut", CategoryType, resolve = c => Future(c.value)),
          // complexity 1000 puts this field above the `< 100` inclusion
          // threshold of MyDeferredResolver below.
          Field(
            "selfFutComplex",
            CategoryType,
            complexity = Some((_, _, _) => 1000),
            resolve = c => Future(c.value)),
          Field(
            "children",
            ListType(CategoryType),
            arguments = Argument("count", IntType) :: Nil,
            resolve = c => LoadCategories((1 to c.arg[Int]("count")).map(i => s"${c.value}.$i"))
          ),
          Field(
            "childrenComplex",
            ListType(CategoryType),
            complexity = Some((_, _, _) => 1000),
            arguments = Argument("count", IntType) :: Nil,
            resolve = c => LoadCategories((1 to c.arg[Int]("count")).map(i => s"${c.value}.$i"))
          ),
          Field(
            "childrenFut",
            ListType(CategoryType),
            arguments = Argument("count", IntType) :: Nil,
            resolve = c =>
              DeferredFutureValue(
                Future.successful(LoadCategories((1 to c.arg[Int]("count")).map(i => s"${c.value}.$i"))))
          )
        )
    )

    // Query root: "fail" ids trigger resolver failure (see resolve below).
    val QueryType = ObjectType(
      "Query",
      fields[Unit, Unit](
        Field(
          "root",
          CategoryType,
          resolve = _ => DeferredValue(LoadCategories(Seq("root"))).map(_.head)),
        Field(
          "rootFut",
          CategoryType,
          resolve = _ => DeferredFutureValue(Future.successful(LoadCategories(Seq("root")))).map(_.head)),
        Field(
          "fail1",
          OptionType(CategoryType),
          resolve = _ => DeferredValue(LoadCategories(Seq("fail"))).map(_.head)),
        Field(
          "fail2",
          OptionType(CategoryType),
          resolve = _ => DeferredValue(LoadCategories(Seq("fail"))).map(_.head))
      )
    )

    // Mutation mirror of the query root (mutations resolve serially).
    val MutationType = ObjectType(
      "Mutation",
      fields[Unit, Unit](
        Field(
          "root",
          OptionType(CategoryType),
          resolve = _ => DeferredValue(LoadCategories(Seq("root"))).map(_.head)),
        Field(
          "fail1",
          OptionType(CategoryType),
          resolve = _ => DeferredValue(LoadCategories(Seq("fail"))).map(_.head)),
        Field(
          "fail2",
          OptionType(CategoryType),
          resolve = _ => DeferredValue(LoadCategories(Seq("fail"))).map(_.head))
      )
    )

    // Instrumented resolver: counts invocations and deferred values; excludes
    // expensive fields from batching and groups by complexity.
    class MyDeferredResolver extends DeferredResolver[Any] {
      val callsCount = new AtomicInteger(0)
      val valueCount = new AtomicInteger(0)

      // Only batch deferreds coming from "cheap" fields (complexity < 100).
      override val includeDeferredFromField
          : Option[(Field[_, _], Vector[ast.Field], Args, Double) => Boolean] =
        Some((_, _, _, complexity) => complexity < 100)

      // Split each batch into an expensive group and a cheap group.
      override def groupDeferred[T <: DeferredWithInfo](deferred: Vector[T]): Vector[Vector[T]] = {
        val (expensive, cheap) = deferred.partition(_.complexity > 100)
        Vector(expensive, cheap)
      }

      override def resolve(deferred: Vector[Deferred[Any]], ctx: Any, queryState: Any)(implicit
          ec: ExecutionContext): Vector[Future[Seq[String]]] = {
        callsCount.getAndIncrement()
        valueCount.addAndGet(deferred.size)

        deferred.map {
          case LoadCategories(ids) if ids contains "fail" =>
            Future.failed(new IllegalStateException("foo"))
          case LoadCategories(ids) => Future.successful(ids)
        }
      }
    }

    val schema = Schema(QueryType, Some(MutationType))

    // Runs the query with a fresh instrumented resolver and returns both.
    def exec(query: ast.Document) = {
      val resolver = new MyDeferredResolver
      val result = Executor.execute(schema, query, deferredResolver = resolver).await
      resolver -> result
    }

    "result in a single resolution of once level" in {
      val query =
        graphql""" { root { name children(count: 5) { children(count: 5) { children(count: 5) { children(count: 5) { children(count: 5) { name } } childrenFut(count: 2) { children(count: 2) { name } } self { children(count: 3) { children(count: 3) { name } } } selfFut { children(count: 3) { children(count: 3) { name } } } } } } } } """

      val (resolver, _) = exec(query)

      // One resolver call per tree level, regardless of fan-out.
      resolver.callsCount.get should be(6)
      resolver.valueCount.get should be(2157)
    }

    "do not wait for future values" in {
      val query =
        graphql""" { root { name children(count: 3) { s1: selfFutComplex { children(count: 5) { children(count: 5) { name } } } s2: selfFutComplex { children(count: 5) { children(count: 5) { name } } } selfFut { children(count: 5) { children(count: 5) { name } } } selfFut { children(count: 5) { children(count: 5) { name } } } } } } """

      val (resolver, _) = exec(query)

      resolver.callsCount.get should be(16)
      resolver.valueCount.get should be(56)
    }

    "Group complex/expensive deferred values together" in {
      val query =
        graphql""" { rootFut { name c1: childrenComplex(count: 5) { self { childrenFut(count: 5) { name } } } c2: childrenComplex(count: 5) { self { childrenFut(count: 5) { name } } } childrenFut(count: 5) { self { childrenFut(count: 5) { name } } } } } """

      val (resolver, r) = exec(query)

      resolver.callsCount.get should be(5)
      resolver.valueCount.get should be(19)
    }

    // Failed deferreds surface as per-field errors with null values.
    "failed queries should be handled appropriately" in checkContainsErrors(
      schema,
      (),
      """ { fail1 {name} root {name} fail2 {name} } """,
      Map("fail1" -> null, "root" -> Map("name" -> "Cat root"), "fail2" -> null),
      List("foo" -> List(Pos(3, 11)), "foo" -> List(Pos(5, 11))),
      resolver = new MyDeferredResolver
    )

    "failed mutations should be handled appropriately" in checkContainsErrors(
      schema,
      (),
      """ mutation { fail1 {name} root {name} fail2 {name} } """,
      Map("fail1" -> null, "root" -> Map("name" -> "Cat root"), "fail2" -> null),
      List("foo" -> List(Pos(3, 11)), "foo" -> List(Pos(5, 11))),
      resolver = new MyDeferredResolver
    )
  }

  // Run the shared behavior under both execution contexts.
  "DeferredResolver" when {
    "using standard execution context" should {
      behave.like(deferredResolver(ExecutionContext.Implicits.global))
    }

    "using sync execution context" should {
      behave.like(deferredResolver(sync.executionContext))
    }
  }
}
sangria-graphql/sangria
modules/core/src/test/scala/sangria/execution/deferred/DeferredResolverSpec.scala
Scala
apache-2.0
9,072
package org.bitcoins.spvnode.serializers.messages.control

import org.bitcoins.core.util.BitcoinSUtil
import org.scalatest.{FlatSpec, MustMatchers}

/**
 * Created by chris on 8/26/16.
 */
class RawFilterAddMessageSerializerTest extends FlatSpec with MustMatchers {

  // Reference vector from https://bitcoin.org/en/developer-reference#filteradd
  // Wire layout: a compact-size element length ("20" = 32 bytes) followed by
  // the element bytes themselves.
  private val sizeHex = "20"
  private val elementHex = "fdacf9b3eb077412e7a968d2e4f11b9a9dee312d666187ed77ee7d26af16cb0b"
  val hex = sizeHex + elementHex

  "RawFilterAddMessageSerializer" must "deserialize a message in the bitcoin developer reference" in {
    val parsed = RawFilterAddMessageSerializer.read(hex)
    parsed.elementSize.hex must be (sizeHex)
    BitcoinSUtil.encodeHex(parsed.element) must be (elementHex)
  }

  it must "serialize a filter add message" in {
    // Round-trip: reading then re-serializing must reproduce the input hex.
    val roundTripped = RawFilterAddMessageSerializer.read(hex)
    roundTripped.hex must be (hex)
  }
}
Christewart/bitcoin-s-spv-node
src/test/scala/org/bitcoins/spvnode/serializers/messages/control/RawFilterAddMessageSerializerTest.scala
Scala
mit
909
/**
  * Copyright 2015 Thomson Reuters
  *
  * Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  *
  *   http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
  * an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  *
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
package k.grid.monitoring

import akka.actor._
import k.grid.Grid

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

/**
 * Created by michael on 7/5/15.
 */
// Trigger message: start crawling the actor hierarchy and report back to the sender.
case object PingActors
// One discovered actor: its path plus the Identify round-trip latency in millis.
case class ActorReport(name: String, latency: Long = 0L)
// Final answer sent to the original requester once the crawl has quiesced.
case class ActiveActors(actors: Set[ActorReport], host: String = "")
// Internal tick checking whether discovery has gone quiet.
case object CheckIfFinished

/**
 * Walks the local actor hierarchy breadth-first using `Identify`/`ActorIdentity`:
 * on PingActors it identifies every child of `/user`, and each reply triggers
 * identification of that actor's children in turn. When no new reply has
 * arrived for more than a second, the collected set is sent to the original
 * requester and the crawler kills itself.
 */
class ActorsCrawler extends Actor {
  // Accumulated reports; grows as ActorIdentity replies arrive.
  private[this] var s = Set.empty[ActorReport]
  // The requester to answer once crawling quiesces (set on PingActors).
  private[this] var senderVar: ActorRef = _
  // Time of the last reply. MaxValue makes currentTime - lastUpdate negative
  // until the first reply, so CheckIfFinished cannot fire prematurely.
  private[this] var lastUpdate: Long = Long.MaxValue

  private[this] def currentTime = System.currentTimeMillis

  override def receive: Receive = {
    case ActorIdentity(cId, refO) =>
      refO match {
        case Some(ref) =>
          // correlationId carries the send timestamp, so the difference is latency.
          val sentIn = cId.asInstanceOf[Long]
          s = s + ActorReport(ref.path.toString, currentTime - sentIn)
          lastUpdate = currentTime
          // Recurse: identify this actor's children as well.
          context.system.actorSelection(ref.path / "*") ! Identify(currentTime)
        case None =>
      }
    case CheckIfFinished =>
      // Quiescence heuristic: no reply for > 1s means the crawl is complete.
      if (currentTime - lastUpdate > 1000) {
        senderVar ! ActiveActors(s, Grid.thisMember.address)
        self ! PoisonPill
      }
    case PingActors =>
      senderVar = sender()
      context.system.actorSelection("/user/*") ! Identify(currentTime)
      // Poll for quiescence every second until we self-terminate.
      context.system.scheduler.schedule(0.seconds, 1.seconds, self, CheckIfFinished)
  }
}
nruppin/CM-Well
server/cmwell-grid/src/main/scala/k/grid/monitoring/ActorsCrawler.scala
Scala
apache-2.0
2,005
package org.jetbrains.plugins.scala.testingSupport.scalatest.singleTest

import org.jetbrains.plugins.scala.testingSupport.scalatest.generators.FlatSpecGenerator

/**
 * @author Roman.Shein
 * @since 20.01.2015.
 */
trait FlatSpecSingleTestTest extends FlatSpecGenerator {
  // Expected path of the selected test in the result tree, from root to leaf.
  val flatSpecTestPath = List("[root]", "FlatSpecTest", "A FlatSpecTest", "should be able to run single test")

  // Verifies that running from a caret inside one FlatSpec test executes only
  // that test: the generated config targets the single test name, the result
  // tree contains exactly its path, and the sibling test does not appear.
  def testFlatSpec() {
    addFlatSpec()

    // (7, 1) is the caret position inside the first test of the generated spec.
    runTestByLocation(7, 1, flatSpecFileName,
      checkConfigAndSettings(_, flatSpecClassName, "A FlatSpecTest should be able to run single test"),
      root => checkResultTreeHasExactNamedPath(root, flatSpecTestPath:_*) &&
          checkResultTreeDoesNotHaveNodes(root, "should not run other tests"),
      debug = true
    )
  }
}
triggerNZ/intellij-scala
test/org/jetbrains/plugins/scala/testingSupport/scalatest/singleTest/FlatSpecSingleTestTest.scala
Scala
apache-2.0
764
/*
 * Copyright 2016 The BigDL Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intel.analytics.bigdl.dllib.utils.tf.loaders

import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.T
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest

import scala.util.Random

// Serialization round-trip smoke test for the TF Slice op loader.
class SliceLoadTFSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val sliceLoadTF = new SliceLoadTF[Float]().setName("sliceLoadTF")
    // Input table: a random 3x2x3 tensor plus begin indices (0,1,1) and
    // sizes (2,-1,1) — presumably -1 means "to the end" as in TF's Slice;
    // TODO(review) confirm against the SliceLoadTF implementation.
    val input = T(Tensor[Float](3, 2, 3).apply1(_ => Random.nextFloat()),
      Tensor[Int](T(0, 1, 1)),
      Tensor[Int](T(2, -1, 1)))
    runSerializationTest(sliceLoadTF, input)
  }
}
intel-analytics/BigDL
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/utils/tf/loaders/SliceLoadTFSpec.scala
Scala
apache-2.0
1,219
/* * Copyright 2011 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.zeromq import com.sun.jna.{Library, Native, NativeLong, Pointer} import com.sun.jna.ptr.LongByReference import jnr.constants.platform.Errno import com.sun.jna.Memory import com.sun.jna.NativeLong import com.sun.jna.Pointer import com.sun.jna.ptr.LongByReference import java.util.{ Arrays, HashSet => JHashSet } import java.lang.{ Long ⇒ JLong, Integer ⇒ JInteger } import scala.beans.BeanProperty import scala.annotation.tailrec import concurrent.duration.{Duration, FiniteDuration} object ZeroMQ { /** Socket types */ val ZMQ_PAIR = 0 val ZMQ_PUB = 1 val ZMQ_SUB = 2 val ZMQ_REQ = 3 val ZMQ_REP = 4 val ZMQ_DEALER = 5 val ZMQ_ROUTER = 6 val ZMQ_PULL = 7 val ZMQ_PUSH = 8 val ZMQ_XPUB = 9 val ZMQ_XSUB = 10 /** Send / receive options */ val ZMQ_NOBLOCK = 1 val ZMQ_SNDMORE = 2 /** Socket options */ val ZMQ_HWM = 1 val ZMQ_SWAP = 3 val ZMQ_AFFINITY = 4 val ZMQ_IDENTITY = 5 val ZMQ_SUBSCRIBE = 6 val ZMQ_UNSUBSCRIBE = 7 val ZMQ_RATE = 8 val ZMQ_RECOVERY_IVL = 9 val ZMQ_MCAST_LOOP = 10 val ZMQ_SNDBUF = 11 val ZMQ_RCVBUF = 12 val ZMQ_RCVMORE = 13 val ZMQ_FD = 14 val ZMQ_EVENTS = 15 val ZMQ_TYPE = 16 val ZMQ_LINGER = 17 val ZMQ_RECONNECT_IVL = 18 val ZMQ_BACKLOG = 19 val ZMQ_RECONNECT_IVL_MAX = 21 val ZMQ_MAXMSGSIZE = 22 val ZMQ_SNDHWM = 23 val ZMQ_RCVHWM = 24 val ZMQ_MULTICAST_HOPS = 25 val ZMQ_RCVTIMEO = 27 val ZMQ_SNDTIMEO = 28 /** Built-in devices */ val ZMQ_STREAMER = 1 val 
ZMQ_FORWARDER = 2 val ZMQ_QUEUE = 3 /** Unix errors */ val EINVAL = Errno.EINVAL.intValue val EAGAIN = Errno.EAGAIN.intValue /** ZMQ errors */ val ZMQ_HAUSNUMERO = 156384712 val EFSM = ZMQ_HAUSNUMERO + 51 val ENOCOMPATPROTO = ZMQ_HAUSNUMERO + 52 val ETERM = ZMQ_HAUSNUMERO + 53 /** ZMQ message definition */ val ZMQ_MAX_VSM_SIZE = 30 val ZMQ_DELIMITER = 31 val ZMQ_VSM = 32 val ZMQ_MSG_MORE = 1 val ZMQ_MSG_SHARED = 128 val ZMQ_MSG_MASK = 129 /** IO multiplexing */ val ZMQ_POLLIN = 1: Short val ZMQ_POLLOUT = 2: Short val ZMQ_POLLERR = 4: Short def loadLibrary(): ZeroMQLibrary = Native.loadLibrary("zmq", classOf[ZeroMQLibrary]).asInstanceOf[ZeroMQLibrary] } trait ZeroMQLibrary extends Library { def zmq_bind(socket: Pointer, endpoint: String): Int def zmq_close(socket: Pointer): Int def zmq_connect(socket: Pointer, endpoint: String): Int def zmq_device(device: Int, frontend: Pointer, backend: Pointer): Int def zmq_errno: Int def zmq_getsockopt(socket: Pointer, option_name: Int, option_value: Pointer, option_len: LongByReference): Int def zmq_init(io_threads: Int): Pointer def zmq_msg_init(msg: zmq_msg_t): Int def zmq_msg_close(msg: zmq_msg_t): Int def zmq_msg_copy(dest: zmq_msg_t, src: zmq_msg_t): Int def zmq_msg_data(msg: zmq_msg_t): Pointer def zmq_msg_init_data(msg: zmq_msg_t, data: Pointer, size: NativeLong, ffn: zmq_free_fn, hint: Pointer): Int def zmq_msg_init_size(msg: zmq_msg_t, size: NativeLong): Int def zmq_msg_move(dest: zmq_msg_t, src: zmq_msg_t): Int def zmq_msg_size(msg: zmq_msg_t): Int def zmq_poll(items: Array[zmq_pollitem_t], nitems: Int, timeout: NativeLong): Int def zmq_recv(socket: Pointer, msg: zmq_msg_t, flags: Int): Int def zmq_send(socket: Pointer, msg: zmq_msg_t, flags: Int): Int def zmq_setsockopt(socket: Pointer, option_name: Int, option_value: Pointer, option_len: NativeLong): Int def zmq_socket(context: Pointer, socket_type: Int): Pointer def zmq_strerror(errnum: Int): String def zmq_term(context: Pointer): Int def zmq_version(major: 
Array[Int], minor: Array[Int], patch: Array[Int]): Unit } /** * ZMQException is used throughout the API to indicate failure * @param message the message to be used * @param errorCode the 0MQ Error Code */ final case class ZMQException(message: String, @BeanProperty val errorCode: Int) extends RuntimeException(message) /** * Offers an API similar to that of jzmq [1] written by Gonzalo Diethelm. * <p/> * 1. https://github.com/zeromq/jzmq */ object ZMQ { /** * Creates a composite version number * @param major the major version of 0MQ * @param minor the minor version of 0MQ * @param patch the patch version of 0MQ * @return major * 10000 + minor * 100 + patch */ def makeVersion(major: Int, minor: Int, patch: Int): Int = major * 10000 + minor * 100 + patch /** * Creates a new 0MQ Context with the specified number of IO Threads * @param ioThreads the number of ioThreads the Context should have * @return a newly created Context */ def context(ioThreads: Int): Context = new Context(ioThreads) private final val zmq: ZeroMQLibrary = ZeroMQ.loadLibrary @BeanProperty final val (majorVersion: Int, minorVersion: Int, patchVersion: Int, fullVersion: Int, versionString: String) = { val ma, mi, pa = Array[Int](0) zmq.zmq_version(ma, mi, pa) (ma(0), mi(0), pa(0), makeVersion(ma(0), mi(0), pa(0)), "%d.%d.%d".format(ma(0), mi(0), pa(0))) } final val NOBLOCK = ZeroMQ.ZMQ_NOBLOCK final val DONTWAIT = ZeroMQ.ZMQ_NOBLOCK final val PAIR = ZeroMQ.ZMQ_PAIR final val SNDMORE = ZeroMQ.ZMQ_SNDMORE final val PUB = ZeroMQ.ZMQ_PUB final val SUB = ZeroMQ.ZMQ_SUB final val REQ = ZeroMQ.ZMQ_REQ final val REP = ZeroMQ.ZMQ_REP final val XREQ = ZeroMQ.ZMQ_DEALER final val XREP = ZeroMQ.ZMQ_ROUTER final val DEALER = ZeroMQ.ZMQ_DEALER final val ROUTER = ZeroMQ.ZMQ_ROUTER final val PULL = ZeroMQ.ZMQ_PULL final val PUSH = ZeroMQ.ZMQ_PUSH final val STREAMER = ZeroMQ.ZMQ_STREAMER final val FORWARDER = ZeroMQ.ZMQ_FORWARDER final val QUEUE = ZeroMQ.ZMQ_QUEUE /** * Represents a 0MQ Context * @param ioThreads the 
number of IO Threads this Context should have */ class Context(ioThreads: Int) { protected[zeromq] final val ptr: Pointer = zmq.zmq_init(ioThreads) /** * Terminates this Context */ def term(): Unit = zmq.zmq_term(ptr) /** * Creates a new Socket in this Context with the given Socket Type * @param `type` the type of Socket, see: PAIR, PUB, SUB, REQ, REP, DEALER, ROUTER, PULL, PUSH, XPUB, XSUB * @return a newly created Socket of the given type */ def socket(`type`: Int): Socket = new Socket(this, `type`) /** * Creates a new Poller for this Context with default size of Sockets, 32 * @return a newly created Poller with the given size */ def poller(): Poller = poller(32) /** * Creates a new Poller for this Context * @return a newly created Poller with the given size */ def poller(size: Int): Poller = new Poller(this, size) } private final val versionBelow210 = fullVersion < makeVersion(2, 1, 0) private final val versionAtleast210 = !versionBelow210 private final val versionBelow220 = fullVersion < makeVersion(2, 2, 0) private final val versionAtleast220 = !versionBelow220 private final val versionBelow300 = fullVersion < makeVersion(3, 0, 0) private final val versionAtleast300 = !versionBelow300 /** * Represents a 0MQ Socket * @param context which Context the Socket belongs to * @param `type` the Socket Type (http://api.zeromq.org/2-1:zmq-socket) */ class Socket(context: ZMQ.Context, `type`: Int) { import ZeroMQ._ private[zeromq] final val ptr: Pointer = zmq.zmq_socket(context.ptr, `type`) private final val messageDataBuffer = new JHashSet[Pointer] with zmq_free_fn { override def invoke(data: Pointer, memory: Pointer): Unit = remove(memory) } /** * Closes this 0MQ Socket */ def close(): Unit = zmq.zmq_close(ptr) /** * Retrieves the Socket Type * @return -1 if version < 2.1, or the Socket Type */ def getType(): Int = if (versionBelow210) -1 else getLongSockopt(ZMQ_TYPE).asInstanceOf[Int] /** * Retrieves the Linger value for this Socket * @return -1 if version < 2.1, or 
the Linger Value */ def getLinger(): Int = if (versionBelow210) -1 else getIntSockopt(ZMQ_LINGER) /** * Retrieves the Reconnect Interval for this Socket * @return -1 if version < 2.1, or the Reconnect Interval */ def getReconnectIVL(): Int = if (versionBelow210) -1 else getIntSockopt(ZMQ_RECONNECT_IVL) /** * Retrieves the Backlog number for this Socket * @return -1 if version < 2.1, or the Backlog number */ def getBacklog(): Int = if (versionBelow210) -1 else getIntSockopt(ZMQ_BACKLOG) /** * Retrieves the Maximum Reconnect Interval for this Socket * @return -1 if version < 2.1, or the Maximum Reconnect Interval */ def getReconnectIVLMax(): Int = if (versionBelow210) -1 else getIntSockopt(ZMQ_RECONNECT_IVL_MAX) /** * Retrieves the Maximum Message Size for this Socket * @return -1 if version < 3.0, or the Maximum Message Size */ def getMaxMsgSize(): Long = if (versionBelow300) -1 else getLongSockopt(ZMQ_MAXMSGSIZE) /** * Retrieves the Send High Water Mark for this Socket * @return -1 if version < 3.0, or the Send High Water Mark */ def getSndHWM(): Int = if (versionBelow300) -1 else getIntSockopt(ZMQ_SNDHWM) /** * Retrieves the Receive High Water Mark for this Socket * @return -1 if version < 3.0, or the Receive High Water Mark */ def getRcvHWM(): Int = if (versionBelow300) -1 else getIntSockopt(ZMQ_RCVHWM) /** * Retrieves the High Water Mark for this Socket * @return -1 if version < 3.0, or the High Water Mark */ def getHWM(): Long = if (versionBelow300) getLongSockopt(ZMQ_HWM) else -1 /** * Retrieves the Swap size in bytes for this Socket * @return -1 if version < 3.0, or the Swap size in bytes */ def getSwap(): Long = if (versionBelow300) -1 else getLongSockopt(ZMQ_SWAP) /** * Retrieves the Affinity for this Socket * @return the Affinity */ def getAffinity(): Long = getLongSockopt(ZMQ_AFFINITY) /** * Retrieves the Identity of this Socket * @return the Identity of this Socket */ def getIdentity(): Array[Byte] = getBytesSockopt(ZMQ_IDENTITY) /** * Retrieves the 
multicast data rate for this Socket * @return the multicast data rate */ def getRate(): Long = getLongSockopt(ZMQ_RATE) /** * Retrieves the Recovery Interval for this Socket * @return the Recovery Interval in seconds */ def getRecoveryInterval(): Long = getLongSockopt(ZMQ_RECOVERY_IVL) /** * Retrieves whether this Socket has Multicast Loop enabled * @return false if version >= 3.0, or whether this Socket has Multicast Loop enabled */ def hasMulticastLoop(): Boolean = if (versionAtleast300) false else getLongSockopt(ZMQ_MCAST_LOOP) != 0 /** * Sets the maximum number of hops for multicast messages * @param mcast_hops the maximum number of hops */ def setMulticastHops(mcast_hops: Long): Unit = setLongSockopt(ZMQ_MCAST_LOOP, mcast_hops) /** * Retrieves the maximum number of hops for multicast messages for this Socket * @return -1 if version < 3.0, or the maximum number of hops for multicast messages */ def getMulticastHops(): Long = if (versionBelow300) -1 else getLongSockopt(ZMQ_MCAST_LOOP) /** * Sets the Receive Timeout for this Socket, if the 0MQ version is at least 2.2 * @param timeout in millis, -1 for infinity and 0 for no timeout */ def setReceiveTimeOut(timeout: Int): Unit = if (versionAtleast220) setIntSockopt(ZMQ_RCVTIMEO, timeout) /** * Retrieves the Receive Timeout for this Socket * @return -1 if version < 2.2, or the Receive Timeout */ def getReceiveTimeOut(): Int = if (versionBelow220) -1 else getIntSockopt(ZMQ_RCVTIMEO) /** * Sets the Send Timeout for this Socket, if the 0MQ version is at least 2.2 * @param timeout in milliseconds, -1 for infinity and 0 for no timeout */ def setSendTimeOut(timeout: Int): Unit = if (versionAtleast220) setIntSockopt(ZMQ_SNDTIMEO, timeout) /** * Retrieves the Send Timeout for this Socket * @return -1 if version < 2.2, or the Send Timeout */ def getSendTimeOut(): Int = if (versionBelow220) -1 else getIntSockopt(ZMQ_SNDTIMEO) /** * Retrieves the Send Buffer Size for this Socket * @return the Send Buffer Size */ def 
getSendBufferSize(): Long = getLongSockopt(ZMQ_SNDBUF) /** * Sets the Send Buffer Size for this Socket * @param sndbuf size in bytes */ def setSendBufferSize(sndbuf: Long): Unit = setLongSockopt(ZMQ_SNDBUF, sndbuf) /** * Retrieves the Receive Buffer Size for this Socket * @return the Receive Buffer Size */ def getReceiveBufferSize(): Long = getLongSockopt(ZMQ_RCVBUF) /** * Retrieves whether the last message that was received was a partial message with more to follow * @return true if more data is to follow, false if not */ def hasReceiveMore(): Boolean = getLongSockopt(ZMQ_RCVMORE) != 0 /** * Retrieves the File Descriptor for this Socket * @return -1 if version < 2.1, or the File Descriptor */ def getFD(): Long = if (versionBelow210) -1 else getLongSockopt(ZMQ_FD) /** * Retrieves the Event State for this Socket * @return -1 if version < 2.1, or a bit mask of ZMQ_POLLIN and ZMQ_POLLOUT depending if reading and/or writing is possible */ def getEvents(): Long = if (versionBelow210) -1 else getLongSockopt(ZMQ_EVENTS) /** * Sets the Linger period if the 0MQ version is at least 2.1 for this Socket * @param linger the linger period in millis, 0 to indicate no linger */ def setLinger(linger: Int): Unit = if (versionAtleast210) setIntSockopt(ZMQ_LINGER, linger) /** * Sets the Reconnect Interval, if the 0MQ version is at least 2.1, for this Socket * @param reconnectIVL in milliseconds, -1 indicates no reconnection */ def setReconnectIVL(reconnectIVL: Int): Unit = if (versionAtleast210) setIntSockopt(ZMQ_RECONNECT_IVL, reconnectIVL) /** * Sets the Backlog of connections, if the 0MQ version is at least 2.1, for this Socket * @param backlog in number of connections */ def setBacklog(backlog: Int): Unit = if (versionAtleast210) setIntSockopt(ZMQ_BACKLOG, backlog) /** * Sets the Maximum Reconnect Interval, if the 0MQ version is at least 2.1, for this Socket * @param reconnectIVLMax in milliseconds, 0 for no backoff, values less than reconnectIVL will be ignored */ def 
setReconnectIVLMax(reconnectIVLMax: Int): Unit = if (versionAtleast210) setIntSockopt(ZMQ_RECONNECT_IVL_MAX, reconnectIVLMax) /** * Sets the Maximum Message Size, if the 0MQ version is at least 3.0, for this Socket * @param maxMsgSize in bytes, -1 for no limit */ def setMaxMsgSize(maxMsgSize: Long): Unit = if (versionAtleast300) setLongSockopt(ZMQ_MAXMSGSIZE, maxMsgSize) /** * Sets the Send High Water Mark, if the 0MQ version is at least 3.0, for this Socket * @param sndHWM in number of messages */ def setSndHWM(sndHWM: Int): Unit = if (versionAtleast300) setIntSockopt(ZMQ_SNDHWM, sndHWM) /** * Sets the Receive High Water Mark, if the 0MQ version is at least 3.0, for this Socket * @param rcvHWM in number of messages */ def setRcvHWM(rcvHWM: Int): Unit = if (versionAtleast300) setIntSockopt(ZMQ_RCVHWM, rcvHWM) /** * Sets the High Water Mark, if the 0MQ version is < 3.0, for this Socket * @param hwm in number of messages, 0 means no limit */ def setHWM(hwm: Long): Unit = if (versionBelow300) setLongSockopt(ZMQ_HWM, hwm) /** * Sets the Swap, if the 0MQ version is > 3.0, for this Socket * @param swap in number of bytes */ def setSwap(swap: Long): Unit = if (versionAtleast300) setLongSockopt(ZMQ_SWAP, swap) /** * Sets the Affinity for this Socket * @param affinity a bit mask representing which IO Threads to assign affinity towards */ def setAffinity(affinity: Long): Unit = setLongSockopt(ZMQ_AFFINITY, affinity) /** * Sets the Identity of this Socket * @param identity at least 1 byte and at most 255 bytes */ def setIdentity(identity: Array[Byte]): Unit = setBytesSockopt(ZMQ_IDENTITY, identity) /** * Sets the Data Rate for multicast transports for this Socket * @param rate in kbits per second */ def setRate(rate: Long): Unit = setLongSockopt(ZMQ_RATE, rate) /** * Sets the Recovery Interval for this Socket * @param recoveryIVL in seconds */ def setRecoveryInterval(recoveryIVL: Long): Unit = setLongSockopt(ZMQ_RECOVERY_IVL, recoveryIVL) /** * Sets Multicast Loop, if 0MQ 
version < 3.0, for this Socket * @param mcast_loop true to enable, false to disable */ def setMulticastLoop(mcast_loop: Boolean): Unit = if (versionBelow300) setLongSockopt(ZMQ_MCAST_LOOP, if (mcast_loop) 1 else 0) /** * Sets the Receive Buffer Size for this Socket * @param rcvbuf in bytes, 0 means use OS default */ def setReceiveBufferSize(rcvbuf: Long): Unit = setLongSockopt(ZMQ_RCVBUF, rcvbuf) /** * Subscribes this Socket to a type of messages * @param topic empty array for all messages, non-empty array to prefix match inbound messages */ def subscribe(topic: Array[Byte]): Unit = setBytesSockopt(ZMQ_SUBSCRIBE, topic) /** * Unsubscribes this Socket from a type of messages * @param topic empty array for all messages, non-empty array to prefix match inbound messages */ def unsubscribe(topic: Array[Byte]): Unit = setBytesSockopt(ZMQ_UNSUBSCRIBE, topic) /** * Binds this Socket to an address * @param addr the address to bind to, according to: http://api.zeromq.org/2-1:zmq-bind */ def bind(addr: String): Unit = { if (zmq.zmq_bind(ptr, addr) != 0) { val errno = zmq.zmq_errno throw new ZMQException(zmq.zmq_strerror(errno), errno) } } /** * Connects this Socket to an address * @param addr the address to connect to, according to: http://api.zeromq.org/2-1:zmq-connect */ def connect(addr: String): Unit = { if (zmq.zmq_connect(ptr, addr) != 0) { val errno = zmq.zmq_errno throw new ZMQException(zmq.zmq_strerror(errno), errno) } } /** * Sends the given message to this Socket, see the following for more details: http://api.zeromq.org/2-1:zmq-send * @param msg the bytes to send * @param flags ZMQ_NOBLOCK or ZMQ_SNDMORE or both * @return true if send succeeded, false if NOBLOCK requested and EAGAIN returned by send call */ def send(msg: Array[Byte], flags: Int): Boolean = { val message = newZmqMessage(msg) if (zmq.zmq_send(ptr, message, flags) == 0) { if (zmq.zmq_msg_close(message) != 0) raiseZMQException() else true } else if (zmq.zmq_errno == EAGAIN) { if 
(zmq.zmq_msg_close(message) != 0) raiseZMQException() else false } else { zmq.zmq_msg_close(message) raiseZMQException() } } /** * Receives a message from this Socket, see the following for more details: http://api.zeromq.org/3-2:zmq-recv * @param flags * @return null if NOBLOCK was requested and EAGAIN was returned by recv call, else the bytes received */ def recv(flags: Int): Array[Byte] = { val message = newZmqMessage() if (zmq.zmq_recv(ptr, message, flags) == 0) { val dataByteArray = zmq.zmq_msg_data(message).getByteArray(0, zmq.zmq_msg_size(message)) if (zmq.zmq_msg_close(message) != 0) raiseZMQException() else dataByteArray } else if (zmq.zmq_errno == EAGAIN) { if (zmq.zmq_msg_close(message) != 0) raiseZMQException() else null } else { zmq.zmq_msg_close(message) raiseZMQException() } } override protected def finalize: Unit = close() private def getLongSockopt(option: Int): Long = { val value = new Memory(JLong.SIZE / 8) val length = new LongByReference(JLong.SIZE / 8) zmq.zmq_getsockopt(ptr, option, value, length) value.getLong(0) } private def setLongSockopt(option: Int, optval: Long): Unit = { val value = new Memory(JLong.SIZE / 8) value.setLong(0, optval) zmq.zmq_setsockopt(ptr, option, value, new NativeLong(JLong.SIZE / 8)) } private def getIntSockopt(option: Int): Int = { val value = new Memory(JInteger.SIZE / 8) zmq.zmq_getsockopt(ptr, option, value, new LongByReference(JInteger.SIZE / 8)) value.getInt(0) } private def setIntSockopt(option: Int, optval: Int): Unit = { val value = new Memory(JInteger.SIZE / 8) value.setInt(0, optval) zmq.zmq_setsockopt(ptr, option, value, new NativeLong(JInteger.SIZE / 8)) } private def getBytesSockopt(option: Int): Array[Byte] = { val value = new Memory(1024) val length = new LongByReference(1024) zmq.zmq_getsockopt(ptr, option, value, length) value.getByteArray(0, length.getValue.intValue) } private def setBytesSockopt(option: Int, optval: Array[Byte]): Unit = zmq.zmq_setsockopt(ptr, option, if (optval.length == 0) 
Pointer.NULL else { val mem = new Memory(optval.length) mem.write(0, optval, 0, optval.length) mem }, new NativeLong(optval.length)) private def newZmqMessage(msg: Array[Byte] = null): zmq_msg_t = { val message = new zmq_msg_t if (msg eq null) { if (zmq.zmq_msg_init(message) != 0) raiseZMQException() } else { msg.length match { case 0 ⇒ if (zmq.zmq_msg_init_size(message, new NativeLong(0)) != 0) raiseZMQException() case len ⇒ val mem = new Memory(len) mem.write(0, msg, 0, len) if (zmq.zmq_msg_init_data(message, mem, new NativeLong(len), messageDataBuffer, mem) != 0) raiseZMQException() else messageDataBuffer.add(mem) } } message } private def raiseZMQException(errno: Int = zmq.zmq_errno): Nothing = throw new ZMQException(zmq.zmq_strerror(errno), errno) } /** * A Poller is a constructs which makes it easy to which Sockets have inbound or outbound messages pending * @param context the 0MQ context this Poller belongs to * @param size the initial size of the Poller, in number of Sockets */ class Poller (context: ZMQ.Context, size: Int) { @BeanProperty var timeout: FiniteDuration = Duration(-1, "ms") private var nextEventIndex: Int = 0 private var maxEventCount: Int = size private var curEventCount: Int = 0 private var sockets: Array[ZMQ.Socket] = new Array(size) private var events: Array[Short] = new Array(size) private var revents: Array[Short] = new Array(size) private var freeSlots: List[Int] = Nil /** * Registers the specified Socket to all of ZMQ_POLLIN, ZMQ_POLLOUT and ZMQ_POLLERR * @param socket the socket which to register to this Poller * @return the index of the registered socket */ def register(socket: ZMQ.Socket): Int = register(socket, ZeroMQ.ZMQ_POLLIN | ZeroMQ.ZMQ_POLLOUT | ZeroMQ.ZMQ_POLLERR) /** * Registers the specified Socket to the specified types of Events * @param socket the socket which to register to this Poller * @return the index of the registered socket */ def register(socket: ZMQ.Socket, numEvents: Int): Int = { require(numEvents <= 
Short.MaxValue, "numEvents must be less or equal to Short.MaxValue") require(numEvents >= Short.MinValue, "numEvents must be greater or equal to Short.MinValue") //FIXME handle the case where Socket is already registered val pos: Int = freeSlots match { case Nil ⇒ if (nextEventIndex >= maxEventCount) { val newMaxEventCount: Int = maxEventCount + 16 if (newMaxEventCount > Short.MaxValue) throw new IllegalStateException("maxEventCount may not grow past Short.MaxValue!") sockets = Arrays.copyOf(sockets, newMaxEventCount) events = Arrays.copyOf(events, newMaxEventCount) revents = Arrays.copyOf(revents, newMaxEventCount) maxEventCount = newMaxEventCount } val p = nextEventIndex nextEventIndex += 1 p case free :: tail ⇒ freeSlots = tail free } sockets(pos) = socket events(pos) = numEvents.asInstanceOf[Short] curEventCount += 1 pos } /** * Unregisters the specified Socket from this Poller * @param socket the socket which to unregister from this Poller */ def unregister(socket: ZMQ.Socket): Unit = if (socket ne null) { @tailrec def unreg(index: Int): Boolean = if (index >= nextEventIndex || index >= sockets.length) false else if(sockets(index) eq socket) { sockets(index) = null events(index) = 0: Short revents(index) = 0: Short freeSlots ::= index curEventCount -= 1 true } else unreg(index + 1) unreg(0) } /** * a means to obtain the Socket at a given index * @param index the index for the Socket * @return the Socket at the given index, non-existing indices or absence of Socket at given index will yield null */ def getSocket(index: Int): ZMQ.Socket = if ((index < 0 || index >= nextEventIndex)) null else sockets(index) /** * Returns the current max size of this Poller */ def getSize(): Int = maxEventCount /** * Returns the index to the next event */ def getNext(): Int = nextEventIndex /** * Polls the registered Sockets using the current timeout of this Poller * @return how many items during the poll that yielded revents */ def poll(): Long = poll(this.timeout) /** * Polls 
the registered Sockets using a speficied timeout * @param timeout the timeout for the poll operation * @return how many items during the poll that yielded revents */ def poll(timeout: FiniteDuration): Long = { Arrays.fill(revents, 0, nextEventIndex, 0: Short) curEventCount match { case 0 ⇒ 0 case expectedEvents ⇒ // Goes through the items and either inits or collects revents depending on the "init" parameter, // returns the number of items procressed def withItems(items: Array[zmq_pollitem_t], init: Boolean): Int = { var itemIndex = 0 var socketIndex = 0 while (socketIndex < sockets.length) { if (sockets(socketIndex) ne null) { val item = items(itemIndex) if (init) { item.socket = sockets(socketIndex).ptr item.fd = 0 item.events = events(socketIndex) item.revents = 0: Short } else { revents(socketIndex) = items(itemIndex).revents } itemIndex += 1 } socketIndex += 1 } itemIndex } val items = new zmq_pollitem_t().toArray(expectedEvents).asInstanceOf[Array[zmq_pollitem_t]] withItems(items, init = true) match { case `expectedEvents` ⇒ val result: Int = zmq.zmq_poll( items, expectedEvents, new NativeLong(if (versionAtleast300) timeout.toMicros else timeout.toMillis) ) withItems(items, init = false) result case _ ⇒ 0 // Bail out } } } private def poll_mask(index: Int, mask: Int): Boolean = if ((mask <= 0 || index < 0 || index >= nextEventIndex)) false else (revents(index) & mask) > 0 /** * Returns whether there are any ZMQ_POLLIN events to consume * @param index * @return true if ZMQ_POLLIN is set for revents at the given index, false if not */ def pollin(index: Int): Boolean = poll_mask(index, ZeroMQ.ZMQ_POLLIN) /** * Returns whether there are any ZMQ_POLLOUT events to consume * @param index * @return true if ZMQ_POLLOUT is set for revents at the given index, false if not */ def pollout(index: Int): Boolean = poll_mask(index, ZeroMQ.ZMQ_POLLOUT) /** * Returns whether there are any ZMQ_POLLERR events to consume * @param index * @return true if ZMQ_POLLERR is set for 
revents at the given index, false if not */ def pollerr(index: Int): Boolean = poll_mask(index, ZeroMQ.ZMQ_POLLERR) } }
valotrading/zeromq-scala-binding
src/main/scala/org/zeromq/ZeroMQLibrary.scala
Scala
apache-2.0
29,807
package com.softwaremill.macwire.packages.child

import com.softwaremill.macwire._
import com.softwaremill.macwire.packages.A

// Test fixture: a module living in a *child* package that wires an instance of
// `A` declared in the parent package (`com.softwaremill.macwire.packages`).
// Presumably exercises macwire's ability to resolve dependencies across
// package boundaries — TODO confirm against the accompanying test suite.
trait ChildModule {
  val a = wire[A]
}
guersam/macwire
tests2/src/test/scala/com/softwaremill/macwire/packages/child/ChildModule.scala
Scala
apache-2.0
167
package com.eigengo.lift.exercise

import java.nio.ByteBuffer

import org.scalatest.{FlatSpec, Matchers}
import scodec.bits.ByteVector

import scalaz.{-\/, \/-}

/**
 * Tests for ``MultiPacketDecoder``: decoding of single- and multi-packet
 * messages, rejection of malformed headers and truncated payloads, and
 * round-trips of real captures from iOS devices.
 */
class MultiPacketDecoderTest extends FlatSpec with Matchers {
  import com.eigengo.lift.exercise.RichArray._

  /// writes payload of the given size at sloc and content.
  /// note that we pass size and content explicitly to allow us to construct
  /// badly formed payload
  private def payload(size: Int, sloc: Byte, content: Array[Byte]): Array[Byte] = {
    // Big-endian 16-bit size header: high byte, then low byte.
    val sizeh = (size >> 8).toByte
    // NOTE: the original expression `(size & 0xff00 >> 8)` produced the right
    // value only by accident: in Scala `>>` binds tighter than `&`, so it
    // evaluated as `size & (0xff00 >> 8)` == `size & 0xff`. Written explicitly:
    val sizel = (size & 0xff).toByte
    Array(sizeh, sizel, sloc) ++ content
  }

  /// generate incoming message for the given slocs, sizes and content
  private def generate(ts: Long, slocs: List[Byte], size: Byte ⇒ Int, content: (Byte, Int) ⇒ Array[Byte]): Array[Byte] = {
    // 0xcab1 is the MultiPacket magic header, followed by the packet count.
    val header: Array[Byte] = Array.apply(0xca.toByte, 0xb1.toByte, slocs.size.toByte)
    val timestamp: Array[Byte] = encodeTimestamp(ts)
    val payloads = slocs.map { sloc ⇒ val s = size(sloc); payload(s, sloc, content(sloc, s)) }
    payloads.foldLeft(header ++ timestamp)(_ ++ _)
  }

  /// big-endian encoding of the lower 32 bits of the timestamp
  private def encodeTimestamp(ts: Long): Array[Byte] = {
    val ts0 = ((ts & 0xff000000) >> 24).toByte
    val ts1 = ((ts & 0x00ff0000) >> 16).toByte
    val ts2 = ((ts & 0x0000ff00) >> 8).toByte
    val ts3 = (ts & 0x000000ff).toByte
    Array(ts0, ts1, ts2, ts3)
  }

  /// every source location reports the same payload size
  private def constSize(s: Int)(sloc: Byte): Int = s

  /// payload filled with a single repeated byte
  private def constContent(b: Byte)(sloc: Byte, size: Int): Array[Byte] = Array.fill(size)(b)

  /// deliberately returns no bytes regardless of the declared size,
  /// producing a truncated message
  private def badContent(sloc: Byte, size: Int): Array[Byte] = Array.empty

  "Single valid packet" should "decode" in {
    val \/-(x) = MultiPacketDecoder.decode(ByteBuffer.wrap(generate(12345678, List(0x01), constSize(1), constContent(0x00))))
    x.timestamp should be(12345678)
    x.packets(0).payload.getByte(0) should be(0)
  }

  "Multiple valid, max size packets" should "decode" in {
    val in = generate(12345678, List(0x01, 0x02, 0x03, 0x04, 0x7f), constSize(65535), constContent(0x7f))
    val \/-(x) = MultiPacketDecoder.decode(ByteBuffer.wrap(in))
    x.packets.size should be (5)
    x.packets.foreach(_.payload.getByte(0) should be(0x7f))
  }

  "Very badly malformed input" should "fail decoding" in {
    val -\/("No viable input: size < 10.") = MultiPacketDecoder.decode(ByteBuffer.wrap(Array.empty))
    val -\/("Incorrect header. Expected -13647, got 0.") = MultiPacketDecoder.decode(ByteBuffer.wrap(Array.fill(10)(0)))
    val -\/("No content.") = MultiPacketDecoder.decode(ByteBuffer.wrap(Array[Byte](0xca.toByte, 0xb1.toByte, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)))
  }

  "Malformed content" should "fail decoding" in {
    val in = generate(12345678, List(0x01, 0x02, 0x03, 0x04, 0x7f), constSize(65535), badContent)
    val -\/("Incomplete or truncated input. (65535 bytes payload of packet 0.)") = MultiPacketDecoder.decode(ByteBuffer.wrap(in))
  }

  "Real small MultiPacket from iOS" should "decode well" in {
    val arr = formNSDataString("cab10400 00000200 07020002 02010041 42000702 01020201 00313200 09020202 02020061 62616300 07010002 02010023 24")
    val \/-(decoded) = MultiPacketDecoder.decode(ByteBuffer.wrap(arr))
    decoded.timestamp should be (2)
    val phoneData = decoded.packets.filter(_.sourceLocation == SensorDataSourceLocationWaist)
    val pebbleData = decoded.packets.filter(_.sourceLocation == SensorDataSourceLocationWrist)
    phoneData.size should be (3)
    pebbleData.size should be (1)
    pebbleData(0).payload should be (ByteVector(0x00, 0x02, 0x02, 0x01, 0x00, 0x23, 0x24).toBitVector)
    phoneData(0).payload should be (ByteVector(0x00, 0x02, 0x02, 0x01, 0x00, 0x41, 0x42).toBitVector)
    phoneData(1).payload should be (ByteVector(0x01, 0x02, 0x02, 0x01, 0x00, 0x31, 0x32).toBitVector)
    phoneData(2).payload should be (ByteVector(0x02, 0x02, 0x02, 0x02, 0x00, 0x61, 0x62, 0x61, 0x63).toBitVector)
  }

  "Real big MultiPacket from iOS" should "decode well" in {
    val rootDecoder = RootSensorDataDecoder(AccelerometerDataDecoder, RotationDataDecoder)
    val bb = ByteBuffer.wrap(fromInputStream(getClass.getResourceAsStream("/ad-bd.mp")))
    val \/-(decoded) = MultiPacketDecoder.decode(bb)
    decoded.packets.foreach { pwl ⇒
      val \/-(samples) = rootDecoder.decodeAll(pwl.payload)
    }
  }
}
eigengo/lift
server/exercise/src/test/scala/com/eigengo/lift/exercise/MultiPacketDecoderTest.scala
Scala
apache-2.0
4,354
/***********************************************************************
 * Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 *************************************************************************/

package org.locationtech.geomesa.hbase.filters

import com.typesafe.scalalogging.LazyLogging
import org.apache.hadoop.hbase.exceptions.DeserializationException
import org.apache.hadoop.hbase.filter.{Filter, FilterBase}
import org.apache.hadoop.hbase.{Cell, CellUtil}
import org.locationtech.geomesa.index.filters.{Z2Filter, Z3Filter}

/**
 * Server-side HBase filter that evaluates a GeoMesa Z3 (space + time) curve
 * filter against each cell's row key.
 *
 * @param filt the Z3 bounds predicate to apply to row keys
 */
class Z3HBaseFilter(filt: Z3Filter) extends FilterBase with LazyLogging {

  // Includes the cell when its row key is inside the Z3 bounds, otherwise
  // skips it. Reads the backing row array in place (offset/length) — no copy.
  override def filterKeyValue(v: Cell): Filter.ReturnCode = {
    logger.trace("In filterKeyValue()")
    if (filt.inBounds(v.getRowArray, v.getRowOffset, v.getRowLength)) {
      Filter.ReturnCode.INCLUDE
    } else {
      Filter.ReturnCode.SKIP
    }
  }

  // Serializes the wrapped Z3Filter for shipping to region servers.
  override def toByteArray: Array[Byte] = {
    logger.trace("Serializing Z3HBaseFilter")
    Z3Filter.toByteArray(filt)
  }
}

object Z3HBaseFilter extends LazyLogging {
  // Deserialization counterpart of toByteArray — presumably invoked
  // reflectively by HBase when reconstructing the filter server-side
  // (NOTE(review): confirm against the HBase Filter contract).
  @throws[DeserializationException]
  def parseFrom(pbBytes: Array[Byte]): Filter = {
    logger.debug("Deserializing Z3HBaseFilter")
    new Z3HBaseFilter(Z3Filter.fromByteArray(pbBytes))
  }
}

/**
 * Server-side HBase filter that evaluates a GeoMesa Z2 (spatial) curve
 * filter against each cell's row key.
 *
 * @param filt the Z2 bounds predicate to apply to row keys
 */
class Z2HBaseFilter(filt: Z2Filter) extends FilterBase with LazyLogging {

  // Unlike the Z3 variant above, this copies the row key before checking
  // bounds because Z2Filter.inBounds takes a whole array.
  override def filterKeyValue(v: Cell): Filter.ReturnCode = {
    // TODO GEOMESA-1805 can we avoid the clone?
    logger.trace("In filterKeyValue()")
    if (filt.inBounds(CellUtil.cloneRow(v))) {
      Filter.ReturnCode.INCLUDE
    } else {
      Filter.ReturnCode.SKIP
    }
  }

  // Serializes the wrapped Z2Filter for shipping to region servers.
  override def toByteArray: Array[Byte] = {
    logger.trace("Serializing Z2HBaseFilter")
    Z2Filter.toByteArray(filt)
  }
}

object Z2HBaseFilter extends LazyLogging {
  // Deserialization counterpart of toByteArray — presumably invoked
  // reflectively by HBase (NOTE(review): confirm against the Filter contract).
  @throws[DeserializationException]
  def parseFrom(pbBytes: Array[Byte]): Filter = {
    logger.debug("Deserializing Z2HBaseFilter")
    new Z2HBaseFilter(Z2Filter.fromByteArray(pbBytes))
  }
}
spandanagrawal/geomesa
geomesa-hbase/geomesa-hbase-datastore/src/main/scala/org/locationtech/geomesa/hbase/filters/HBaseZFilters.scala
Scala
apache-2.0
2,222
package blended.jms.bridge.internal

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.stream.Materializer
import blended.container.context.api.ContainerIdentifierService
import blended.jms.bridge._
import blended.jms.bridge.internal.BridgeController.{AddConnectionFactory, RemoveConnectionFactory}
import blended.jms.utils.{IdAwareConnectionFactory, JmsDestination}
import blended.streams.transaction.FlowHeaderConfig
import blended.streams.{StreamController, StreamControllerConfig}
import blended.util.config.Implicits._
import blended.util.logging.Logger
import com.typesafe.config.Config

import scala.collection.JavaConverters._
import scala.util.{Failure, Success}
import scala.concurrent.duration._

private[bridge] object BridgeControllerConfig {

  // Builds the controller configuration from the Typesafe config:
  // resolves all provider and inbound entries and validates that exactly
  // one provider is flagged as the bridge's internal provider.
  def create(
    cfg : Config,
    internalCf : IdAwareConnectionFactory,
    idSvc : ContainerIdentifierService
  ) : BridgeControllerConfig = {

    val headerCfg = FlowHeaderConfig.create(idSvc)

    // Each "provider" entry must parse; `.get` will throw on a bad entry,
    // failing the whole configuration fast.
    val providerList = cfg.getConfigList("provider").asScala.map { p =>
      BridgeProviderConfig.create(idSvc, p).get
    }.toList

    // "inbound" is optional and defaults to the empty list.
    val inboundList : List[InboundConfig] = cfg.getConfigList("inbound", List.empty).map { i =>
      InboundConfig.create(idSvc, i).get
    }

    // Exactly one internal provider: zero or more than one is a fatal
    // misconfiguration; the single-element case falls through.
    providerList.filter(_.internal) match {
      case Nil => throw new Exception("Exactly one provider must be marked as the internal provider for the JMS bridge.")
      case _ :: Nil =>
      case _ => throw new Exception("Exactly one provider must be marked as the internal provider for the JMS bridge.")
    }

    val registry = new BridgeProviderRegistry(providerList)

    BridgeControllerConfig(
      internalCf = internalCf,
      registry = registry,
      headerCfg = headerCfg,
      inbound = inboundList,
      idSvc = idSvc,
      rawConfig = cfg
    )
  }
}

// Immutable snapshot of everything the BridgeController actor needs:
// the internal connection factory, the provider registry, header config,
// inbound route definitions and the raw config for downstream consumers.
private[bridge] case class BridgeControllerConfig(
  internalCf : IdAwareConnectionFactory,
  registry : BridgeProviderRegistry,
  headerCfg : FlowHeaderConfig,
  inbound : List[InboundConfig],
  idSvc : ContainerIdentifierService,
  rawConfig : Config
)

object BridgeController{

  // Protocol: notify the controller that a JMS connection factory has
  // appeared / disappeared, so bridge streams can be started / stopped.
  case class AddConnectionFactory(cf : IdAwareConnectionFactory)
  case class RemoveConnectionFactory(cf : IdAwareConnectionFactory)

  def props(ctrlCfg: BridgeControllerConfig)(implicit system : ActorSystem, materializer: Materializer) : Props =
    Props(new BridgeController(ctrlCfg))
}

/**
 * Actor managing the lifecycle of JMS bridge streams: for every connection
 * factory it is told about, it spins up one outbound stream and one inbound
 * stream per matching [[InboundConfig]], and tears them down again on removal.
 */
class BridgeController(ctrlCfg: BridgeControllerConfig)(implicit system : ActorSystem, materializer: Materializer) extends Actor{

  private[this] val log = Logger[BridgeController]

  // This is the map of active streams, keyed by stream name
  private[this] var streams : Map[String, ActorRef] = Map.empty

  // Creates an inbound stream from `cf`/`in.from` into the internal provider.
  // For external providers the target destination is suffixed with
  // "<vendor>.<provider>" to keep per-provider destinations apart.
  private[this] def createInboundStream(in : InboundConfig, cf : IdAwareConnectionFactory, internal: Boolean) : Unit = {
    val toDest = if (internal) {
      JmsDestination.create(ctrlCfg.registry.internalProvider.get.inbound.asString).get
    } else {
      JmsDestination.create(
        ctrlCfg.registry.internalProvider.get.inbound.asString + "." + cf.vendor + "." + cf.provider
      ).get
    }

    val inCfg = JmsStreamConfig(
      inbound = true,
      fromCf = cf,
      fromDest = in.from,
      toCf = ctrlCfg.internalCf,
      toDest = Some(toDest),
      listener = in.listener,
      selector = in.selector,
      registry = ctrlCfg.registry,
      headerCfg = ctrlCfg.headerCfg,
      // Inbound messages are always transaction-tracked.
      trackTransaction = TrackTransaction.On,
      subscriberName = in.subscriberName,
      header = in.header,
      idSvc = Some(ctrlCfg.idSvc),
      rawConfig = ctrlCfg.rawConfig,
      sessionRecreateTimeout = in.sessionRecreateTimeout
    )

    val streamCfg: StreamControllerConfig = new JmsStreamBuilder(inCfg).streamCfg

    streams += (streamCfg.name -> context.actorOf(StreamController.props(streamCfg)))
  }

  // Creates the outbound stream from the internal provider towards `cf`,
  // mirroring the destination-suffix convention used for inbound streams.
  private[this] def createOutboundStream(cf : IdAwareConnectionFactory, internal : Boolean) : Unit = {
    val fromDest = if (internal) {
      JmsDestination.create(ctrlCfg.registry.internalProvider.get.outbound.asString).get
    } else {
      JmsDestination.create(
        ctrlCfg.registry.internalProvider.get.outbound.asString + "." + cf.vendor + "." + cf.provider
      ).get
    }

    // TODO: Make listener count configurable
    val outCfg = JmsStreamConfig(
      inbound = false,
      headerCfg = ctrlCfg.headerCfg,
      fromCf = ctrlCfg.internalCf,
      fromDest = fromDest,
      toCf = cf,
      toDest = None,
      listener = 3,
      selector = None,
      registry = ctrlCfg.registry,
      // Outbound: whether to track the transaction is decided per message.
      trackTransaction = TrackTransaction.FromMessage,
      subscriberName = None,
      header = List.empty,
      rawConfig = ctrlCfg.rawConfig,
      sessionRecreateTimeout = 1.second
    )

    val streamCfg: StreamControllerConfig = new JmsStreamBuilder(outCfg).streamCfg

    streams += (streamCfg.name -> context.actorOf(StreamController.props(streamCfg)))
  }

  override def receive: Receive = {
    case AddConnectionFactory(cf) =>
      log.info(s"Adding connection factory [${cf.id}]")

      ctrlCfg.registry.internalProvider match {
        case Success(p) =>
          // The new factory IS the internal provider iff vendor and provider match.
          val internal = p.vendor == cf.vendor && p.provider == cf.provider

          // Create inbound streams for all matching inbound configs
          val inbound : List[InboundConfig] = ctrlCfg.inbound.filter { in =>
            ProviderFilter(in.vendor, in.provider).matches(cf)
          }

          log.debug(s"Creating Streams for inbound destinations : [${inbound.mkString(",")}]")
          inbound.foreach { in =>
            createInboundStream(in, cf, internal)
          }

          createOutboundStream(cf, internal)
        case Failure(_) =>
          log.warn("No internal JMS provider found in config")
      }

    case RemoveConnectionFactory(cf) =>
      log.info(s"Removing connection factory [${cf.vendor}:${cf.provider}]")

      // Stop and forget every stream whose name belongs to this factory.
      // NOTE(review): keys are matched by `startsWith(cf.id)` — assumes stream
      // names are prefixed with the factory id; confirm in JmsStreamBuilder.
      streams.filter{ case (key, _) => key.startsWith(cf.id) }.foreach { case (id, stream) =>
        log.info(s"Stopping stream [$id]")
        stream ! StreamController.Stop
        streams -= id
      }
  }
}
lefou/blended
blended.jms.bridge/src/main/scala/blended/jms/bridge/internal/BridgeController.scala
Scala
apache-2.0
6,045
package test

/**
 * Minimal long-running entry point used by the sbt-native-packager RPM
 * scripted test: it only needs to stay alive so the packaged service
 * can be observed running.
 */
object Main {
  // Explicit `: Unit =` replaces the deprecated Scala 2 procedure syntax.
  def main(args: Array[String]): Unit = {
    // server app imitation: block forever, waking once a second
    while (true) {
      Thread.sleep(1000L)
    }
  }
}
joescii/sbt-native-packager
src/sbt-test/rpm/test-executableScriptName/src/main/scala/test/Main.scala
Scala
bsd-2-clause
145
/*
 * Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
 *
 * This program is licensed to you under the Apache License Version 2.0,
 * and you may not use this file except in compliance with the Apache License Version 2.0.
 * You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the Apache License Version 2.0 is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
 */
package com.snowplowanalytics
package snowplow
package enrich
package common
package enrichments.registry

// Maven Artifact
import org.apache.maven.artifact.versioning.DefaultArtifactVersion

// Java
import java.lang.{Float => JFloat}

// Scala
import scala.util.control.NonFatal

// Scalaz
import scalaz._
import Scalaz._

// Joda time
import org.joda.time.{DateTime, DateTimeZone}

// json4s
import org.json4s.{DefaultFormats, JObject, JValue}
import org.json4s.Extraction
import org.json4s.JsonDSL._

// Iglu
import iglu.client.SchemaKey

// Scala-Weather
import com.snowplowanalytics.weather.providers.openweather.OwmCacheClient
import com.snowplowanalytics.weather.providers.openweather.Responses._

// Iglu
import iglu.client.SchemaCriterion

// This project
import utils.ScalazJson4sUtils
import enrichments.EventEnrichments

/**
 * Companion object. Lets us create an WeatherEnrichment instance from a JValue
 */
object WeatherEnrichmentConfig extends ParseableEnrichment {
  implicit val formats = DefaultFormats

  // Only 1-0-x instances of this enrichment config schema are accepted.
  val supportedSchema =
    SchemaCriterion("com.snowplowanalytics.snowplow.enrichments", "weather_enrichment_config", "jsonschema", 1, 0)

  // Extracts the five required parameters from the config JSON; any missing
  // or mistyped field surfaces as a failure in the ValidatedNelMessage.
  def parse(config: JValue, schemaKey: SchemaKey): ValidatedNelMessage[WeatherEnrichment] =
    isParseable(config, schemaKey).flatMap { conf =>
      {
        (for {
          apiKey       <- ScalazJson4sUtils.extract[String](config, "parameters", "apiKey")
          cacheSize    <- ScalazJson4sUtils.extract[Int](config, "parameters", "cacheSize")
          geoPrecision <- ScalazJson4sUtils.extract[Int](config, "parameters", "geoPrecision")
          apiHost      <- ScalazJson4sUtils.extract[String](config, "parameters", "apiHost")
          timeout      <- ScalazJson4sUtils.extract[Int](config, "parameters", "timeout")
          enrich = WeatherEnrichment(apiKey, cacheSize, geoPrecision, apiHost, timeout)
        } yield enrich).toValidationNel
      }
    }
}

/**
 * Contains weather enrichments based on geo coordinates and time
 *
 * @param apiKey weather provider API KEY
 * @param cacheSize amount of days with prefetched weather
 * @param geoPrecision rounder for geo lat/long floating, which allows to use
 * more spatial precise weather stamps
 * @param apiHost address of weather provider's API host
 * @param timeout timeout in seconds to fetch weather from server
 */
case class WeatherEnrichment(apiKey: String, cacheSize: Int, geoPrecision: Int, apiHost: String, timeout: Int) extends Enrichment {
  // Lazily constructed so no network client exists until the first lookup.
  private lazy val client = OwmCacheClient(apiKey, cacheSize, geoPrecision, apiHost, timeout)
  private val schemaUri = "iglu:org.openweathermap/weather/jsonschema/1-0-0"
  private implicit val formats = DefaultFormats

  /**
   * Get weather context as JSON for specific event
   * Any non-fatal error will return failure and thus whole event will be
   * filtered out in future
   *
   * @param latitude enriched event optional latitude (probably null)
   * @param longitude enriched event optional longitude (probably null)
   * @param time enriched event optional time (probably null)
   * @return weather stamp as self-describing JSON object
   */
  // It accepts Java Float (JFloat) instead of Scala's because it will throw NullPointerException
  // on conversion step if `EnrichedEvent` has nulls as geo_latitude or geo_longitude
  def getWeatherContext(latitude: Option[JFloat], longitude: Option[JFloat], time: Option[DateTime]): Validation[String, JObject] =
    try {
      getWeather(latitude, longitude, time).map(addSchema)
    } catch {
      // NonFatal only: fatal errors (OOM etc.) are allowed to propagate.
      case NonFatal(exc) => exc.toString.fail
    }

  /**
   * Get weather stamp as JSON received from OpenWeatherMap and extracted with Scala Weather
   *
   * @param latitude enriched event optional latitude
   * @param longitude enriched event optional longitude
   * @param time enriched event optional time
   * @return weather stamp as JSON object
   */
  private def getWeather(latitude: Option[JFloat], longitude: Option[JFloat], time: Option[DateTime]): Validation[String, JObject] =
    (latitude, longitude, time) match {
      case (Some(lat), Some(lon), Some(t)) =>
        // Event time is converted from millis to epoch seconds for the client.
        getCachedOrRequest(lat, lon, (t.getMillis / 1000).toInt).flatMap { weatherStamp =>
          val transformedWeather = transformWeather(weatherStamp)
          Extraction.decompose(transformedWeather) match {
            case obj: JObject => obj.success
            case _ => s"Couldn't transform weather object $transformedWeather into JSON".fail // Shouldn't ever happen
          }
        }
      case _ => s"One of required event fields missing. latitude: $latitude, longitude: $longitude, tstamp: $time".fail
    }

  /**
   * Return weather, convert disjunction to validation and stringify error
   *
   * @param latitude event latitude
   * @param longitude event longitude
   * @param timestamp event timestamp
   * @return optional weather stamp
   */
  private def getCachedOrRequest(latitude: Float, longitude: Float, timestamp: Int): Validation[String, Weather] =
    client.getCachedOrRequest(latitude, longitude, timestamp) match {
      case Right(w) => w.success
      case Left(e) => e.toString.failure
    }

  /**
   * Add Iglu URI to JSON Object
   *
   * @param context weather context as JSON Object
   * @return JSON Object wrapped as Self-describing JSON
   */
  private def addSchema(context: JObject): JObject =
    ("schema", schemaUri) ~ (("data", context))

  /**
   * Apply all necessary transformations (currently only dt(epoch -> db timestamp)
   * from `weather.providers.openweather.Responses.Weather` to `TransformedWeather`
   * for further JSON decomposition
   *
   * @param origin original OpenWeatherMap Weather stamp
   * @return tranfsormed weather
   */
  private[enrichments] def transformWeather(origin: Weather): TransformedWeather = {
    // dt arrives as epoch seconds; render as an ISO-8601 UTC timestamp string.
    val time = new DateTime(origin.dt.toLong * 1000, DateTimeZone.UTC).toString
    TransformedWeather(origin.main, origin.wind, origin.clouds, origin.rain, origin.snow, origin.weather, time)
  }
}

/**
 * Copy of `com.snowplowanalytics.weather.providers.openweather.Responses.Weather` intended to
 * execute typesafe (as opposed to JSON) transformation
 */
private[enrichments] case class TransformedWeather(main: MainInfo, wind: Wind, clouds: Clouds, rain: Option[Rain], snow: Option[Snow], weather: List[WeatherCondition], dt: String)
RetentionGrid/snowplow
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/enrichments/registry/WeatherEnrichment.scala
Scala
apache-2.0
7,498
package codecheck.github.transport.asynchttp20

import org.asynchttpclient.{AsyncHttpClient, Response => AsyncHttpResponse, AsyncCompletionHandler, BoundRequestBuilder}
import codecheck.github.transport.{Transport, Request, Response, CompletionHandler}

/**
 * Transport implementation backed by async-http-client 2.x.
 * Each prepareXxx call wraps the client's request builder in an AsyncHttp20Request.
 */
class AsyncHttp20Transport(client: AsyncHttpClient) extends Transport {

  def prepareGet(url: String): Request =
    new AsyncHttp20Request(client.prepareGet(url))

  def preparePost(url: String): Request =
    new AsyncHttp20Request(client.preparePost(url))

  def preparePut(url: String): Request =
    new AsyncHttp20Request(client.preparePut(url))

  def prepareDelete(url: String): Request =
    new AsyncHttp20Request(client.prepareDelete(url))

  /** Releases the underlying HTTP client's resources. */
  def close: Unit = client.close()
}

/**
 * Fluent request wrapper: each setter mutates the wrapped builder and
 * returns this wrapper so calls can be chained.
 */
class AsyncHttp20Request(request: BoundRequestBuilder) extends Request {

  // Applies one mutation to the wrapped builder, then returns this for chaining.
  private def chained(mutation: => Unit): Request = {
    mutation
    this
  }

  def setBody(body: String): Request = chained(request.setBody(body))

  def setHeader(name: String, value: String): Request = chained(request.setHeader(name, value))

  def setFollowRedirect(b: Boolean): Request = chained(request.setFollowRedirect(b))

  def addFormParam(name: String, value: String): Request = chained(request.addFormParam(name, value))

  /** Fires the request; completion/failure is reported through `handler`. */
  def execute(handler: CompletionHandler): Unit = {
    val bridge = new AsyncCompletionHandler[AsyncHttpResponse]() {
      // Forward the completed response to the caller's handler.
      def onCompleted(completed: AsyncHttpResponse) = {
        handler.onCompleted(new AsyncHttp20Response(completed))
        completed
      }
      // Notify the caller's handler first, then run default error handling.
      override def onThrowable(t: Throwable): Unit = {
        handler.onThrowable(t)
        super.onThrowable(t)
      }
    }
    request.execute(bridge)
  }
}

/**
 * Response wrapper; the body is None when the underlying client returns null.
 */
class AsyncHttp20Response(response: AsyncHttpResponse) extends Response {

  def getResponseBody: Option[String] = Option(response.getResponseBody())

  def getStatusCode: Int = response.getStatusCode
}
code-check/github-api-scala
src/main/scala/codecheck/github/transport/asynchttp20/AsyncHttp20Transport.scala
Scala
mit
1,797
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package unittest.io.pivotal.gemfire.spark.connector

import com.gemstone.gemfire.cache.Region
import io.pivotal.gemfire.spark.connector.{GemFireConnection, GemFireConnectionConf}
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.DStream
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, FunSuite}
import org.mockito.Matchers.{eq => mockEq, any => mockAny}

import scala.reflect.ClassTag

/**
 * Unit tests for the GemFire DStream save functions: checks that the
 * implicit conversions apply and that saveToGemfire validates the region
 * and registers a foreachRDD action.
 */
class GemFireDStreamFunctionsTest extends FunSuite with Matchers with MockitoSugar {

  test("test GemFirePairDStreamFunctions Implicit") {
    import io.pivotal.gemfire.spark.connector.streaming._
    val mockDStream = mock[DStream[(Int, String)]]
    // the implicit make the following line valid
    val pairDStream: GemFirePairDStreamFunctions[Int, String] = mockDStream
    pairDStream shouldBe a[GemFirePairDStreamFunctions[_, _]]
  }

  test("test GemFireDStreamFunctions Implicit") {
    import io.pivotal.gemfire.spark.connector.streaming._
    val mockDStream = mock[DStream[String]]
    // the implicit make the following line valid
    val dstream: GemFireDStreamFunctions[String] = mockDStream
    dstream shouldBe a[GemFireDStreamFunctions[_]]
  }

  /**
   * Creates the common mock fixture: a connection conf whose getConnection
   * returns a mock connection, plus a mock region.
   *
   * @param regionPath region path handed back unchanged for convenience
   */
  def createMocks[K, V](regionPath: String)
    (implicit kt: ClassTag[K], vt: ClassTag[V], m: Manifest[Region[K, V]])
    : (String, GemFireConnectionConf, GemFireConnection, Region[K, V]) = {
    val mockConnection = mock[GemFireConnection]
    val mockConnConf = mock[GemFireConnectionConf]
    val mockRegion = mock[Region[K, V]]
    when(mockConnConf.getConnection).thenReturn(mockConnection)
    when(mockConnConf.locators).thenReturn(Seq.empty)
    (regionPath, mockConnConf, mockConnection, mockRegion)
  }

  test("test GemFirePairDStreamFunctions.saveToGemfire()") {
    import io.pivotal.gemfire.spark.connector.streaming._
    val (regionPath, mockConnConf, mockConnection, mockRegion) = createMocks[String, String]("test")
    val mockDStream = mock[DStream[(String, String)]]
    mockDStream.saveToGemfire(regionPath, mockConnConf)
    verify(mockConnConf).getConnection
    verify(mockConnection).validateRegion[String, String](regionPath)
    verify(mockDStream).foreachRDD(mockAny[(RDD[(String, String)]) => Unit])
  }

  test("test GemFireDStreamFunctions.saveToGemfire()") {
    import io.pivotal.gemfire.spark.connector.streaming._
    val (regionPath, mockConnConf, mockConnection, mockRegion) = createMocks[String, Int]("test")
    val mockDStream = mock[DStream[String]]
    mockDStream.saveToGemfire[String, Int](regionPath, (s: String) => (s, s.length), mockConnConf)
    verify(mockConnConf).getConnection
    // Fixed: the region is keyed [String, Int] here, so verify with the matching
    // type arguments (previously [String, String]; erased at runtime, but misleading).
    verify(mockConnection).validateRegion[String, Int](regionPath)
    verify(mockDStream).foreachRDD(mockAny[(RDD[String]) => Unit])
  }
}
sshcherbakov/incubator-geode
gemfire-spark-connector/gemfire-spark-connector/src/test/scala/unittest/io/pivotal/gemfire/spark/connector/GemFireDStreamFunctionsTest.scala
Scala
apache-2.0
3,605
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.streaming.api.scala.function.util

import org.apache.flink.api.common.functions.RuntimeContext
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.windowing.{ProcessWindowFunction => JProcessWindowFunction}
import org.apache.flink.streaming.api.functions.windowing.{RichProcessWindowFunction => JRichProcessWindowFunction}
import org.apache.flink.streaming.api.functions.windowing.{RichProcessAllWindowFunction => JRichProcessAllWindowFunction}
import org.apache.flink.streaming.api.functions.windowing.{ProcessAllWindowFunction => JProcessAllWindowFunction}
import org.apache.flink.streaming.api.scala.function.{ProcessWindowFunction => ScalaProcessWindowFunction}
import org.apache.flink.streaming.api.scala.function.{ProcessAllWindowFunction => ScalaProcessAllWindowFunction}
import org.apache.flink.streaming.api.scala.function.{RichProcessWindowFunction => ScalaRichProcessWindowFunction}
import org.apache.flink.streaming.api.scala.function.{RichProcessAllWindowFunction => ScalaRichProcessAllWindowFunction}
import org.apache.flink.streaming.api.windowing.windows.Window
import org.apache.flink.util.Collector

import scala.collection.JavaConverters._

/**
 * A wrapper function that exposes a Scala ProcessWindowFunction
 * as a Java ProcessWindowFunction.
 *
 * The Scala and Java Window functions differ in their type of "Iterable":
 *   - Scala WindowFunction: scala.Iterable
 *   - Java WindowFunction: java.lang.Iterable
 */
final class ScalaProcessWindowFunctionWrapper[IN, OUT, KEY, W <: Window](
    private[this] val func: ScalaProcessWindowFunction[IN, OUT, KEY, W])
    extends JRichProcessWindowFunction[IN, OUT, KEY, W] {

  /**
   * Adapts the Java-side per-window context to the Scala function's Context.
   * Extracted so `process` and `clear` build the identical adapter.
   */
  private def scalaContext(
      context: JProcessWindowFunction[IN, OUT, KEY, W]#Context): func.Context = new func.Context {
    override def window = context.window
    override def currentProcessingTime = context.currentProcessingTime
    override def currentWatermark = context.currentWatermark
    override def windowState = context.windowState()
    override def globalState = context.globalState()
  }

  override def process(
      key: KEY,
      context: JProcessWindowFunction[IN, OUT, KEY, W]#Context,
      elements: java.lang.Iterable[IN],
      out: Collector[OUT]): Unit = {
    // Convert Java Iterable to Scala Iterable before delegating
    func.process(key, scalaContext(context), elements.asScala, out)
  }

  override def clear(context: JProcessWindowFunction[IN, OUT, KEY, W]#Context): Unit = {
    func.clear(scalaContext(context))
  }

  // The Rich* lifecycle calls are forwarded only when the wrapped Scala
  // function is itself a rich function.
  override def setRuntimeContext(t: RuntimeContext): Unit = {
    super.setRuntimeContext(t)
    func match {
      case rfunc: ScalaRichProcessWindowFunction[IN, OUT, KEY, W] => rfunc.setRuntimeContext(t)
      case _ =>
    }
  }

  override def open(parameters: Configuration): Unit = {
    super.open(parameters)
    func match {
      case rfunc: ScalaRichProcessWindowFunction[IN, OUT, KEY, W] => rfunc.open(parameters)
      case _ =>
    }
  }

  override def close(): Unit = {
    super.close()
    func match {
      case rfunc: ScalaRichProcessWindowFunction[IN, OUT, KEY, W] => rfunc.close()
      case _ =>
    }
  }
}

/**
 * A wrapper function that exposes a Scala ProcessAllWindowFunction
 * as a Java ProcessAllWindowFunction.
 *
 * The Scala and Java Window functions differ in their type of "Iterable":
 *   - Scala WindowFunction: scala.Iterable
 *   - Java WindowFunction: java.lang.Iterable
 */
final class ScalaProcessAllWindowFunctionWrapper[IN, OUT, W <: Window](
    private[this] val func: ScalaProcessAllWindowFunction[IN, OUT, W])
    extends JRichProcessAllWindowFunction[IN, OUT, W] {

  /**
   * Adapts the Java-side all-window context to the Scala function's Context.
   * Extracted so `process` and `clear` build the identical adapter.
   */
  private def scalaContext(
      context: JProcessAllWindowFunction[IN, OUT, W]#Context): func.Context = new func.Context {
    override def window = context.window
    override def windowState = context.windowState()
    override def globalState = context.globalState()
  }

  override def process(
      context: JProcessAllWindowFunction[IN, OUT, W]#Context,
      elements: java.lang.Iterable[IN],
      out: Collector[OUT]): Unit = {
    func.process(scalaContext(context), elements.asScala, out)
  }

  override def clear(context: JProcessAllWindowFunction[IN, OUT, W]#Context): Unit = {
    func.clear(scalaContext(context))
  }

  override def setRuntimeContext(t: RuntimeContext): Unit = {
    super.setRuntimeContext(t)
    func match {
      case rfunc : ScalaRichProcessAllWindowFunction[IN, OUT, W] => rfunc.setRuntimeContext(t)
      case _ =>
    }
  }

  override def open(parameters: Configuration): Unit = {
    super.open(parameters)
    func match {
      case rfunc : ScalaRichProcessAllWindowFunction[IN, OUT, W] => rfunc.open(parameters)
      case _ =>
    }
  }

  override def close(): Unit = {
    super.close()
    func match {
      case rfunc : ScalaRichProcessAllWindowFunction[IN, OUT, W] => rfunc.close()
      case _ =>
    }
  }
}
hwstreaming/flink
flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/function/util/ScalaProcessWindowFunctionWrapper.scala
Scala
apache-2.0
6,014
package org.jetbrains.plugins.scala
package debugger.evaluation.evaluator

import java.io.File
import java.net.URI
import java.util

import com.intellij.codeInsight.CodeInsightUtilCore
import com.intellij.debugger.engine.evaluation._
import com.intellij.debugger.engine.evaluation.expression.{ExpressionEvaluator, Modifier}
import com.intellij.debugger.engine.{ContextUtil, DebugProcess, SuspendContextImpl}
import com.intellij.debugger.jdi.VirtualMachineProxyImpl
import com.intellij.debugger.{DebuggerInvocationUtil, EvaluatingComputable}
import com.intellij.openapi.module.ModuleUtilCore
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.io.FileUtil
import com.intellij.psi.{PsiElement, PsiFileFactory}
import com.sun.jdi._
import org.jetbrains.plugins.scala.debugger.evaluation._
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScBlock, ScBlockStatement}
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunctionDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScClass
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory

import scala.annotation.tailrec
import scala.collection.JavaConverters._

/**
 * Evaluator that compiles a code fragment into a helper class, injects the
 * compiled classes into the debuggee VM via a fresh URLClassLoader, and then
 * evaluates a call to the generated class.
 *
 * Nikolay.Tropin
 * 2014-10-09
 */
class ScalaCompilingEvaluator(psiContext: PsiElement, fragment: ScalaCodeFragment)
  extends ExpressionEvaluator {

  import org.jetbrains.plugins.scala.debugger.evaluation.evaluator.ScalaCompilingEvaluator._

  private val project = inReadAction(psiContext.getProject)
  // Compiles the fragment into GeneratedEvaluatorClass$<id> at construction time
  private val generatedClass = GeneratedClass(fragment, psiContext)
  // Class loader living in the debuggee VM; recreated if GC'd there.
  // NOTE(review): null-initialized var, guarded by the null check in evaluate()
  private var classLoader: ClassLoaderReference = null

  override def getValue: Value = null

  override def getModifier: Modifier = null

  /**
   * Three-phase evaluation: (1) ensure a live class loader in the debuggee VM,
   * (2) define the compiled classes there, (3) build and run an evaluator for
   * the `new GeneratedEvaluatorClass$id().invoke()` call.
   * Each phase wraps failures in an EvaluateException with a phase-specific message.
   */
  override def evaluate(evaluationContext: EvaluationContext): Value = {
    val process: DebugProcess = evaluationContext.getDebugProcess
    try {
      if (classLoader == null || classLoader.isCollected) classLoader = getClassLoader(evaluationContext)
    }
    catch {
      case e: Exception =>
        throw new EvaluateException("Error creating evaluation class loader:\n " + e, e)
    }
    try {
      defineClasses(generatedClass.compiledClasses, evaluationContext, process, classLoader)
    }
    catch {
      case e: Exception =>
        throw new EvaluateException("Error during classes definition:\n " + e, e)
    }
    try {
      val evaluator = callEvaluator(evaluationContext)
      evaluationContext.asInstanceOf[EvaluationContextImpl].setClassLoader(classLoader)
      evaluator.evaluate(evaluationContext)
    }
    catch {
      case e: Exception =>
        throw new EvaluateException("Error during generated code invocation:\n " + e, e)
    }
  }

  // Builds (inside a read action) an evaluator for the generated call expression,
  // anchored at the generated class's insertion point.
  private def callEvaluator(evaluationContext: EvaluationContext): ExpressionEvaluator =
    DebuggerInvocationUtil.commitAndRunReadAction(project, new EvaluatingComputable[ExpressionEvaluator] {
      override def compute(): ExpressionEvaluator = {
        val callCode = new TextWithImportsImpl(CodeFragmentKind.CODE_BLOCK, generatedClass.callText)
        val codeFragment = new ScalaCodeFragmentFactory().createCodeFragment(callCode, generatedClass.getAnchor, project)
        ScalaEvaluatorBuilder.build(codeFragment, ContextUtil.getSourcePosition(evaluationContext))
      }
    })

  /**
   * Defines each compiled class in the debuggee VM by invoking
   * ClassLoader.defineClass(name, bytes, 0, length) on the remote class loader,
   * skipping classes that are already prepared there.
   */
  private def defineClasses(classes: Seq[OutputFileObject], context: EvaluationContext,
                            process: DebugProcess, classLoader: ClassLoaderReference): Unit = {
    if (classes.isEmpty) throw EvaluationException("Could not compile generated class")
    val proxy: VirtualMachineProxyImpl = process.getVirtualMachineProxy.asInstanceOf[VirtualMachineProxyImpl]
    def alreadyDefined(clsName: String) = {
      proxy.classesByName(clsName).asScala.exists(refType => refType.isPrepared)
    }
    val classLoaderType = classLoader.referenceType.asInstanceOf[ClassType]
    val defineMethod: Method = classLoaderType.concreteMethodByName("defineClass", "(Ljava/lang/String;[BII)Ljava/lang/Class;")
    for (cls <- classes if !alreadyDefined(cls.origName)) {
      val bytes: Array[Byte] = cls.toByteArray
      val args: util.ArrayList[Value] = new util.ArrayList[Value]
      val name: StringReference = proxy.mirrorOf(cls.origName)
      keep(name, context) // pin mirror so it is not GC'd before the invoke
      args.add(name)
      args.add(mirrorOf(bytes, context, process))
      args.add(proxy.mirrorOf(0))
      args.add(proxy.mirrorOf(bytes.length))
      process.invokeMethod(context, classLoader, defineMethod, args)
      process.findClass(context, cls.origName, classLoader) // force class preparation
    }
  }

  // Creates a new java.net.URLClassLoader in the debuggee VM, parented to the
  // context class loader, and pins it against GC.
  private def getClassLoader(context: EvaluationContext): ClassLoaderReference = {
    val process = context.getDebugProcess
    val loaderClass = process.findClass(context, "java.net.URLClassLoader", context.getClassLoader).asInstanceOf[ClassType]
    val ctorMethod = loaderClass.concreteMethodByName("<init>", "([Ljava/net/URL;Ljava/lang/ClassLoader;)V")
    val threadReference: ThreadReference = context.getSuspendContext.getThread.getThreadReference
    val args = util.Arrays.asList(createURLArray(context), context.getClassLoader)
    val reference = loaderClass.newInstance(threadReference, ctorMethod, args, ClassType.INVOKE_SINGLE_THREADED)
      .asInstanceOf[ClassLoaderReference]
    keep(reference, context)
    reference
  }
}

object ScalaCompilingEvaluator {

  // Pins a remote object reference for the lifetime of the suspend context
  // so the debuggee VM's GC cannot collect it mid-evaluation.
  private def keep(reference: ObjectReference, context: EvaluationContext) {
    context.getSuspendContext.asInstanceOf[SuspendContextImpl].keep(reference)
  }

  // Builds a one-element java.net.URL[] in the debuggee VM containing the
  // dummy URL "file:a" (the loader's classes are supplied via defineClass,
  // so the URL content is never read).
  private def createURLArray(context: EvaluationContext): ArrayReference = {
    val process = context.getDebugProcess
    val arrayType = process.findClass(context, "java.net.URL[]", context.getClassLoader).asInstanceOf[ArrayType]
    val arrayRef = arrayType.newInstance(1)
    keep(arrayRef, context)
    val classType = process.findClass(context, "java.net.URL", context.getClassLoader).asInstanceOf[ClassType]
    val proxy: VirtualMachineProxyImpl = process.getVirtualMachineProxy.asInstanceOf[VirtualMachineProxyImpl]
    val threadReference: ThreadReference = context.getSuspendContext.getThread.getThreadReference
    val url = proxy.mirrorOf("file:a")
    keep(url, context)
    val ctorMethod = classType.concreteMethodByName("<init>", "(Ljava/lang/String;)V")
    val reference = classType.newInstance(threadReference, ctorMethod, util.Arrays.asList(url),
      ClassType.INVOKE_SINGLE_THREADED)
    keep(reference, context)
    arrayRef.setValues(util.Arrays.asList(reference))
    arrayRef
  }

  // Mirrors a byte array into the debuggee VM element by element.
  private def mirrorOf(bytes: Array[Byte], context: EvaluationContext, process: DebugProcess): ArrayReference = {
    val arrayClass: ArrayType = process.findClass(context, "byte[]", context.getClassLoader).asInstanceOf[ArrayType]
    val reference: ArrayReference = process.newInstance(arrayClass, bytes.length)
    keep(reference, context)
    bytes.zipWithIndex.foreach {
      // NOTE(review): `b` is unused (bytes(i) is read instead) and the
      // trailing `case _` is unreachable for a (Byte, Int) tuple
      case (b, i) => reference.setValue(i, process.getVirtualMachineProxy.asInstanceOf[VirtualMachineProxyImpl].mirrorOf(bytes(i)))
      case _ =>
    }
    reference
  }
}

/** Compiled class file on disk plus the original (dotted) class name. */
class OutputFileObject(file: File, val origName: String) {
  private def getUri(name: String): URI = {
    URI.create("memo:///" + name.replace('.', '/') + ".class")
  }

  def getName: String = getUri(origName).getPath
  def toByteArray: Array[Byte] = FileUtil.loadFileBytes(file)
}

/**
 * Wraps the code fragment into a local class `GeneratedEvaluatorClass$<id>`
 * inserted into a copy of the source file at the evaluation position, and
 * compiles that copy. `callText` invokes the generated class.
 */
private class GeneratedClass(fragment: ScalaCodeFragment, context: PsiElement, id: Int) {

  private val project: Project = context.getProject

  val generatedClassName = "GeneratedEvaluatorClass$" + id
  val generatedMethodName = "invoke"
  val callText = s"new $generatedClassName().$generatedMethodName()"

  // NOTE(review): null until init() completes; init() is invoked below
  var compiledClasses: Seq[OutputFileObject] = null

  private var anchor: PsiElement = null

  // Insertion point of the generated class inside the copied file
  def getAnchor = anchor

  init()

  private def init(): Unit = {
    val file = context.getContainingFile

    // work with a copy of the file so the user's source is never modified
    val copy = PsiFileFactory.getInstance(project)
      .createFileFromText(file.getName, file.getFileType, file.getText, file.getModificationStamp, false)
    val range = context.getTextRange
    val copyContext: PsiElement = CodeInsightUtilCore.findElementInRange(copy, range.getStartOffset, range.getEndOffset,
      context.getClass, file.getLanguage)

    if (copyContext == null) throw EvaluationException("Could not evaluate due to a change in a source file")

    val clazz = localClass(fragment, copyContext)
    addLocalClass(copyContext, clazz)
    compileGeneratedClass(copy.getText)
  }

  private def compileGeneratedClass(fileText: String): Unit = {
    val module = inReadAction(ModuleUtilCore.findModuleForPsiElement(context))

    if (module == null) throw EvaluationException("Could not evaluate due to a change in a source file")

    // Allow an external compile-helper extension to take over, else use the default
    val helper = EvaluatorCompileHelper.EP_NAME.getExtensions.headOption.getOrElse {
      ScalaEvaluatorCompileHelper.instance(project)
    }
    val compiled = helper.compile(fileText, module)
    compiledClasses = compiled.collect {
      case (f, name) if name.contains(generatedClassName) => new OutputFileObject(f, name)
    }
  }

  /**
   * Inserts the generated class (plus a dummy `new GeneratedEvaluatorClass$id()`
   * expression used as the evaluation anchor) just before the statement that
   * encloses the evaluation position, wrapping the statement in braces first
   * when its parent cannot hold multiple statements.
   */
  private def addLocalClass(context: PsiElement, scClass: ScClass): Unit = {
    // Walks up to the nearest block statement and its parent
    @tailrec
    def findAnchorAndParent(elem: PsiElement): (ScBlockStatement, PsiElement) = elem match {
      case (stmt: ScBlockStatement) childOf (b: ScBlock) => (stmt, b)
      case (stmt: ScBlockStatement) childOf (funDef: ScFunctionDefinition) if funDef.body == Some(stmt) => (stmt, funDef)
      case (elem: PsiElement) childOf (other: ScBlockStatement) => findAnchorAndParent(other)
      case (stmt: ScBlockStatement) childOf (nonExpr: PsiElement) => (stmt, nonExpr)
      case _ => throw EvaluationException("Could not compile local class in this context")
    }

    var (prevParent, parent) = findAnchorAndParent(context)

    val needBraces = parent match {
      case _: ScBlock | _: ScTemplateBody => false
      case _ => true
    }

    if (needBraces) {
      // Wrap the single statement in a block so we can insert siblings before it
      val newBlock = ScalaPsiElementFactory.createExpressionWithContextFromText(s"{\n${prevParent.getText}\n}", prevParent.getContext, prevParent)
      parent = prevParent.replace(newBlock)
      parent match {
        case bl: ScBlock =>
          anchor = bl.statements(0)
        case _ => throw EvaluationException("Could not compile local class in this context")
      }
    }
    else {
      anchor = prevParent
    }

    val newInstance = ScalaPsiElementFactory.createExpressionWithContextFromText(s"new $generatedClassName()", anchor.getContext, anchor)

    parent.addBefore(scClass, anchor)
    parent.addBefore(ScalaPsiElementFactory.createNewLine(context.getManager), anchor)
    parent.addBefore(newInstance, anchor)
    parent.addBefore(ScalaPsiElementFactory.createNewLine(context.getManager), anchor)
  }

  // Builds the generated class text: the fragment body plus its imports
  // (rebased to _root_) inside a single `invoke()` method.
  private def localClass(fragment: ScalaCodeFragment, context: PsiElement) = {
    val fragmentImports = fragment.importsToString().split(",").filter(!_.isEmpty).map("import _root_." + _)
    val importsText = fragmentImports.mkString("\n")
    //todo type parameters?
    val text =
     s"""|class $generatedClassName {
         |  def $generatedMethodName() = {
         |    $importsText
         |
         |    ${fragment.getText}
         |  }
         |}""".stripMargin
    ScalaPsiElementFactory.createTemplateDefinitionFromText(text, context.getContext, context).asInstanceOf[ScClass]
  }
}

private object GeneratedClass {
  // NOTE(review): mutable counter, not thread-safe — presumably only used
  // from the debugger thread; confirm before relying on it elsewhere
  var counter = 0

  def apply(fragment: ScalaCodeFragment, context: PsiElement) = {
    counter += 1
    new GeneratedClass(fragment, context, counter)
  }
}
triggerNZ/intellij-scala
src/org/jetbrains/plugins/scala/debugger/evaluation/evaluator/ScalaCompilingEvaluator.scala
Scala
apache-2.0
11,631
package typeclass.syntax

import typeclass.Functor

// Exercise placeholder: syntax (extension-method) helpers for Functor.
// The FunctorOps implicit class is meant to be implemented here.
object functor {
  // implicit class FunctorOps
}
julien-truffaut/Typeclass
exercise/src/main/scala/typeclass/syntax/functor.scala
Scala
mit
101
/*
 * Scala (https://www.scala-lang.org)
 *
 * Copyright EPFL and Lightbend, Inc.
 *
 * Licensed under Apache License 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 *
 * See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.
 */

// GENERATED CODE: DO NOT EDIT. See scala.Function0 for timestamp.

package scala


/** A function of 16 parameters.
 *
 */
trait Function16[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, -T13, -T14, -T15, -T16, +R] extends AnyRef { self =>
  /** Apply the body of this function to the arguments.
   *  @return   the result of function application.
   */
  def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11, v12: T12, v13: T13, v14: T14, v15: T15, v16: T16): R
  /** Creates a curried version of this function.
   *
   *  @return   a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12)(x13)(x14)(x15)(x16) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16)`
   */
  @annotation.unspecialized def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => R = {
    // Peels off the first parameter, then delegates to the 15-ary function's curried
    (x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16)).curried
  }
  /** Creates a tupled version of this function: instead of 16 arguments,
   *  it accepts a single [[scala.Tuple16]] argument.
   *
   *  @return   a function `f` such that `f((x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16)) == f(Tuple16(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16)) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16)`
   */
  @annotation.unspecialized def tupled: ((T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16)) => R = {
    case ((x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16)) => apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16)
  }
  override def toString(): String = "<function16>"
}
lrytz/scala
src/library/scala/Function16.scala
Scala
apache-2.0
2,241
package pl.touk.nussknacker.engine.api.context.transformation

/**
 * Value supplied for a declared node dependency during node transformation.
 */
sealed trait NodeDependencyValue

/** Dependency value carrying an arbitrary, already-resolved object. */
case class TypedNodeDependencyValue(value: Any) extends NodeDependencyValue

/** Dependency value carrying the name of the node's output variable. */
case class OutputVariableNameValue(name: String) extends NodeDependencyValue
TouK/nussknacker
components-api/src/main/scala/pl/touk/nussknacker/engine/api/context/transformation/NodeDependencyValue.scala
Scala
apache-2.0
250
/*
 * Copyright (c) 2002-2018 "Neo Technology,"
 * Network Engine for Objects in Lund AB [http://neotechnology.com]
 *
 * This file is part of Neo4j.
 *
 * Neo4j is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package org.neo4j.cypher.internal.compiler.v2_3.helpers

import java.lang.{Iterable => JavaIterable}
import java.util.{Map => JavaMap}

import scala.collection.JavaConverters._
import scala.collection.{Map, Seq}

/**
 * Extractor that matches any value castToIterable recognizes as a collection
 * (arrays, Traversables, Java Iterables) and yields it as an Iterable.
 */
object IsCollection extends CollectionSupport {
  def unapply(x: Any):Option[Iterable[Any]] = {
    val collection = isCollection(x)
    if (collection) {
      Some(makeTraversable(x))
    } else {
      None
    }
  }
}

trait CollectionSupport {

  /**
   * Wraps an iterator so that consuming a second element throws `or`.
   * The first next() succeeds; every later next() fails.
   */
  def singleOr[T](in:Iterator[T], or: => Exception):Iterator[T] = new Iterator[T] {
    var used = false
    def hasNext: Boolean = in.hasNext
    def next(): T = {
      if(used) {
        throw or
      }
      used = true
      in.next()
    }
  }

  // Internal control-flow exception used by liftAsCollection (see below)
  class NoValidValuesExceptions extends Exception

  /** True when castToIterable can treat x as a collection. */
  def isCollection(x: Any) = castToIterable.isDefinedAt(x)

  /**
   * Lifts `input` into Some(collection of T) when either the single value or
   * every element of the collection satisfies `test`; None otherwise.
   * NOTE(review): uses NoValidValuesExceptions as control flow to abort the
   * element-wise map on the first non-matching element.
   */
  def liftAsCollection[T](test: PartialFunction[Any, T])(input: Any): Option[Iterable[T]] = try {
    input match {
      case single if test.isDefinedAt(single) => Some(Seq(test(single)))
      case IsCollection(coll) =>
        val mappedCollection = coll map {
          case elem if test.isDefinedAt(elem) => test(elem)
          case _ => throw new NoValidValuesExceptions
        }
        Some(mappedCollection)
      case _ => None
    }
  } catch {
    case _: NoValidValuesExceptions => None
  }

  /**
   * Maps every element through `test`, or yields None if any element is not
   * covered. NOTE(review): the `return None` executes inside the map lambda —
   * a nonlocal return implemented via exception; deliberate here, but it would
   * misbehave if the mapping were lazy.
   */
  def asCollectionOf[T](test: PartialFunction[Any, T])(input: Iterable[Any]): Option[Iterable[T]] =
    Some(input map { (elem: Any) => if (test.isDefinedAt(elem)) test(elem) else return None })

  /**
   * Normalizes any value to an Iterable: collections pass through (converted),
   * null becomes empty, and any other value becomes a singleton.
   */
  def makeTraversable(z: Any): Iterable[Any] = if (castToIterable.isDefinedAt(z)) {
    castToIterable(z)
  } else {
    if (z == null) Iterable() else Iterable(z)
  }

  // Defines what counts as a collection. Note: Maps (Scala or Java) are
  // treated as single values wrapped in a one-element Iterable, while
  // elements of a Java Iterable that are Java Maps are converted to Scala.
  protected def castToIterable: PartialFunction[Any, Iterable[Any]] = {
    case x: Array[_] => x
    case x: Map[_, _] => Iterable(x)
    case x: JavaMap[_, _] => Iterable(x.asScala)
    case x: Traversable[_] => x.toIterable
    case x: JavaIterable[_] => x.asScala.map {
      case y: JavaMap[_, _] => y.asScala
      case y => y
    }
  }

  implicit class RichSeq[T](inner: Seq[T]) {

    /**
     * Maps each element while threading an accumulator left-to-right;
     * returns the final accumulator plus the rewritten sequence.
     */
    def foldMap[A](acc: A)(f: (A, T) => (A, T)): (A, Seq[T]) = {
      val builder = Seq.newBuilder[T]
      var current = acc
      for (element <- inner) {
        val (newAcc, newElement) = f(current, element)
        current = newAcc
        builder += newElement
      }
      (current, builder.result())
    }

    /** Some(seq) when non-empty, None otherwise. */
    def asNonEmptyOption = if (inner.isEmpty) None else Some(inner)
  }
}
HuangLS/neo4j
community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/helpers/CollectionSupport.scala
Scala
apache-2.0
3,342
package com.twitter_typesafe.config.impl

import org.junit.Assert._
import org.junit.Test

/**
 * Tests for the concrete-syntax-tree (ConfigNode*) representation: a node
 * built from tokens must render() back to exactly the text it came from, and
 * setValueOnPath must edit that text while preserving surrounding formatting.
 * Node/token factory helpers (configNode*, node*, token*) come from TestUtils.
 */
class ConfigNodeTest extends TestUtils {

  // A node built from a single token must render back to that token's text.
  private def singleTokenNodeTest(token: Token) {
    val node = configNodeSingleToken(token);
    assertEquals(node.render(), token.tokenText())
  }

  // A key node must render back to the original path string.
  private def keyNodeTest(path: String) {
    val node = configNodeKey(path)
    assertEquals(path, node.render())
  }

  // A simple-value node must render back to its token's text.
  private def simpleValueNodeTest(token: Token) {
    val node = configNodeSimpleValue(token)
    assertEquals(node.render(), token.tokenText())
  }

  // Builds a key/value field node, checks its rendering and path()/value()
  // accessors, then replaces the value via replaceValue and re-checks.
  private def fieldNodeTest(key: ConfigNodePath, value: AbstractConfigNodeValue, newValue: AbstractConfigNodeValue) {
    val keyValNode = nodeKeyValuePair(key, value)
    assertEquals(key.render() + " : " + value.render(), keyValNode.render())
    assertEquals(key.render, keyValNode.path().render())
    assertEquals(value.render, keyValNode.value().render())
    val newKeyValNode = keyValNode.replaceValue(newValue)
    assertEquals(key.render() + " : " + newValue.render(), newKeyValNode.render())
    assertEquals(newValue.render(), newKeyValNode.value().render())
  }

  // Replaces the value at `key` inside a one-entry object node and checks that
  // only the value text changes in the rendering.
  private def topLevelValueReplaceTest(value: AbstractConfigNodeValue, newValue: AbstractConfigNodeValue, key: String = "foo") {
    val complexNodeChildren = List(nodeOpenBrace, nodeKeyValuePair(configNodeKey(key), value), nodeCloseBrace)
    val complexNode = configNodeObject(complexNodeChildren)
    val newNode = complexNode.setValueOnPath(key, newValue)
    val origText = "{" + key + " : " + value.render() + "}"
    val finalText = "{" + key + " : " + newValue.render() + "}"
    assertEquals(origText, complexNode.render())
    assertEquals(finalText, newNode.render())
  }

  // With three duplicate entries for key "foo", setValueOnPath must collapse
  // them into a single entry holding the new value (15).
  private def replaceDuplicatesTest(value1: AbstractConfigNodeValue, value2: AbstractConfigNodeValue, value3: AbstractConfigNodeValue) {
    val key = configNodeKey("foo")
    val keyValPair1 = nodeKeyValuePair(key, value1)
    val keyValPair2 = nodeKeyValuePair(key, value2)
    val keyValPair3 = nodeKeyValuePair(key, value3)
    val complexNode = configNodeObject(List(keyValPair1, keyValPair2, keyValPair3))
    val origText = keyValPair1.render() + keyValPair2.render() + keyValPair3.render()
    val finalText = key.render() + " : 15"
    assertEquals(origText, complexNode.render())
    assertEquals(finalText, complexNode.setValueOnPath("foo", nodeInt(15)).render())
  }

  // Setting a value on a path that does not exist yet must append the new
  // entry after the existing ones.
  private def nonExistentPathTest(value: AbstractConfigNodeValue) {
    val node = configNodeObject(List(nodeKeyValuePair(configNodeKey("bar"), nodeInt(15))))
    assertEquals("bar : 15", node.render())
    val newNode = node.setValueOnPath("foo", value)
    val finalText = "bar : 15, foo : " + value.render()
    assertEquals(finalText, newNode.render())
  }

  @Test
  def createBasicConfigNode() {
    //Ensure a ConfigNodeSingleToken can handle all its required token types
    singleTokenNodeTest(Tokens.START)
    singleTokenNodeTest(Tokens.END)
    singleTokenNodeTest(Tokens.OPEN_CURLY)
    singleTokenNodeTest(Tokens.CLOSE_CURLY)
    singleTokenNodeTest(Tokens.OPEN_SQUARE)
    singleTokenNodeTest(Tokens.CLOSE_SQUARE)
    singleTokenNodeTest(Tokens.COMMA)
    singleTokenNodeTest(Tokens.EQUALS)
    singleTokenNodeTest(Tokens.COLON)
    singleTokenNodeTest(Tokens.PLUS_EQUALS)
    singleTokenNodeTest(tokenUnquoted(" "))
    singleTokenNodeTest(tokenWhitespace(" "))
    singleTokenNodeTest(tokenLine(1))
    singleTokenNodeTest(tokenCommentDoubleSlash(" this is a double slash comment "))
    singleTokenNodeTest(tokenCommentHash(" this is a hash comment "))
  }

  @Test
  def createConfigNodeSetting() {
    //Ensure a ConfigNodeSetting can handle the normal key types
    keyNodeTest("foo")
    keyNodeTest("\"Hello I am a key how are you today\"")
  }

  @Test
  def pathNodeSubpath() {
    // subPath(n) drops the first n path elements and renders the remainder.
    val origPath = "a.b.c.\"@$%#@!@#$\".\"\".1234.5678"
    val pathNode = configNodeKey(origPath)
    assertEquals(origPath, pathNode.render())
    assertEquals("c.\"@$%#@!@#$\".\"\".1234.5678", pathNode.subPath(2).render())
    assertEquals("5678", pathNode.subPath(6).render())
  }

  @Test
  def createConfigNodeSimpleValue() {
    //Ensure a ConfigNodeSimpleValue can handle the normal value types
    simpleValueNodeTest(tokenInt(10))
    simpleValueNodeTest(tokenLong(10000))
    simpleValueNodeTest(tokenDouble(3.14159))
    simpleValueNodeTest(tokenFalse)
    simpleValueNodeTest(tokenTrue)
    simpleValueNodeTest(tokenNull)
    simpleValueNodeTest(tokenString("Hello my name is string"))
    simpleValueNodeTest(tokenUnquoted("mynameisunquotedstring"))
    simpleValueNodeTest(tokenKeySubstitution("c.d"))
    simpleValueNodeTest(tokenOptionalSubstitution(tokenUnquoted("x.y")))
    simpleValueNodeTest(tokenSubstitution(tokenUnquoted("a.b")))
  }

  @Test
  def createConfigNodeField() {
    // Supports Quoted and Unquoted keys
    fieldNodeTest(configNodeKey("\"abc\""), nodeInt(123), nodeInt(245))
    fieldNodeTest(configNodeKey("abc"), nodeInt(123), nodeInt(245))
    // Can replace value with values of different types
    fieldNodeTest(configNodeKey("\"abc\""), nodeInt(123), nodeString("I am a string"))
    fieldNodeTest(configNodeKey("\"abc\""), nodeInt(123), configNodeObject(List(nodeOpenBrace, nodeCloseBrace)))
  }

  @Test
  def replaceNodes() {
    //Ensure simple values can be replaced by other simple values
    topLevelValueReplaceTest(nodeInt(10), nodeInt(15))
    topLevelValueReplaceTest(nodeLong(10000), nodeInt(20))
    topLevelValueReplaceTest(nodeDouble(3.14159), nodeLong(10000))
    topLevelValueReplaceTest(nodeFalse, nodeTrue)
    topLevelValueReplaceTest(nodeTrue, nodeNull)
    topLevelValueReplaceTest(nodeNull, nodeString("Hello my name is string"))
    topLevelValueReplaceTest(nodeString("Hello my name is string"), nodeUnquotedText("mynameisunquotedstring"))
    topLevelValueReplaceTest(nodeUnquotedText("mynameisunquotedstring"), nodeKeySubstitution("c.d"))
    topLevelValueReplaceTest(nodeInt(10), nodeOptionalSubstitution(tokenUnquoted("x.y")))
    topLevelValueReplaceTest(nodeInt(10), nodeSubstitution(tokenUnquoted("a.b")))
    topLevelValueReplaceTest(nodeSubstitution(tokenUnquoted("a.b")), nodeInt(10))

    // Ensure arrays can be replaced
    val array = configNodeArray(List(nodeOpenBracket, nodeInt(10), nodeSpace, nodeComma, nodeSpace, nodeInt(15), nodeCloseBracket))
    topLevelValueReplaceTest(nodeInt(10), array)
    topLevelValueReplaceTest(array, nodeInt(10))
    topLevelValueReplaceTest(array, configNodeObject(List(nodeOpenBrace, nodeCloseBrace)))

    // Ensure objects can be replaced
    val nestedMap = configNodeObject(List(nodeOpenBrace,
      nodeKeyValuePair(configNodeKey("abc"), configNodeSimpleValue(tokenString("a string"))),
      nodeCloseBrace))
    topLevelValueReplaceTest(nestedMap, nodeInt(10))
    topLevelValueReplaceTest(nodeInt(10), nestedMap)
    topLevelValueReplaceTest(array, nestedMap)
    topLevelValueReplaceTest(nestedMap, array)
    topLevelValueReplaceTest(nestedMap, configNodeObject(List(nodeOpenBrace, nodeCloseBrace)))

    // Ensure concatenations can be replaced
    val concatenation = configNodeConcatenation(List(nodeInt(10), nodeSpace, nodeString("Hello")))
    topLevelValueReplaceTest(concatenation, nodeInt(12))
    topLevelValueReplaceTest(nodeInt(12), concatenation)
    topLevelValueReplaceTest(nestedMap, concatenation)
    topLevelValueReplaceTest(concatenation, nestedMap)
    topLevelValueReplaceTest(array, concatenation)
    topLevelValueReplaceTest(concatenation, array)

    //Ensure a key with format "a.b" will be properly replaced
    topLevelValueReplaceTest(nodeInt(10), nestedMap, "foo.bar")
  }

  @Test
  def removeDuplicates() {
    val emptyMapNode = configNodeObject(List(nodeOpenBrace, nodeCloseBrace))
    val emptyArrayNode = configNodeArray(List(nodeOpenBracket, nodeCloseBracket))
    //Ensure duplicates of a key are removed from a map
    replaceDuplicatesTest(nodeInt(10), nodeTrue, nodeNull)
    replaceDuplicatesTest(emptyMapNode, emptyMapNode, emptyMapNode)
    replaceDuplicatesTest(emptyArrayNode, emptyArrayNode, emptyArrayNode)
    replaceDuplicatesTest(nodeInt(10), emptyMapNode, emptyArrayNode)
  }

  @Test
  def addNonExistentPaths() {
    // New paths can hold simple values, arrays, or objects.
    nonExistentPathTest(nodeInt(10))
    nonExistentPathTest(configNodeArray(List(nodeOpenBracket, nodeInt(15), nodeCloseBracket)))
    nonExistentPathTest(configNodeObject(List(nodeOpenBrace, nodeKeyValuePair(configNodeKey("foo"), nodeDouble(3.14)), nodeCloseBrace)))
  }

  @Test
  def replaceNestedNodes() {
    // Test that all features of node replacement in a map work in a complex map containing nested maps
    val origText = "foo : bar\nbaz : {\n\t\"abc.def\" : 123\n\t//This is a comment about the below setting\n\n\tabc : {\n\t\t" +
      "def : \"this is a string\"\n\t\tghi : ${\"a.b\"}\n\t}\n}\nbaz.abc.ghi : 52\nbaz.abc.ghi : 53\n}"
    // Innermost object: { def : "this is a string", ghi : ${"a.b"} }
    val lowestLevelMap = configNodeObject(List(nodeOpenBrace, nodeLine(6), nodeWhitespace("\t\t"),
      nodeKeyValuePair(configNodeKey("def"), configNodeSimpleValue(tokenString("this is a string"))),
      nodeLine(7), nodeWhitespace("\t\t"),
      nodeKeyValuePair(configNodeKey("ghi"), configNodeSimpleValue(tokenKeySubstitution("a.b"))),
      nodeLine(8), nodeWhitespace("\t"), nodeCloseBrace))
    // Middle object: holds the quoted key "abc.def", a comment, and the inner map.
    val higherLevelMap = configNodeObject(List(nodeOpenBrace, nodeLine(2), nodeWhitespace("\t"),
      nodeKeyValuePair(configNodeKey("\"abc.def\""), configNodeSimpleValue(tokenInt(123))),
      nodeLine(3), nodeWhitespace("\t"),
      nodeCommentDoubleSlash(("This is a comment about the below setting")),
      nodeLine(4), nodeLine(5), nodeWhitespace("\t"),
      nodeKeyValuePair(configNodeKey("abc"), lowestLevelMap),
      nodeLine(9), nodeCloseBrace))
    // Root object, including two duplicate "baz.abc.ghi" entries.
    val origNode = configNodeObject(List(nodeKeyValuePair(configNodeKey("foo"), configNodeSimpleValue(tokenUnquoted("bar"))),
      nodeLine(1),
      nodeKeyValuePair(configNodeKey("baz"), higherLevelMap),
      nodeLine(10),
      nodeKeyValuePair(configNodeKey("baz.abc.ghi"), configNodeSimpleValue(tokenInt(52))),
      nodeLine(11),
      nodeKeyValuePair(configNodeKey("baz.abc.ghi"), configNodeSimpleValue(tokenInt(53))),
      nodeLine(12), nodeCloseBrace))
    assertEquals(origText, origNode.render())
    val finalText = "foo : bar\nbaz : {\n\t\"abc.def\" : true\n\t//This is a comment about the below setting\n\n\tabc : {\n\t\t" +
      "def : false\n\t\t\n\t\t\"this.does.not.exist@@@+$#\" : {\n\t\t end : doesnotexist\n\t\t}\n\t}\n}\n\nbaz.abc.ghi : randomunquotedString\n}"

    //Can replace settings in nested maps
    // Paths with quotes in the name are treated as a single Path, rather than multiple sub-paths
    var newNode = origNode.setValueOnPath("baz.\"abc.def\"", configNodeSimpleValue(tokenTrue))
    newNode = newNode.setValueOnPath("baz.abc.def", configNodeSimpleValue(tokenFalse))

    // Repeats are removed from nested maps
    newNode = newNode.setValueOnPath("baz.abc.ghi", configNodeSimpleValue(tokenUnquoted("randomunquotedString")))

    // Missing paths are added to the top level if they don't appear anywhere, including in nested maps
    newNode = newNode.setValueOnPath("baz.abc.\"this.does.not.exist@@@+$#\".end", configNodeSimpleValue(tokenUnquoted("doesnotexist")))

    // The above operations cause the resultant map to be rendered properly
    assertEquals(finalText, newNode.render())
  }
}
twitter-forks/config
config/src/test/scala/com/typesafe/config/impl/ConfigNodeTest.scala
Scala
apache-2.0
12,100
package org.bowlerframework.view.scalate

import reflect.BeanProperty
import org.bowlerframework.{RequestScope, Request}
import collection.mutable.HashMap

/**
 * Registry and configuration point for template resolution.
 *
 * Selects a Template for a request based on its contents, headers and/or
 * path; the order in which layouts and templates are registered matters,
 * since selectors are kept in order and the first match wins. Also holds
 * Scalate settings: the TemplateResolver, the preference order of template
 * types (ssp, jade, mustache, ...), and the root packages/folders for views
 * and layouts.
 */
object TemplateRegistry {

  /** Template suffixes, tried in this order of preference. */
  var templateTypePreference = List(".mustache", ".ssp", ".jade", ".scaml")

  /** Strategy used to locate templates; defaults to classpath lookup. */
  var templateResolver: TemplateResolver = new ClasspathTemplateResolver

  /** Root package/folder under which views are resolved. */
  var rootViewPackageOrFolder = "/views"

  /** Root package/folder under which layouts are resolved. */
  var rootLayoutPackageOrFolder = "/layouts"

  /** Per-controller layout overrides, keyed by controller class. */
  val controllerLayouts = new HashMap[Class[_], Layout]

  /** Fallback layout lookup; by default no layout is applied. */
  var defaultLayout: Request => Option[Layout] = _ => None

  /** Computes candidate template suffixes for a request; none by default. */
  var suffixResolver: Request => List[String] = _ => Nil

  /** Applies the configured suffixResolver to the given request. */
  def getSuffixes(request: Request): List[String] = suffixResolver(request)
}
rkpandey/Bowler
core/src/main/scala/org/bowlerframework/view/scalate/TemplateRegistry.scala
Scala
bsd-3-clause
1,134
package com.xenopsconsulting.gamedayapi.batch

import org.slf4j.LoggerFactory
import net.noerd.prequel._
import net.noerd.prequel.DatabaseConfig
import scala.Some
import scala.util.control.NonFatal
import com.xenopsconsulting.gamedayapi.Game

/**
 * Imports MLB Gameday pitch data into a local MySQL database via prequel.
 * One row is written to the `pitches` table per pitch; a game whose gid is
 * already present is skipped, so re-running an import is idempotent.
 */
object MySQLDatabaseImporter {
  private val _log = LoggerFactory.getLogger(getClass)

  // Connection settings for the local MySQL instance.
  // NOTE(review): no password is configured — presumably a passwordless local
  // account; confirm before using anywhere shared.
  val database = DatabaseConfig(
    driver = "com.mysql.jdbc.Driver",
    jdbcURL = "jdbc:mysql://localhost:3306/scala_gameday_api",
    username = "gameday"
  )

  /** Drops and recreates the `pitches` table. Destructive: all rows are lost. */
  def createTables() {
    _log.info("Recreating pitch table")
    database.transaction { tx =>
      tx.execute("DROP TABLE IF EXISTS pitches;")
      tx.execute("CREATE TABLE pitches (gid varchar(40), year int, inning int, half varchar(6), at_bat_num int, at_bat_b int, " +
        "at_bat_s int, at_bat_o int, at_bat_start_tfs int, batter int, stand char(1), b_height varchar(4), pitcher int, " +
        "p_throws char(1), at_bat_des varchar(400), at_bat_event varchar(20), pitch_des varchar(40), pitch_id int, " +
        "pitch_type char(2), type_confidence DECIMAL(4, 3), pitch_tfs int, pitch_x DECIMAL(5, 2), pitch_y DECIMAL(5, 2), " +
        "pitch_sv_id varchar(40), pitch_start_speed DECIMAL(4, 1), pitch_end_speed DECIMAL(4, 1), sz_top DECIMAL(3, 2), " +
        "sz_bottom DECIMAL(3, 2), pfx_x DECIMAL(4, 2), pfx_z DECIMAL(4, 2), px DECIMAL(4, 3), pz DECIMAL(4, 3), " +
        "x0 DECIMAL(5, 3), y0 DECIMAL(5, 3), z0 DECIMAL(5, 3), vx0 DECIMAL(4, 2), vy0 DECIMAL(6, 3), vz0 DECIMAL(5, 3), " +
        "ax DECIMAL(5, 3), ay DECIMAL(5, 3), az DECIMAL(5, 3), break_y DECIMAL(3, 1), break_angle DECIMAL(4, 1), " +
        "break_length DECIMAL(3, 1), zone int, spin_dir DECIMAL(6, 3), spin_rate DECIMAL(7, 3));")
    }
  }

  /** Fetches and imports all pitches for the given team across several years. */
  def importPitchesByYearsAndTeam(years: List[Int], team: String) {
    val fetcher: Fetcher = new Fetcher()
    fetcher.fetchByYearsAndTeam(years, team, importPitches)
  }

  /** Fetches and imports all pitches for the given team in a single year. */
  def importPitchesByYearAndTeam(year: Int, team: String) {
    val fetcher: Fetcher = new Fetcher()
    fetcher.fetchByYearAndTeam(year, team, importPitches)
  }

  /** Fetches and imports all pitches for every team in a single year. */
  def importPitchesByYear(year: Int) {
    val fetcher: Fetcher = new Fetcher()
    fetcher.fetchByYear(year, importPitches)
  }

  /**
   * Inserts every pitch of one game into `pitches` inside a single
   * transaction. A game already present (matched by gid) is skipped.
   * Failures on individual rows are logged and the import continues.
   */
  def importPitches(game: Game) {
    database.transaction { tx =>
      val gid: String = game.fetchStrategy.gid()
      _log.info(gid)
      val gameCount = tx.selectInt("select count(*) from pitches where gid = ?", StringFormattable(gid))
      // Guard instead of the original non-local `return`: returning from
      // inside this closure unwound through prequel's transaction handling
      // via NonLocalReturnControl, which is fragile and surprising.
      if (gameCount == 0) {
        for (pitch <- game.pitches()) {
          val atBat = pitch.atBat
          try {
            tx.execute("insert into pitches values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?," +
              " ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
              StringFormattable(gid),
              IntFormattable(game.year()),
              IntFormattable(atBat.inning.num.toInt),
              StringFormattable(atBat.half),
              IntFormattable(atBat.num().toInt),
              IntFormattable(atBat.b().toInt),
              IntFormattable(atBat.s().toInt),
              IntFormattable(atBat.o().toInt),
              Nullable(getIntFormattable(atBat.startTfs())),
              IntFormattable(atBat.batter().toInt),
              StringFormattable(atBat.stand()),
              StringFormattable(atBat.bHeight()),
              IntFormattable(atBat.pitcher().id().toInt),
              StringFormattable(atBat.pThrows()),
              StringFormattable(atBat.des()),
              StringFormattable(atBat.event()),
              StringFormattable(pitch.des()),
              IntFormattable(pitch.id().toInt),
              StringFormattable(pitch.pitchType()),
              Nullable(getFloatFormattable(pitch.typeConfidence())),
              Nullable(getIntFormattable(pitch.tfs())),
              FloatFormattable(pitch.x().toFloat),
              FloatFormattable(pitch.y().toFloat),
              StringFormattable(pitch.svId()),
              Nullable(getFloatFormattable(pitch.startSpeed())),
              Nullable(getFloatFormattable(pitch.endSpeed())),
              Nullable(getFloatFormattable(pitch.szTop())),
              Nullable(getFloatFormattable(pitch.szBot())),
              Nullable(getFloatFormattable(pitch.pfxX())),
              Nullable(getFloatFormattable(pitch.pfxZ())),
              Nullable(getFloatFormattable(pitch.px())),
              Nullable(getFloatFormattable(pitch.pz())),
              Nullable(getFloatFormattable(pitch.x0())),
              Nullable(getFloatFormattable(pitch.y0())),
              Nullable(getFloatFormattable(pitch.z0())),
              Nullable(getFloatFormattable(pitch.vx0())),
              Nullable(getFloatFormattable(pitch.vy0())),
              Nullable(getFloatFormattable(pitch.vz0())),
              Nullable(getFloatFormattable(pitch.ax())),
              Nullable(getFloatFormattable(pitch.ay())),
              Nullable(getFloatFormattable(pitch.az())),
              Nullable(getFloatFormattable(pitch.breakY())),
              Nullable(getFloatFormattable(pitch.breakAngle())),
              Nullable(getFloatFormattable(pitch.breakLength())),
              Nullable(getIntFormattable(pitch.zone())),
              Nullable(getFloatFormattable(pitch.spinDir())),
              Nullable(getFloatFormattable(pitch.spinRate()))
            )
          } catch {
            // Only swallow non-fatal errors; OutOfMemoryError etc. propagate.
            case NonFatal(e) => e.printStackTrace()
          }
        }
      }
    }
  }

  /** Lifts an optional float into prequel's Formattable wrapper. */
  private def getFloatFormattable(value: Option[Float]): Option[Formattable] =
    value.map(FloatFormattable(_))

  /** Lifts an optional int into prequel's Formattable wrapper. */
  private def getIntFormattable(value: Option[Int]): Option[Formattable] =
    value.map(IntFormattable(_))
}
ecopony/scala-gameday-api
src/main/scala/com/xenopsconsulting/gamedayapi/batch/MySQLDatabaseImporter.scala
Scala
mit
5,383
package net.dinkla.lbnn.geom

/**
 * Created by dinkla on 19/06/15.
 *
 * A point in a `dimension()`-dimensional space with coordinates of type `T`.
 */
trait Point[T] {

  /** Number of coordinates (dimensions) this point has. */
  def dimension(): Int

  /** The i-th coordinate. NOTE(review): whether indexing is 0- or 1-based is
    * not visible here — confirm against implementations. */
  def ith(i: Int): T

  //type P
  //val origin: P
}
jdinkla/location-based-nearest-neighbours
src/main/scala/net/dinkla/lbnn/geom/Point.scala
Scala
apache-2.0
177
package myUtils

import java.util.UUID

import com.github.tminglei.slickpg.PgRangeSupportUtils
import play.api.data.format.Formats
import play.api.data.format.Formatter
import play.api.data.FormError
import com.vividsolutions.jts.io.{WKTReader, WKTWriter}
import com.vividsolutions.jts.geom.Geometry
import play.api.libs.json._
import org.joda.time.LocalDateTime

/**
 * my play form data formatters
 *
 * Play `Formatter` instances for binding/unbinding non-primitive types to and
 * from form data: JSON values, Joda LocalDateTime, UUID, slick-pg ranges,
 * string maps encoded as JSON, and JTS geometries via WKT.
 */
object MyFormats {

  /** Binds a field by parsing it as JSON; unbinds via Json.stringify. */
  def jsonFormat: Formatter[JsValue] = new Formatter[JsValue] {
    override val format = Some(("format.json", Nil))
    def bind(key: String, data: Map[String, String]) =
      parsing(Json.parse(_), "error.json", Nil)(key, data)
    def unbind(key: String, value: JsValue) = Map(key -> Json.stringify(value))
  }

  ///

  /** Binds a field to a Joda LocalDateTime via LocalDateTime.parse. */
  def jodaDateTimeFormat: Formatter[LocalDateTime] = new Formatter[LocalDateTime] {
    override val format = Some(("format.datetime", Nil))
    def bind(key: String, data: Map[String, String]) =
      parsing(LocalDateTime.parse, "error.datetime", Nil)(key, data)
    def unbind(key: String, value: LocalDateTime) = Map(key -> value.toString)
  }

  ///

  /** Binds a field to a java.util.UUID via UUID.fromString. */
  def uuidFormat: Formatter[UUID] = new Formatter[UUID] {
    override val format = Some(("format.uuid", Nil))
    def bind(key: String, data: Map[String, String]) =
      parsing(UUID.fromString, "error.uuid", Nil)(key, data)
    def unbind(key: String, value: UUID) = Map(key -> value.toString)
  }

  /**
   * Binds a field to a slick-pg Range[T]; `parseFn` parses each bound.
   * NOTE(review): unbind relies on Range's toString round-tripping with
   * PgRangeSupportUtils.mkRangeFn — confirm against the slick-pg version used.
   */
  def rangeFormat[T](parseFn: (String => T)): Formatter[Range[T]] = new Formatter[Range[T]] {
    override val format = Some(("format.range", Nil))
    def bind(key: String, data: Map[String, String]) =
      parsing(PgRangeSupportUtils.mkRangeFn(parseFn), "error.range", Nil)(key, data)
    def unbind(key: String, value: Range[T]) = Map(key -> value.toString)
  }

  ///

  /** Binds a JSON-object field ({key1:value1, ...}) to Map[String, String];
    * unparsable input binds to an empty map rather than raising a form error. */
  def strMapFormat = new Formatter[Map[String, String]] {
    override val format = Some(("format.jsonmap", Seq("{key1:value1, key2:value2, ...}")))
    def bind(key: String, data: Map[String, String]) =
      parsing(fromJsonStr(_).getOrElse(Map.empty[String,String]), "error.jsonmap", Nil)(key, data)
    def unbind(key: String, value: Map[String,String]) = Map(key -> toJsonStr(value))
  }

  // Play JSON (de)serializers for Map[String, String], used by the helpers below.
  implicit private val mapReads = Reads.mapReads[String]
  implicit private val mapWrites = Writes.mapWrites[String]

  /** Serializes a string map to a JSON object string. */
  def toJsonStr(v: Map[String,String]): String = Json.stringify(Json.toJson(v))
  // NOTE: Json.fromJson(...).get throws on a non-matching JSON shape; callers
  // guard with getOrElse, and `parsing` converts thrown errors to FormErrors.
  def fromJsonStr(s: String): Option[Map[String,String]] = Option(Json.fromJson(Json.parse(s)).get)

  ///

  /** Binds a field to a JTS Geometry subtype using WKT (well-known text). */
  def geometryFormat[T <: Geometry]: Formatter[T] = new Formatter[T] {
    override val format = Some(("format.geometry", Nil))
    def bind(key: String, data: Map[String, String]) =
      parsing(fromWKT[T], "error.geometry", Nil)(key, data)
    def unbind(key: String, value: T) = Map(key -> toWKT(value))
  }

  //////////////////////////////////////////////////////////////////////////
  // One WKT writer/reader per thread, lazily created on first use —
  // presumably because the JTS reader/writer are not thread-safe (confirm).
  private val wktWriterHolder = new ThreadLocal[WKTWriter]
  private val wktReaderHolder = new ThreadLocal[WKTReader]

  /** Renders a geometry as WKT using this thread's writer. */
  private def toWKT(geom: Geometry): String = {
    if (wktWriterHolder.get == null) wktWriterHolder.set(new WKTWriter())
    wktWriterHolder.get.write(geom)
  }

  /** Parses WKT into a geometry and casts it to the requested subtype. */
  private def fromWKT[T](wkt: String): T = {
    if (wktReaderHolder.get == null) wktReaderHolder.set(new WKTReader())
    wktReaderHolder.get.read(wkt).asInstanceOf[T]
  }

  /**
   * (copy from [[play.api.data.format.Formats#parsing]])
   * Helper for formatters binders
   * @param parse Function parsing a String value into a T value, throwing an exception in case of failure
   * @param errMsg Error to set in case of parsing failure
   * @param errArgs Arguments for error message
   * @param key Key name of the field to parse
   * @param data Field data
   */
  private def parsing[T](parse: String => T, errMsg: String, errArgs: Seq[Any])(
      key: String, data: Map[String, String]): Either[Seq[FormError], T] = {
    Formats.stringFormat.bind(key, data).right.flatMap { s =>
      scala.util.control.Exception.allCatch[T]
        .either(parse(s))
        .left.map(e => Seq(FormError(key, errMsg, errArgs)))
    }
  }
}
hardmettle/slick-postgress-samples
app/myUtils/MyFormats.scala
Scala
apache-2.0
4,105
package group.matsen

import scala.collection.Bag

/**
 * Shared type aliases for the phyloHMC package.
 */
package object phylohmc {
  // A site pattern: assigns each Taxon an Int (presumably an observed
  // character/state index — confirm against usage elsewhere in the project).
  type Pattern = Map[Taxon, Int]
  // Value-level alias so `Pattern(...)` constructs a Map just like the type alias.
  val Pattern = Map
  // A multiset of patterns. Bag comes from an external collections library,
  // not the Scala standard library.
  type Patterns = Bag[Pattern]
  val Patterns = Bag
}
armanbilge/phyloHMC
src/main/scala/group/matsen/phylohmc/package.scala
Scala
agpl-3.0
186
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.producer.async import kafka.common._ import kafka.message.{NoCompressionCodec, Message, ByteBufferMessageSet} import kafka.producer._ import kafka.serializer.Encoder import kafka.utils.{CoreUtils, Logging, SystemTime} import org.apache.kafka.common.errors.{LeaderNotAvailableException, UnknownTopicOrPartitionException} import org.apache.kafka.common.protocol.Errors import scala.util.Random import scala.collection.{Seq, Map} import scala.collection.mutable.{ArrayBuffer, HashMap, Set} import java.util.concurrent.atomic._ import kafka.api.{TopicMetadata, ProducerRequest} import org.apache.kafka.common.utils.Utils class DefaultEventHandler[K,V](config: ProducerConfig, private val partitioner: Partitioner, private val encoder: Encoder[V], private val keyEncoder: Encoder[K], private val producerPool: ProducerPool, private val topicPartitionInfos: HashMap[String, TopicMetadata] = new HashMap[String, TopicMetadata]) extends EventHandler[K,V] with Logging { val isSync = ("sync" == config.producerType) val correlationId = new AtomicInteger(0) val brokerPartitionInfo = new BrokerPartitionInfo(config, producerPool, topicPartitionInfos) private val topicMetadataRefreshInterval = 
config.topicMetadataRefreshIntervalMs private var lastTopicMetadataRefreshTime = 0L private val topicMetadataToRefresh = Set.empty[String] private val sendPartitionPerTopicCache = HashMap.empty[String, Int] private val producerStats = ProducerStatsRegistry.getProducerStats(config.clientId) private val producerTopicStats = ProducerTopicStatsRegistry.getProducerTopicStats(config.clientId) def handle(events: Seq[KeyedMessage[K,V]]) { val serializedData = serialize(events) serializedData.foreach { keyed => val dataSize = keyed.message.payloadSize producerTopicStats.getProducerTopicStats(keyed.topic).byteRate.mark(dataSize) producerTopicStats.getProducerAllTopicsStats.byteRate.mark(dataSize) } var outstandingProduceRequests = serializedData var remainingRetries = config.messageSendMaxRetries + 1 val correlationIdStart = correlationId.get() debug("Handling %d events".format(events.size)) while (remainingRetries > 0 && outstandingProduceRequests.size > 0) { topicMetadataToRefresh ++= outstandingProduceRequests.map(_.topic) if (topicMetadataRefreshInterval >= 0 && SystemTime.milliseconds - lastTopicMetadataRefreshTime > topicMetadataRefreshInterval) { CoreUtils.swallowError(brokerPartitionInfo.updateInfo(topicMetadataToRefresh.toSet, correlationId.getAndIncrement)) sendPartitionPerTopicCache.clear() topicMetadataToRefresh.clear lastTopicMetadataRefreshTime = SystemTime.milliseconds } outstandingProduceRequests = dispatchSerializedData(outstandingProduceRequests) if (outstandingProduceRequests.size > 0) { info("Back off for %d ms before retrying send. 
Remaining retries = %d".format(config.retryBackoffMs, remainingRetries-1)) // back off and update the topic metadata cache before attempting another send operation Thread.sleep(config.retryBackoffMs) // get topics of the outstanding produce requests and refresh metadata for those CoreUtils.swallowError(brokerPartitionInfo.updateInfo(outstandingProduceRequests.map(_.topic).toSet, correlationId.getAndIncrement)) sendPartitionPerTopicCache.clear() remainingRetries -= 1 producerStats.resendRate.mark() } } if(outstandingProduceRequests.size > 0) { producerStats.failedSendRate.mark() val correlationIdEnd = correlationId.get() error("Failed to send requests for topics %s with correlation ids in [%d,%d]" .format(outstandingProduceRequests.map(_.topic).toSet.mkString(","), correlationIdStart, correlationIdEnd-1)) throw new FailedToSendMessageException("Failed to send messages after " + config.messageSendMaxRetries + " tries.", null) } } private def dispatchSerializedData(messages: Seq[KeyedMessage[K,Message]]): Seq[KeyedMessage[K, Message]] = { val partitionedDataOpt = partitionAndCollate(messages) partitionedDataOpt match { case Some(partitionedData) => val failedProduceRequests = new ArrayBuffer[KeyedMessage[K, Message]] for ((brokerid, messagesPerBrokerMap) <- partitionedData) { if (logger.isTraceEnabled) { messagesPerBrokerMap.foreach(partitionAndEvent => trace("Handling event for Topic: %s, Broker: %d, Partitions: %s".format(partitionAndEvent._1, brokerid, partitionAndEvent._2))) } val messageSetPerBrokerOpt = groupMessagesToSet(messagesPerBrokerMap) messageSetPerBrokerOpt match { case Some(messageSetPerBroker) => val failedTopicPartitions = send(brokerid, messageSetPerBroker) failedTopicPartitions.foreach(topicPartition => { messagesPerBrokerMap.get(topicPartition) match { case Some(data) => failedProduceRequests.appendAll(data) case None => // nothing } }) case None => // failed to group messages messagesPerBrokerMap.values.foreach(m => 
failedProduceRequests.appendAll(m)) } } failedProduceRequests case None => // failed to collate messages messages } } def serialize(events: Seq[KeyedMessage[K,V]]): Seq[KeyedMessage[K,Message]] = { val serializedMessages = new ArrayBuffer[KeyedMessage[K,Message]](events.size) events.foreach{e => try { if(e.hasKey) serializedMessages += new KeyedMessage[K,Message](topic = e.topic, key = e.key, partKey = e.partKey, message = new Message(key = keyEncoder.toBytes(e.key), bytes = encoder.toBytes(e.message))) else serializedMessages += new KeyedMessage[K,Message](topic = e.topic, key = e.key, partKey = e.partKey, message = new Message(bytes = encoder.toBytes(e.message))) } catch { case t: Throwable => producerStats.serializationErrorRate.mark() if (isSync) { throw t } else { // currently, if in async mode, we just log the serialization error. We need to revisit // this when doing kafka-496 error("Error serializing message for topic %s".format(e.topic), t) } } } serializedMessages } def partitionAndCollate(messages: Seq[KeyedMessage[K,Message]]): Option[Map[Int, collection.mutable.Map[TopicAndPartition, Seq[KeyedMessage[K,Message]]]]] = { val ret = new HashMap[Int, collection.mutable.Map[TopicAndPartition, Seq[KeyedMessage[K,Message]]]] try { for (message <- messages) { val topicPartitionsList = getPartitionListForTopic(message) val partitionIndex = getPartition(message.topic, message.partitionKey, topicPartitionsList) val brokerPartition = topicPartitionsList(partitionIndex) // postpone the failure until the send operation, so that requests for other brokers are handled correctly val leaderBrokerId = brokerPartition.leaderBrokerIdOpt.getOrElse(-1) var dataPerBroker: HashMap[TopicAndPartition, Seq[KeyedMessage[K,Message]]] = null ret.get(leaderBrokerId) match { case Some(element) => dataPerBroker = element.asInstanceOf[HashMap[TopicAndPartition, Seq[KeyedMessage[K,Message]]]] case None => dataPerBroker = new HashMap[TopicAndPartition, Seq[KeyedMessage[K,Message]]] 
ret.put(leaderBrokerId, dataPerBroker) } val topicAndPartition = TopicAndPartition(message.topic, brokerPartition.partitionId) var dataPerTopicPartition: ArrayBuffer[KeyedMessage[K,Message]] = null dataPerBroker.get(topicAndPartition) match { case Some(element) => dataPerTopicPartition = element.asInstanceOf[ArrayBuffer[KeyedMessage[K,Message]]] case None => dataPerTopicPartition = new ArrayBuffer[KeyedMessage[K,Message]] dataPerBroker.put(topicAndPartition, dataPerTopicPartition) } dataPerTopicPartition.append(message) } Some(ret) }catch { // Swallow recoverable exceptions and return None so that they can be retried. case ute: UnknownTopicOrPartitionException => warn("Failed to collate messages by topic,partition due to: " + ute.getMessage); None case lnae: LeaderNotAvailableException => warn("Failed to collate messages by topic,partition due to: " + lnae.getMessage); None case oe: Throwable => error("Failed to collate messages by topic, partition due to: " + oe.getMessage); None } } private def getPartitionListForTopic(m: KeyedMessage[K,Message]): Seq[PartitionAndLeader] = { val topicPartitionsList = brokerPartitionInfo.getBrokerPartitionInfo(m.topic, correlationId.getAndIncrement) debug("Broker partitions registered for topic: %s are %s" .format(m.topic, topicPartitionsList.map(p => p.partitionId).mkString(","))) val totalNumPartitions = topicPartitionsList.length if(totalNumPartitions == 0) throw new NoBrokersForPartitionException("Partition key = " + m.key) topicPartitionsList } /** * Retrieves the partition id and throws an UnknownTopicOrPartitionException if * the value of partition is not between 0 and numPartitions-1 * @param topic The topic * @param key the partition key * @param topicPartitionList the list of available partitions * @return the partition id */ private def getPartition(topic: String, key: Any, topicPartitionList: Seq[PartitionAndLeader]): Int = { val numPartitions = topicPartitionList.size if(numPartitions <= 0) throw new 
UnknownTopicOrPartitionException("Topic " + topic + " doesn't exist") val partition = if(key == null) { // If the key is null, we don't really need a partitioner // So we look up in the send partition cache for the topic to decide the target partition val id = sendPartitionPerTopicCache.get(topic) id match { case Some(partitionId) => // directly return the partitionId without checking availability of the leader, // since we want to postpone the failure until the send operation anyways partitionId case None => val availablePartitions = topicPartitionList.filter(_.leaderBrokerIdOpt.isDefined) if (availablePartitions.isEmpty) throw new LeaderNotAvailableException("No leader for any partition in topic " + topic) val index = Utils.abs(Random.nextInt) % availablePartitions.size val partitionId = availablePartitions(index).partitionId sendPartitionPerTopicCache.put(topic, partitionId) partitionId } } else partitioner.partition(key, numPartitions) if(partition < 0 || partition >= numPartitions) throw new UnknownTopicOrPartitionException("Invalid partition id: " + partition + " for topic " + topic + "; Valid values are in the inclusive range of [0, " + (numPartitions-1) + "]") trace("Assigning message of topic %s and key %s to a selected partition %d".format(topic, if (key == null) "[none]" else key.toString, partition)) partition } /** * Constructs and sends the produce request based on a map from (topic, partition) -> messages * * @param brokerId the broker that will receive the request * @param messagesPerTopic the messages as a map from (topic, partition) -> messages * @return the set (topic, partitions) messages which incurred an error sending or processing */ private def send(brokerId: Int, messagesPerTopic: collection.mutable.Map[TopicAndPartition, ByteBufferMessageSet]) = { if(brokerId < 0) { warn("Failed to send data since partitions %s don't have a leader".format(messagesPerTopic.map(_._1).mkString(","))) messagesPerTopic.keys.toSeq } else if(messagesPerTopic.size 
> 0) { val currentCorrelationId = correlationId.getAndIncrement val producerRequest = new ProducerRequest(currentCorrelationId, config.clientId, config.requestRequiredAcks, config.requestTimeoutMs, messagesPerTopic) var failedTopicPartitions = Seq.empty[TopicAndPartition] try { val syncProducer = producerPool.getProducer(brokerId) debug("Producer sending messages with correlation id %d for topics %s to broker %d on %s:%d" .format(currentCorrelationId, messagesPerTopic.keySet.mkString(","), brokerId, syncProducer.config.host, syncProducer.config.port)) val response = syncProducer.send(producerRequest) debug("Producer sent messages with correlation id %d for topics %s to broker %d on %s:%d" .format(currentCorrelationId, messagesPerTopic.keySet.mkString(","), brokerId, syncProducer.config.host, syncProducer.config.port)) if(response != null) { if (response.status.size != producerRequest.data.size) throw new KafkaException("Incomplete response (%s) for producer request (%s)".format(response, producerRequest)) if (logger.isTraceEnabled) { val successfullySentData = response.status.filter(_._2.error == Errors.NONE.code) successfullySentData.foreach(m => messagesPerTopic(m._1).foreach(message => trace("Successfully sent message: %s".format(if(message.message.isNull) null else message.message.toString())))) } val failedPartitionsAndStatus = response.status.filter(_._2.error != Errors.NONE.code).toSeq failedTopicPartitions = failedPartitionsAndStatus.map(partitionStatus => partitionStatus._1) if(failedTopicPartitions.size > 0) { val errorString = failedPartitionsAndStatus .sortWith((p1, p2) => p1._1.topic.compareTo(p2._1.topic) < 0 || (p1._1.topic.compareTo(p2._1.topic) == 0 && p1._1.partition < p2._1.partition)) .map{ case(topicAndPartition, status) => topicAndPartition.toString + ": " + Errors.forCode(status.error).exceptionName }.mkString(",") warn("Produce request with correlation id %d failed due to %s".format(currentCorrelationId, errorString)) } failedTopicPartitions 
} else { Seq.empty[TopicAndPartition] } } catch { case t: Throwable => warn("Failed to send producer request with correlation id %d to broker %d with data for partitions %s" .format(currentCorrelationId, brokerId, messagesPerTopic.map(_._1).mkString(",")), t) messagesPerTopic.keys.toSeq } } else { List.empty } } private def groupMessagesToSet(messagesPerTopicAndPartition: collection.mutable.Map[TopicAndPartition, Seq[KeyedMessage[K, Message]]]) = { /** enforce the compressed.topics config here. * If the compression codec is anything other than NoCompressionCodec, * Enable compression only for specified topics if any * If the list of compressed topics is empty, then enable the specified compression codec for all topics * If the compression codec is NoCompressionCodec, compression is disabled for all topics */ try { val messagesPerTopicPartition = messagesPerTopicAndPartition.map { case (topicAndPartition, messages) => val rawMessages = messages.map(_.message) (topicAndPartition, config.compressionCodec match { case NoCompressionCodec => debug("Sending %d messages with no compression to %s".format(messages.size, topicAndPartition)) new ByteBufferMessageSet(NoCompressionCodec, rawMessages: _*) case _ => config.compressedTopics.size match { case 0 => debug("Sending %d messages with compression codec %d to %s" .format(messages.size, config.compressionCodec.codec, topicAndPartition)) new ByteBufferMessageSet(config.compressionCodec, rawMessages: _*) case _ => if (config.compressedTopics.contains(topicAndPartition.topic)) { debug("Sending %d messages with compression codec %d to %s" .format(messages.size, config.compressionCodec.codec, topicAndPartition)) new ByteBufferMessageSet(config.compressionCodec, rawMessages: _*) } else { debug("Sending %d messages to %s with no compression as it is not in compressed.topics - %s" .format(messages.size, topicAndPartition, config.compressedTopics.toString)) new ByteBufferMessageSet(NoCompressionCodec, rawMessages: _*) } } } ) } 
Some(messagesPerTopicPartition) } catch { case t: Throwable => error("Failed to group messages", t); None } } def close() { if (producerPool != null) producerPool.close } }
Mszak/kafka
core/src/main/scala/kafka/producer/async/DefaultEventHandler.scala
Scala
apache-2.0
18,171
package edu.gemini.osgi.tools

import java.io._
import scala.io.Source

/** File-system helpers used by the OSGi build tooling. */
object FileUtils {

  /** Lists the files immediately contained in a parent directory (Nil if not a directory). */
  def listFiles(parentDir: File): List[File] =
    if (parentDir.isDirectory) parentDir.listFiles.toList else Nil

  /** Lists the immediate child directories of the given directory. */
  def childDirs(parentDir: File): List[File] = listFiles(parentDir).filter(_.isDirectory)

  /** Finds the first file contained in `root` (searched depth-first) with the given name. */
  def findFile(root: File, name: String): Option[File] = {
    def go(fs: List[File]): Option[File] = fs match {
      case Nil      => None
      case (h :: t) => if (h.getName == name) Some(h) else go(listFiles(h) ++ t)
    }
    go(List(root))
  }

  /**
   * Converts a File into a List of its absolute path elements.
   *
   * FIX: split on the separator *character*, not the separator string.
   * `String.split(String)` interprets its argument as a regular expression,
   * so the previous `split(File.separator)` threw PatternSyntaxException on
   * Windows, where the separator is the regex metacharacter `\`.
   */
  def pathToList(f: File): List[String] =
    f.getAbsolutePath.split(File.separatorChar).toList

  /**
   * Computes a relative file path needed to traverse from the "fromDir"
   * directory to the "to" file.
   *
   * @throws IllegalArgumentException if `fromDir` is not a directory
   */
  def relativePath(fromDir: File, to: File): String = {
    require(fromDir.isDirectory, "fromDir is not a directory")

    val fromPath = pathToList(fromDir)
    val toPath   = pathToList(to)

    // Remove the common prefix from the two paths ("" pads the shorter one).
    val diff = fromPath.zipAll(toPath, "", "") dropWhile { case (a, b) => a == b }

    // Get the remaining "from" and "to" path suffixes
    val (f, t) = diff.unzip

    // Anything left over in the "from" path (if anything), gets converted to
    // ".." parent directory.  Anything left over in the "to" path is then
    // appended.  Empty strings are zipAll padding, not real path elements.
    val rmEmpty: Traversable[String] => Traversable[String] = _.filterNot(_ == "")
    val path = (rmEmpty(f) map { _ => ".." }) ++ rmEmpty(t)
    path.mkString(File.separator)
  }

  /**
   * Writes a string to a file, one line at a time.
   *
   * FIX: the writer is now closed in a finally block so it is not leaked
   * when writing fails part-way through.
   */
  def save(str: String, to: File): Unit = {
    val pw = new PrintWriter(to)
    try str.lines.foreach(pw.println)
    finally pw.close()
  }

  /** Writes an xml hunk to a file, pretty-printed (240 columns, 2-space indent). */
  def save(n: xml.Node, to: File): Unit =
    save(new xml.PrettyPrinter(240, 2).format(n), to)

  /** A file filter accepting only files whose name ends in ".jar". */
  val jarFilter: FileFilter = new FileFilter {
    def accept(f: File): Boolean = f.getName.endsWith(".jar")
  }
}
arturog8m/ocs
project/src/main/scala/edu/gemini/osgi/tools/FileUtils.scala
Scala
bsd-3-clause
2,297
package com.wavesplatform.it.sync.transactions

import com.typesafe.config.Config
import com.wavesplatform.account.AddressScheme
import com.wavesplatform.api.http.ApiError.StateCheckFailed
import com.wavesplatform.it.NodeConfigs
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.api.TransactionInfo
import com.wavesplatform.it.sync._
import com.wavesplatform.it.transactions.BaseTransactionSuite
import com.wavesplatform.test._
import com.wavesplatform.transaction.assets.ReissueTransaction

/**
 * Node-integration tests for ReissueTransaction, run against every supported
 * transaction version. Each scenario issues a fresh asset first, so scenarios
 * only share the issuer account's WAVES balance (fees are re-read per loop).
 */
class ReissueTransactionSuite extends BaseTransactionSuite {

  // Happy path: issue a reissuable asset, reissue the same amount again, and
  // verify both the WAVES fee accounting and the doubled asset supply.
  test("asset reissue changes issuer's asset balance; issuer's waves balance is decreased by fee") {
    for (v <- reissueTxSupportedVersions) {
      // Balances are re-read each iteration: earlier versions already spent fees.
      val (balance, effectiveBalance) = miner.accountBalances(firstAddress)

      val issuedAssetId = sender.issue(firstKeyPair, "name2", "description2", someAssetAmount, decimals = 2, reissuable = true, issueFee).id
      nodes.waitForHeightAriseAndTxPresent(issuedAssetId)
      miner.assertBalances(firstAddress, balance - issueFee, effectiveBalance - issueFee)
      miner.assertAssetBalance(firstAddress, issuedAssetId, someAssetAmount)

      val reissueTx = sender.reissue(firstKeyPair, issuedAssetId, someAssetAmount, reissuable = true, fee = reissueReducedFee, version = v)
      nodes.waitForHeightAriseAndTxPresent(reissueTx.id)
      if (v > 2) {
        // Versions above 2 expose an explicit chain id, both in the broadcast
        // response and when the transaction is fetched back from the node.
        reissueTx.chainId shouldBe Some(AddressScheme.current.chainId)
        sender.transactionInfo[TransactionInfo](reissueTx.id).chainId shouldBe Some(AddressScheme.current.chainId)
      }
      // Both fees are gone from the WAVES balance; the supply has doubled.
      miner.assertBalances(firstAddress, balance - issueFee - reissueReducedFee, effectiveBalance - issueFee - reissueReducedFee)
      miner.assertAssetBalance(firstAddress, issuedAssetId, 2 * someAssetAmount)
    }
    // Exactly one reissue per supported version should now be on-chain.
    miner.transactionsByAddress(firstAddress, limit = 100)
      .count(_._type == ReissueTransaction.typeId) shouldBe reissueTxSupportedVersions.length
  }

  // Reissuing an asset that was issued with reissuable = false must be
  // rejected, leaving both balances untouched.
  test("can't reissue not reissuable asset") {
    for (v <- reissueTxSupportedVersions) {
      val (balance, effectiveBalance) = miner.accountBalances(firstAddress)

      val issuedAssetId = sender.issue(firstKeyPair, "name2", "description2", someAssetAmount, decimals = 2, reissuable = false, issueFee).id
      nodes.waitForHeightAriseAndTxPresent(issuedAssetId)
      miner.assertBalances(firstAddress, balance - issueFee, effectiveBalance - issueFee)
      miner.assertAssetBalance(firstAddress, issuedAssetId, someAssetAmount)

      assertBadRequestAndMessage(
        sender.reissue(firstKeyPair, issuedAssetId, someAssetAmount, reissuable = true, fee = reissueReducedFee, version = v),
        "Asset is not reissuable"
      )
      nodes.waitForHeightArise()
      // Neither the asset supply nor the WAVES balance may change after rejection.
      miner.assertAssetBalance(firstAddress, issuedAssetId, someAssetAmount)
      miner.assertBalances(firstAddress, balance - issueFee, effectiveBalance - issueFee)
    }
  }

  // A fee one token below the minimum must fail with StateCheckFailed and a
  // message that names the required minimal fee.
  test("not able to reissue if cannot pay fee - less than required") {
    for (v <- reissueTxSupportedVersions) {
      val issuedAssetId = sender.issue(firstKeyPair, "name3", "description3", someAssetAmount, decimals = 2, reissuable = true, issueFee).id
      nodes.waitForHeightAriseAndTxPresent(issuedAssetId)

      assertApiError(sender.reissue(firstKeyPair, issuedAssetId, someAssetAmount, reissuable = true, fee = reissueReducedFee - 1, version = v)) { error =>
        error.id shouldBe StateCheckFailed.Id
        error.message should include(s"Fee for ReissueTransaction (${reissueReducedFee - 1} in WAVES) does not exceed minimal value of $reissueReducedFee WAVES.")
      }
    }
  }

  // A fee above the whole effective balance must be rejected with a balance
  // error, again leaving all balances unchanged.
  test("not able to reissue if cannot pay fee - insufficient funds") {
    for (v <- reissueTxSupportedVersions) {
      val (balance, effectiveBalance) = miner.accountBalances(firstAddress)
      val reissueFee = effectiveBalance + 1.waves

      val issuedAssetId = sender.issue(firstKeyPair, "name4", "description4", someAssetAmount, decimals = 2, reissuable = true, issueFee).id
      nodes.waitForHeightAriseAndTxPresent(issuedAssetId)

      assertBadRequestAndMessage(
        sender.reissue(firstKeyPair, issuedAssetId, someAssetAmount, reissuable = true, fee = reissueFee, version = v),
        "Accounts balance errors"
      )
      nodes.waitForHeightArise()
      miner.assertAssetBalance(firstAddress, issuedAssetId, someAssetAmount)
      miner.assertBalances(firstAddress, balance - issueFee, effectiveBalance - issueFee)
    }
  }

  // One default (mining, quorum 0) node plus one non-mining node.
  override protected def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .withDefault(1)
      .withSpecial(_.nonMiner)
      .buildNonConflicting()
}
wavesplatform/Waves
node-it/src/test/scala/com/wavesplatform/it/sync/transactions/ReissueTransactionSuite.scala
Scala
mit
4,650
package com.themillhousegroup.gatsby.stubby

import com.dividezero.stubby.core.model.StubExchange
import java.util.concurrent.atomic.AtomicBoolean
import scala.concurrent.Future
import com.typesafe.scalalogging.slf4j.Logging

/** Ability to register stub request/response exchanges under a request name. */
trait CanAddStubExchanges {

  /**
   * Convenience overload for a single exchange.
   * @return true if the exchange was added
   */
  def addExchange(requestName: String, se: StubExchange) = addExchanges(requestName, Seq(se))

  def addExchanges(requestName: String, ses: Seq[StubExchange]): Boolean
}

/** Ability to remove previously-registered stub exchanges. */
trait CanRemoveStubExchanges {
  /**
   * @return true at least one exchange was removed
   */
  def removeExchange(prefix: String): Boolean
}

/**
 * Crude mutual exclusion between named tasks, implemented as a busy-wait
 * (sleeping `loopWaitMillis` between attempts) on an AtomicBoolean gate.
 */
trait EnforcesMutualExclusion {
  this: Logging =>

  // Gate flag: true while some task holds the lock.
  val token = new AtomicBoolean(false)
  val loopWaitMillis = 1000

  // FIX: @volatile added. This var is written by whichever pool thread wins
  // the CAS loop in acquireLock and read from other threads in releaseLock
  // (and the wait-loop's log line); without a memory barrier those reads
  // could observe a stale holder and wrongly refuse/grant a release.
  @volatile var currentLockHolder: Option[String] = None

  import scala.concurrent.ExecutionContext.Implicits.global

  /**
   * Spins until the gate is free, then records `taskName` as the holder.
   *
   * FIX (doc only): the previous scaladoc had the meaning inverted — the
   * returned value is `hadToWait`.
   *
   * @return Future(false) if we acquired the lock immediately,
   *         Future(true) if we had to wait for it
   */
  def acquireLock(taskName: String): Future[Boolean] = {
    logger.debug(s"acquireLock entered for $taskName")
    // hack impl: poll the AtomicBoolean rather than using a real queue/monitor
    Future {
      var hadToWait = false
      while (!token.compareAndSet(false, true)) {
        hadToWait = true
        logger.debug(s"Awaiting lock for $taskName because the current holder is $currentLockHolder")
        Thread.sleep(loopWaitMillis)
      }
      logger.debug(s"ACQUIRED Lock for $taskName")
      currentLockHolder = Some(taskName)
      hadToWait
    }
  }

  /**
   * Releases the lock, but only if `taskName` is the recorded holder.
   *
   * @return true if we were the holder, and we released it
   */
  def releaseLock(taskName: String): Boolean = {
    currentLockHolder.filter(_ == taskName).fold {
      logger.warn(s"Can't release lock; $taskName is not the holder: $currentLockHolder")
      false
    } { holder =>
      logger.debug(s"Releasing lock for $holder")
      // Clear the holder before opening the gate, so a thread that wins the
      // CAS immediately afterwards never observes the previous holder's name.
      currentLockHolder = None
      token.set(false)
      true
    }
  }
}

/** Aggregate capability trait used by the runtime-stubbing components. */
trait RuntimeStubbing extends CanAddStubExchanges
  with CanRemoveStubExchanges
  with EnforcesMutualExclusion
  with Logging
themillhousegroup/gatsby
src/main/scala/com/themillhousegroup/gatsby/stubby/StubbyTraits.scala
Scala
mit
2,002
/*
 * Copyright 2021 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package config

import javax.inject.Inject
import play.api.i18n.Messages
import uk.gov.hmrc.play.bootstrap.config.ServicesConfig

/** URLs the frontend needs, derived from service configuration. */
trait PlaContext {
  def getPageHelpPartial()(messages: Messages): String
  def assetsUrl: String
}

/** Default [[PlaContext]] backed by `ServicesConfig` lookups. */
class PlaContextImpl @Inject()(servicesConfig: ServicesConfig) extends PlaContext {

  // Problem-report partial exposed by the contact-frontend service.
  override def getPageHelpPartial()(messages: Messages): String = {
    val contactFrontendBase = servicesConfig.baseUrl("contact-frontend")
    contactFrontendBase + "/contact/problem_reports"
  }

  // Versioned assets base URL, always terminated with a slash.
  override def assetsUrl: String = {
    val base    = servicesConfig.getString("assets.url")
    val version = servicesConfig.getString("assets.version")
    s"$base$version/"
  }
}
hmrc/pensions-lifetime-allowance-frontend
app/config/PlaContext.scala
Scala
apache-2.0
1,188
/*
    This file is part of Rimbot.

    Rimbot is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    Rimbot is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with Rimbot.  If not, see <http://www.gnu.org/licenses/>.
*/
package net.fgsquad.rimbot

/** Immutable colony state: active colonists, dead colonists, and the join queue. */
case class Colony(val ingame: List[String], val ded: List[String], val queue: Queue[String])

object Colony {

  /** Renders names as "a, b and c" (two or more), "a" (one), or "nobody" (none). */
  def showstring(seq: Seq[String]): String =
    seq.toList match {
      case _ :: _ :: _ => seq.init.mkString(", ") ++ " and " ++ seq.last
      case only :: Nil => only
      case _           => "nobody"
    }

  /** Queues `name` unless already queued, dead, or in game; Left carries the reason. */
  def joinqueue(colony: Colony)(name: String): Either[String, Colony] =
    if (colony.queue.any(name == _)) Left("already queued")
    else if (colony.ded.contains(name)) Left("already ded")
    else if (colony.ingame.contains(name)) Left("already in game")
    else Right(colony.copy(queue = colony.queue.enqueue(name)))

  /** Full status report: in-game line, optional death line, queue line. */
  def show(colony: Colony): List[String] = {
    val ingameLine = colony.ingame match {
      case Nil             => "There are no viewer colonists in game."
      case colonist :: Nil => s"lone colonist $colonist represents chat in this colony."
      case _               => s"colonists ${showstring(colony.ingame)} represent chat in this colony."
    }
    val deadLine = colony.ded match {
      case Nil         => None
      case head :: Nil => Some(s"$head has met their demise.")
      case _           => Some(s"${showstring(colony.ded)} are ded.")
    }
    val queued = colony.queue.toList
    val queueLine = queued match {
      case Nil         => "Nobody is queued up to join the colony."
      case head :: Nil => s"Only $head is queued to join the colony."
      case _           => s"${showstring(queued)} are waiting in line to join their doom. Eh, the colony."
    }
    // The death line, when present, sits between the in-game and queue lines.
    ingameLine :: (deadLine.toList ::: List(queueLine))
  }

  /** Compact status report: switches to counts once the rosters grow. */
  def showcompact(colony: Colony) = {
    val ingameLine = colony.ingame match {
      case Nil             => "There are no viewer colonists in game."
      case colonist :: Nil => s"Lone colonist $colonist represents chat in this colony."
      case _               => s"There are ${colony.ingame.length} colonists in the colony."
    }
    val deadLine = colony.ded match {
      case Nil         => None
      case head :: Nil => Some(s"$head has met their demise.")
      case l           => Some(s"${l.length} colonists are dead")
    }
    val queued = colony.queue.toList
    val queueLine = queued match {
      case Nil         => "Nobody is queued up to join the colony."
      case head :: Nil => s"Only $head is queued to join the colony."
      case l if l.length < 5 =>
        s"${showstring(queued)} are waiting in line to join their doom. Eh, the colony."
      case l =>
        s"${showstring(l.take(3))} and ${l.length - 3} other viewers are waiting in line to join their doom. Eh, the colony."
    }
    ingameLine :: (deadLine.toList ::: List(queueLine))
  }

  /** Starts a fresh colony, keeping only the existing join queue. */
  def newcolony(oldcolony: Colony) = new Colony(Nil, Nil, oldcolony.queue)

  /** Moves an in-game colonist to the ded list; None when they are not in game. */
  def die(colony: Colony)(colonist: String): Option[Colony] =
    if (!colony.ingame.contains(colonist)) None
    else Some(colony.copy(ingame = colony.ingame.filterNot(_ == colonist), ded = colonist :: colony.ded))

  /** Pops the next queued viewer into the colony, if anybody is waiting. */
  def recruit(colony: Colony): Option[(String, Colony)] =
    colony.queue.dequeue.map {
      case (next, rest) => (next, colony.copy(ingame = next :: colony.ingame, queue = rest))
    }

  /** Empty colony with an empty queue. */
  def apply() = new Colony(Nil, Nil, Queue.empty)

  /** Prefers recruiting a queued viewer who is currently chatting; falls back to plain recruit. */
  def recruitFrom(colony: Colony, chatters: List[String]): Option[(String, Colony)] = {
    val present: Option[(String, Queue[String])] = Queue.takeFrom(colony.queue, chatters)
    present
      .map { case (picked, rest) => (picked, colony.copy(ingame = picked :: colony.ingame, queue = rest)) }
      .orElse(recruit(colony))
  }
}
martijnhoekstra/rimbot
src/main/scala/Colony.scala
Scala
agpl-3.0
4,333
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.carbondata.integration.spark.testsuite.dataload

import java.math.BigDecimal

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.sql.Row
import org.scalatest.BeforeAndAfterAll

import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.metadata.CarbonMetadata
import org.apache.spark.sql.test.util.QueryTest
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties

/**
 * General CSV data-load tests.
 *
 * NOTE: the COUNT(*) tests all load into the shared `loadtest` table and run
 * in declaration order — each load adds on top of the previous tests' rows,
 * which is why the expected counts climb (6, 10, 14, 18, 22).
 */
class TestLoadDataGeneral extends QueryTest with BeforeAndAfterAll {

  override def beforeAll {
    sql("DROP TABLE IF EXISTS loadtest")
    sql(
      """
        | CREATE TABLE loadtest(id int, name string, city string, age int)
        | STORED BY 'org.apache.carbondata.format'
      """.stripMargin)
  }

  /** Whether a segment directory with `segmentId` exists under partition "0" of the table. */
  private def checkSegmentExists(
      segmentId: String,
      databaseName: String,
      tableName: String): Boolean = {
    val carbonTable = CarbonMetadata.getInstance().getCarbonTable(databaseName + "_" + tableName)
    val partitionPath = CarbonStorePath.getCarbonTablePath(storeLocation,
      carbonTable.getCarbonTableIdentifier).getPartitionDir("0")
    val fileType: FileFactory.FileType = FileFactory.getFileType(partitionPath)
    val carbonFile = FileFactory.getCarbonFile(partitionPath, fileType)
    val segments: ArrayBuffer[String] = ArrayBuffer()
    carbonFile.listFiles.foreach { file =>
      // "/dummy" suffix — presumably getSegmentId parses the parent path
      // component; TODO confirm against DataPathUtil.
      segments += CarbonTablePath.DataPathUtil.getSegmentId(file.getAbsolutePath + "/dummy")
    }
    segments.contains(segmentId)
  }

  // Plain CSV load: 6 rows total in loadtest.
  test("test data loading CSV file") {
    val testData = s"$resourcesPath/sample.csv"
    sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
    checkAnswer(
      sql("SELECT COUNT(*) FROM loadtest"),
      Seq(Row(6))
    )
  }

  // Extension-less file: +4 rows, 10 total.
  test("test data loading CSV file without extension name") {
    val testData = s"$resourcesPath/sample"
    sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
    checkAnswer(
      sql("SELECT COUNT(*) FROM loadtest"),
      Seq(Row(10))
    )
  }

  // GZIP-compressed file: +4 rows, 14 total.
  test("test data loading GZIP compressed CSV file") {
    val testData = s"$resourcesPath/sample.csv.gz"
    sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
    checkAnswer(
      sql("SELECT COUNT(*) FROM loadtest"),
      Seq(Row(14))
    )
  }

  // BZIP2-compressed file: +4 rows, 18 total.
  test("test data loading BZIP2 compressed CSV file") {
    val testData = s"$resourcesPath/sample.csv.bz2"
    sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
    checkAnswer(
      sql("SELECT COUNT(*) FROM loadtest"),
      Seq(Row(18))
    )
  }

  // Non-default (\017) delimiter: +4 rows, 22 total.
  test("test data loading CSV file with delimiter char \\\\017") {
    val testData = s"$resourcesPath/sample_withDelimiter017.csv"
    sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest options ('delimiter'='\\\\017')")
    checkAnswer(
      sql("SELECT COUNT(*) FROM loadtest"),
      Seq(Row(22))
    )
  }

  // Unparseable measure values must load as NULL, not fail the load.
  test("test data loading with invalid values for mesasures") {
    val testData = s"$resourcesPath/invalidMeasures.csv"
    sql("drop table if exists invalidMeasures")
    sql("CREATE TABLE invalidMeasures (country String, salary double, age decimal(10,2)) STORED BY 'carbondata'")
    sql(s"LOAD DATA LOCAL INPATH '$testData' into table invalidMeasures options('Fileheader'='country,salary,age')")
    checkAnswer(
      sql("SELECT * FROM invalidMeasures"),
      Seq(Row("India", null, new BigDecimal("22.44")), Row("Russia", null, null), Row("USA", 234.43, null))
    )
  }

  // Two loads into a table whose name contains '_' produce segments "0" and "1".
  test("test data loading into table whose name has '_'") {
    sql("DROP TABLE IF EXISTS load_test")
    sql(""" CREATE TABLE load_test(id int, name string, city string, age int)
        STORED BY 'org.apache.carbondata.format' """)
    val testData = s"$resourcesPath/sample.csv"
    try {
      sql(s"LOAD DATA LOCAL INPATH '$testData' into table load_test")
      sql(s"LOAD DATA LOCAL INPATH '$testData' into table load_test")
    } catch {
      case ex: Exception =>
        assert(false)
    }
    assert(checkSegmentExists("0", "default", "load_test"))
    assert(checkSegmentExists("1", "default", "load_test"))
    sql("DROP TABLE load_test")
  }

  // SINGLE_PASS load must still produce queryable data.
  test("test data loading into table with Single Pass") {
    sql("DROP TABLE IF EXISTS load_test_singlepass")
    sql(""" CREATE TABLE load_test_singlepass(id int, name string, city string, age int)
        STORED BY 'org.apache.carbondata.format' """)
    val testData = s"$resourcesPath/sample.csv"
    try {
      sql(s"LOAD DATA LOCAL INPATH '$testData' into table load_test_singlepass options ('SINGLE_PASS'='TRUE')")
    } catch {
      case ex: Exception =>
        assert(false)
    }
    checkAnswer(
      sql("SELECT id,name FROM load_test_singlepass where name='eason'"),
      Seq(Row(2, "eason"))
    )
    sql("DROP TABLE load_test_singlepass")
  }

  // Forces the intermediate-merge limits to 1 to exercise the intermediate
  // sort path with decimal data, then restores the defaults.
  test("test load data with decimal type and sort intermediate files as 1") {
    // FIX: the original dropped `carbon_table`, a table this test never
    // creates, so `carbonBigDecimal` leaked into subsequent suites.
    sql("drop table if exists carbonBigDecimal")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.SORT_INTERMEDIATE_FILES_LIMIT, "1")
      .addProperty(CarbonCommonConstants.SORT_SIZE, "1")
      .addProperty(CarbonCommonConstants.DATA_LOAD_BATCH_SIZE, "1")
    sql("create table if not exists carbonBigDecimal (ID Int, date Timestamp, country String, name String, phonetype String, serialname String, salary decimal(27, 10)) STORED BY 'org.apache.carbondata.format'")
    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/decimalBoundaryDataCarbon.csv' into table carbonBigDecimal")

    // Restore the defaults so later tests are unaffected.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.SORT_INTERMEDIATE_FILES_LIMIT,
        CarbonCommonConstants.SORT_INTERMEDIATE_FILES_LIMIT_DEFAULT_VALUE)
      .addProperty(CarbonCommonConstants.SORT_SIZE, CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL)
      .addProperty(CarbonCommonConstants.DATA_LOAD_BATCH_SIZE,
        CarbonCommonConstants.DATA_LOAD_BATCH_SIZE_DEFAULT)
    sql("drop table if exists carbonBigDecimal")
  }

  override def afterAll {
    sql("DROP TABLE if exists loadtest")
    sql("drop table if exists invalidMeasures")
    // Safety net for the decimal test's table in case that test aborted mid-way.
    sql("drop table if exists carbonBigDecimal")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.SORT_INTERMEDIATE_FILES_LIMIT,
        CarbonCommonConstants.SORT_INTERMEDIATE_FILES_LIMIT_DEFAULT_VALUE)
      .addProperty(CarbonCommonConstants.SORT_SIZE, CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL)
      .addProperty(CarbonCommonConstants.DATA_LOAD_BATCH_SIZE,
        CarbonCommonConstants.DATA_LOAD_BATCH_SIZE_DEFAULT)
  }
}
HuaweiBigData/carbondata
integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
Scala
apache-2.0
7,354
package scala.c.engine class RadixSort extends StandardTest { "radix sort test 1" should "print the correct results" in { val code = """ //#include <stdio.h> #include <limits.h> //#include <stdlib.h> typedef unsigned uint; #define swap(a, b) { tmp = a; a = b; b = tmp; } #define each(i, x) for (i = 0; i < x; i++) /* sort unsigned ints */ static void rad_sort_u(uint *from, uint *to, uint bit) { if (!bit || to < from + 1) return; uint *ll = from, *rr = to - 1, tmp; // while (1) { // /* find left most with bit, and right most without bit, swap */ // while (ll < rr && !(*ll & bit)) ll++; // while (ll < rr && (*rr & bit)) rr--; // if (ll >= rr) break; // swap(*ll, *rr); // } if (!(bit & *ll) && ll < to) ll++; bit >>= 1; //rad_sort_u(from, ll, bit); //rad_sort_u(ll, to, bit); } /* sort signed ints: flip highest bit, sort as unsigned, flip back */ static void radix_sort(int *a, const int len) { int i; uint *x = (uint*) a; each(i, len) x[i] ^= INT_MIN; rad_sort_u(x, x + len, INT_MIN); each(i, len) x[i] ^= INT_MIN; } static inline void radix_sort_unsigned(uint *a, const int len) { rad_sort_u(a, a + len, (uint)INT_MIN); } int main(void) { int len = 16, i; int x[16] = {64, 285, 27, 501, 348, -293, 487, 542, 1645, 4, 8523, 7625, 184, 5792, 45803, 274}; radix_sort(x, len); each(i, len) printf("%d\\n", x[i]); return 0; } """ checkResults(code) } } class BubbleSort extends StandardTest { "bubble sort test 1" should "print the correct results" in { val code = """ int main() { int array[5] = {7,4,0,2,6}, n = 5, c, d, swap; for (c = 0 ; c < ( n - 1 ); c++) { for (d = 0 ; d < n - c - 1; d++) { if (array[d] > array[d+1]) /* For decreasing order use < */ { swap = array[d]; array[d] = array[d+1]; array[d+1] = swap; } } } for ( c = 0 ; c < n ; c++ ) printf("%d\\n", array[c]); return 0; } """ checkResults(code) } } class InsertionSort extends StandardTest { "insertion sort test 1" should "print the correct results" in { val code = """ void insertion_sort(int *a, int n) { int i = 0; for(i = 
1; i < n; ++i) { int tmp = a[i]; int j = i; while(j > 0 && tmp < a[j - 1]) { a[j] = a[j - 1]; --j; } a[j] = tmp; } } int main () { int a[] = {4, 65, 2, -31, 0, 99, 2, 83, 782, 1}; int n = sizeof a / sizeof a[0]; int i; for (i = 0; i < n; i++) printf("%d\\n", a[i]); insertion_sort(a, n); } """ checkResults(code) } } class StrandSort extends StandardTest { "strand sort test 1" should "print the correct results" in { // http://rosettacode.org/wiki/Sorting_algorithms/Quicksort#C val code = """ #include <stdio.h> typedef struct node_t *node, node_t; struct node_t { int v; node next; }; typedef struct { node head, tail; } slist; void push(slist *l, node e) { if (!l->head) l->head = e; if (l->tail) l->tail->next = e; l->tail = e; } node removehead(slist *l) { node e = l->head; if (e) { l->head = e->next; e->next = 0; } return e; } void join(slist *a, slist *b) { push(a, b->head); a->tail = b->tail; } void merge(slist *a, slist *b) { slist r = {0}; while (a->head && b->head) push(&r, removehead(a->head->v <= b->head->v ? a : b)); join(&r, a->head ? a : b); *a = r; b->head = b->tail = 0; } void sort(int *ar, int len) { node_t all[len]; // array to list int i = 0; for (i = 0; i < len; i++) all[i].v = ar[i], all[i].next = i < len - 1 ? all + i + 1 : 0; slist list = {all, all + len - 1}, rem, strand = {0}, res = {0}; node e = 0; for (e = 0; list.head; list = rem) { int hh = 0; // FOR SOME REASON NEED THIS FOR NO ERRORS (clobbering?) rem.head = rem.tail = 0; while ((e = removehead(&list))) push((!strand.head || e->v >= strand.tail->v) ? 
&strand : &rem, e); merge(&res, &strand); } // list to array int z = 0; for (z = 0; res.head; z++, res.head = res.head->next) ar[z] = res.head->v; } void show(const char *title, int *x, int len) { printf("%s ", title); int i = 0; for (i = 0; i < len; i++) printf("%3d ", x[i]); putchar('\\n'); } int main(void) { int x[] = {-2,0,-2,5,5,3,-1,-3,5,5,0,2,-4,4,2}; # define SIZE sizeof(x)/sizeof(int) show("before sort:", x, SIZE); sort(x, sizeof(x)/sizeof(int)); show("after sort: ", x, SIZE); } """ checkResults(code) } } class QuickSort extends StandardTest { "quick sort test 1" should "print the correct results" in { // http://rosettacode.org/wiki/Sorting_algorithms/Quicksort#C val code = """ void quick_sort (int *a, int n) { int i, j, p, t; if (n < 2) return; p = a[n / 2]; for (i = 0, j = n - 1;; i++, j--) { while (a[i] < p) i++; while (p < a[j]) j--; if (i >= j) break; t = a[i]; a[i] = a[j]; a[j] = t; } quick_sort(a, i); quick_sort(a + i, n - i); } int main (void) { int a[] = {4, 65, 2, -31, 0, 99, 2, 83, 782, 1}; int n = sizeof a / sizeof a[0]; int i; for (i = 0; i < n; i++) printf("%d\\n", a[i]); quick_sort(a, n); for (i = 0; i < n; i++) printf("%d\\n", a[i]); return 0; } """ checkResults(code) } } class ShellSort extends StandardTest { "shell sort test 1" should "print the correct results" in { // http://rosettacode.org/wiki/Sorting_algorithms/Shell_sort#C val code = """ void bead_sort(int *a, int len) { int i, j, max, sum; unsigned char *beads; # define BEAD(i, j) beads[i * max + j] for (i = 1, max = a[0]; i < len; i++) if (a[i] > max) max = a[i]; beads = calloc(1, max * len); /* mark the beads */ for (i = 0; i < len; i++) for (j = 0; j < a[i]; j++) BEAD(i, j) = 1; for (j = 0; j < max; j++) { /* count how many beads are on each post */ for (sum = i = 0; i < len; i++) { sum += BEAD(i, j); BEAD(i, j) = 0; } /* mark bottom sum beads */ for (i = len - sum; i < len; i++) BEAD(i, j) = 1; } for (i = 0; i < len; i++) { for (j = 0; j < max && BEAD(i, j); j++); a[i] = j; } 
free(beads); } int main() { int i, x[] = {5, 3, 1, 7, 4, 1, 1, 20}; int len = sizeof(x)/sizeof(x[0]); bead_sort(x, len); for (i = 0; i < len; i++) printf("%d\\n", x[i]); return 0; } """ checkResults(code) } } class BogoSort extends StandardTest { "bogo sort test 1" should "print the correct results" in { // http://rosettacode.org/wiki/Sorting_algorithms/Bogo_sort#C val code = """ #include <stdbool.h> bool is_sorted(int *a, int n) { while ( --n >= 1 ) { if ( a[n] < a[n-1] ) return false; } return true; } void shuffle(int *a, int n) { int i, t, r; for(i=0; i < n; i++) { t = a[i]; r = rand() % n; a[i] = a[r]; a[r] = t; } } void bogosort(int *a, int n) { while ( !is_sorted(a, n) ) shuffle(a, n); } int main() { int numbers[] = { 1, 10, 9, 7, 3, 0 }; int i; bogosort(numbers, 6); for (i=0; i < 6; i++) printf("%d\\n", numbers[i]); } """ checkResults(code) } } class BeadSort extends StandardTest { "bead sort test 1" should "print the correct results" in { // http://rosettacode.org/wiki/Sorting_algorithms/Circle_sort#C val code = """ #include <stdio.h> #include <stdlib.h> void bead_sort(int *a, int len) { int i, j, max, sum; unsigned char *beads; # define BEAD(i, j) beads[i * max + j] for (i = 1, max = a[0]; i < len; i++) if (a[i] > max) max = a[i]; beads = calloc(1, max * len); /* mark the beads */ for (i = 0; i < len; i++) for (j = 0; j < a[i]; j++) BEAD(i, j) = 1; for (j = 0; j < max; j++) { /* count how many beads are on each post */ for (sum = i = 0; i < len; i++) { sum += BEAD(i, j); BEAD(i, j) = 0; } /* mark bottom sum beads */ for (i = len - sum; i < len; i++) BEAD(i, j) = 1; } for (i = 0; i < len; i++) { for (j = 0; j < max && BEAD(i, j); j++); a[i] = j; } free(beads); } int main() { int i, x[] = {5, 3, 1, 7, 4, 1, 1, 20}; int len = sizeof(x)/sizeof(x[0]); bead_sort(x, len); for (i = 0; i < len; i++) printf("%d\\n", x[i]); return 0; } """ checkResults(code) } } class CircleSort extends StandardTest { "circle sort test 1" should "print the correct results" in { // 
http://rosettacode.org/wiki/Sorting_algorithms/Circle_sort#C val code = """ int circle_sort_inner(int *start, int *end) { int *p, *q, t, swapped; if (start == end) return 0; // funny "||" on next line is for the center element of odd-lengthed array for (swapped = 0, p = start, q = end; p<q || (p==q && ++q); p++, q--) if (*p > *q) { t = *p, *p = *q, *q = t, swapped = 1; } // q == p-1 at this point return swapped | circle_sort_inner(start, q) | circle_sort_inner(p, end); } //helper function to show arrays before each call void circle_sort(int *x, int n) { do { int i; for (i = 0; i < n; i++) printf("%d\\n", x[i]); } while (circle_sort_inner(x, x + (n - 1))); } int main(void) { int x[] = {5, -1, 101, -4, 0, 1, 8, 6, 2, 3}; circle_sort(x, sizeof(x) / sizeof(*x)); return 0; } """ checkResults(code) } } class MergeSort extends StandardTest { "merge sort test 1" should "print the correct results" in { // http://quiz.geeksforgeeks.org/merge-sort/ val code = """ // Merges two subarrays of arr[]. // First subarray is arr[l..m] // Second subarray is arr[m+1..r] void merge(int arr[], int l, int m, int r) { int i, j, k; int n1 = m - l + 1; int n2 = r - m; /* create temp arrays */ int L[n1], R[n2]; /* Copy data to temp arrays L[] and R[] */ for (i = 0; i < n1; i++) L[i] = arr[l + i]; for (j = 0; j < n2; j++) R[j] = arr[m + 1+ j]; /* Merge the temp arrays back into arr[l..r]*/ i = 0; // Initial index of first subarray j = 0; // Initial index of second subarray k = l; // Initial index of merged subarray while (i < n1 && j < n2) { if (L[i] <= R[j]) { arr[k] = L[i]; i++; } else { arr[k] = R[j]; j++; } k++; } /* Copy the remaining elements of L[], if there are any */ while (i < n1) { arr[k] = L[i]; i++; k++; } /* Copy the remaining elements of R[], if there are any */ while (j < n2) { arr[k] = R[j]; j++; k++; } } /* l is for left index and r is right index of the sub-array of arr to be sorted */ void mergeSort(int arr[], int l, int r) { if (l < r) { // Same as (l+r)/2, but avoids 
overflow for // large l and h int m = l+(r-l)/2; // Sort first and second halves mergeSort(arr, l, m); mergeSort(arr, m+1, r); merge(arr, l, m, r); } } /* UTILITY FUNCTIONS */ /* Function to print an array */ void printArray(int A[], int size) { int i; for (i=0; i < size; i++) printf("%d\\n", A[i]); } /* Driver program to test above functions */ int main() { int arr[] = {12, 11, 13, 5, 6, 7}; int arr_size = sizeof(arr)/sizeof(arr[0]); printArray(arr, arr_size); mergeSort(arr, 0, arr_size - 1); printArray(arr, arr_size); return 0; } """ checkResults(code) } } class Heapsort extends StandardTest { "heapsort test 1" should "print the correct results" in { // https://rosettacode.org/wiki/Sorting_algorithms/Heapsort#C val code = """ int max (int *a, int n, int i, int j, int k) { int m = i; if (j < n && a[j] > a[m]) { m = j; } if (k < n && a[k] > a[m]) { m = k; } return m; } void downheap (int *a, int n, int i) { while (1) { int j = max(a, n, i, 2 * i + 1, 2 * i + 2); if (j == i) { break; } int t = a[i]; a[i] = a[j]; a[j] = t; i = j; } } void heapsort (int *a, int n) { int i; for (i = (n - 2) / 2; i >= 0; i--) { downheap(a, n, i); } for (i = 0; i < n; i++) { int t = a[n - i - 1]; a[n - i - 1] = a[0]; a[0] = t; downheap(a, n - i - 1, 0); } } int main () { int a[] = {4, 65, 2, -31, 0, 99, 2, 83, 782, 1}; int n = sizeof a / sizeof a[0]; int i; for (i = 0; i < n; i++) printf("%d\\n", a[i]); heapsort(a, n); for (i = 0; i < n; i++) printf("%d\\n", a[i]); return 0; } """ checkResults(code) } }
bdwashbu/cEngine
tests/scala/c/engine/SortingTest.scala
Scala
apache-2.0
16,658
/*
 * @author Genc Mazlami
 *
 * Copyright 2013 University of Zurich
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.signalcollect.dcop.evaluation.maxsum

import com.signalcollect.interfaces.AggregationOperation
import com.signalcollect.Vertex
import com.signalcollect.dcop.vertices.MaxSumVertex

/**
 * Aggregation operation that sums the conflict counts reported by every
 * vertex in the graph, yielding the total number of constraint conflicts.
 */
class MaxSumConflictAggregationOperation extends AggregationOperation[Int] {

  /**
   * Extracts the conflict count from a single vertex.
   *
   * NOTE(review): assumes every vertex in the graph is a `MaxSumVertex`;
   * the cast throws `ClassCastException` otherwise — confirm against the
   * graph-construction code.
   */
  def extract(v: Vertex[_, _]): Int =
    v.asInstanceOf[MaxSumVertex].getNumOfConflicts

  /**
   * Aggregates two partial conflict counts by addition (commutative and
   * associative, so evaluation order does not matter).
   *
   * @note There is no guarantee about the order in which this function gets
   *       executed on the extracted values.
   */
  def aggregate(a: Int, b: Int): Int = a + b

  /**
   * Reduces an arbitrary number of partial counts to their sum.
   * Uses the standard-library `sum` instead of a manual `var` accumulator.
   */
  override def reduce(elements: Stream[Int]): Int = elements.sum

  /**
   * Neutral element of the `aggregate` function:
   * `aggregate(x, neutralElement) == x`
   */
  val neutralElement: Int = 0
}
gmazlami/dcop-maxsum
src/main/scala/com/signalcollect/dcop/evaluation/maxsum/MaxSumConflictAggregationOperation.scala
Scala
apache-2.0
1,914
package pac

import im.mange.jetboot.Renderable
import im.mange.jetboot.comet.Subscriber
import im.mange.jetboot.js.JsCmdFactory
import jsa.comet.LoadFromDatabase
import jsa.model.Detail

import scala.xml.{Elem, Text}

// Naming helper for the panel-selector DOM element ids. Extends the
// project's Hashable trait, which supplies `hash` (its semantics are not
// visible from this file).
object PanelSelectorAgentLocator extends Hashable {
  // Id of the container element the agent renders into and later refills.
  val panelSelectorContent = "panelSelectorContent"
  // Derives a per-key element id by hashing the key; uniqueness depends on
  // Hashable.hash — NOTE(review): confirm collision behaviour if keys matter.
  def panelSelectorContentId(key: String) = s"panelSelectorContent_${hash(key)}"
}

// Comet-style UI agent: renders the panel-selector container, triggers the
// initial database load via its subscriber, and swaps the panel's contents
// when a Detail is selected. fillElement comes from JsCmdFactory.
case class PanelSelectorAgent(sub: Subscriber) extends Renderable with JsCmdFactory {
  import PanelSelectorAgentLocator._

  // Placeholder shown before any detail has been selected.
  private val emptySelection = Text("Nothing there")
  // Most recently selected detail; None until onDetailSelected first fires.
  private var selectedDetail: Option[Detail] = None

  // Initial markup; the inner text is immediately replaced on initialise.
  def render = <div id={panelSelectorContent}> Hello there </div>

  // Startup hook: asks the subscriber to load from the database, then
  // resets the panel to the placeholder text.
  def onInitialise = {
    sub ! LoadFromDatabase()
    fillElement(panelSelectorContent, emptySelection)
  }

  // Records the selection and displays its string form in the panel.
  def onDetailSelected(detail: Detail) = {
    selectedDetail = Some(detail)
    fillElement(panelSelectorContent, Text(detail.toString))
  }
}
gosubpl/justsampleapp
src/main/scala/pac/PanelSelectorAgent.scala
Scala
mit
976
package org.jetbrains.plugins.scala
package editor.smartEnter.fixers

import com.intellij.openapi.editor.Editor
import com.intellij.psi.PsiElement
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.editor.smartEnter.ScalaSmartEnterProcessor
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScBlockExpr, ScWhileStmt}

/**
 * @author Dmitry.Naydanov
 * @author Ksenia.Sautina
 * @since 2/5/13
 */
class ScalaMissingWhileBodyFixer extends ScalaFixer {

  /**
   * Smart-enter fixer: when the caret is inside a `while` statement that
   * lacks a braced body, inserts " {}" right after the closing parenthesis
   * and positions the caret for the user to type the body.
   *
   * No-ops when there is no enclosing `while`, when the body is already a
   * block expression, or when a single-line body with a condition is
   * already present.
   */
  def apply(editor: Editor, processor: ScalaSmartEnterProcessor, psiElement: PsiElement): OperationPerformed =
    PsiTreeUtil.getParentOfType(psiElement, classOf[ScWhileStmt], false) match {
      case null =>
        // Caret is not inside a while statement.
        NoOperation
      case whileStmt =>
        val document = editor.getDocument
        whileStmt.body match {
          case Some(_: ScBlockExpr) =>
            // Already has a braced body — nothing to fix.
            NoOperation
          case Some(bodyExpr)
            if startLine(document, bodyExpr) == startLine(document, whileStmt) &&
              whileStmt.condition.isDefined =>
            // Complete one-liner (condition plus same-line body) — leave it alone.
            NoOperation
          case _ =>
            // Append empty braces after ')', if that parenthesis exists.
            whileStmt.getRightParenthesis match {
              case Some(rParen) =>
                moveToEnd(editor, rParen)
                document.insertString(rParen.getTextRange.getEndOffset, " {}")
                WithEnter(2)
              case None =>
                NoOperation
            }
        }
    }
}
LPTK/intellij-scala
src/org/jetbrains/plugins/scala/editor/smartEnter/fixers/ScalaMissingWhileBodyFixer.scala
Scala
apache-2.0
1,299
package pipelines.text

import breeze.linalg.SparseVector
import evaluation.MulticlassClassifierEvaluator
import loaders.NewsgroupsDataLoader
import nodes.learning.NaiveBayesEstimator
import nodes.nlp._
import nodes.stats.TermFrequency
import nodes.util.{CommonSparseFeatures, MaxClassifier}
import org.apache.spark.{SparkConf, SparkContext}
import pipelines.Logging
import scopt.OptionParser
import workflow.Pipeline

object NewsgroupsPipeline extends Logging {
  val appName = "NewsgroupsPipeline"

  /**
   * Trains a naive-Bayes text classifier on the newsgroups training set and
   * evaluates it on the test set, logging a per-class summary.
   *
   * @param sc   SparkContext to run on
   * @param conf parsed pipeline configuration (data locations, n-gram order,
   *             number of common sparse features to keep)
   * @return the fitted String => Int prediction pipeline
   */
  def run(sc: SparkContext, conf: NewsgroupsConfig): Pipeline[String, Int] = {
    val trainData = NewsgroupsDataLoader(sc, conf.trainLocation)
    val numClasses = NewsgroupsDataLoader.classes.length

    // Build the classifier estimator: normalize text, featurize with
    // n-grams, keep the most common sparse features, then fit naive Bayes.
    logInfo("Training classifier")
    val predictor = Trim andThen
        LowerCase() andThen
        Tokenizer() andThen
        NGramsFeaturizer(1 to conf.nGrams) andThen
        TermFrequency(x => 1) andThen
        (CommonSparseFeatures[Seq[String]](conf.commonFeatures), trainData.data) andThen
        (NaiveBayesEstimator[SparseVector[Double]](numClasses), trainData.data, trainData.labels) andThen
        MaxClassifier

    // Evaluate the classifier on the held-out test set.
    logInfo("Evaluating classifier")
    val testData = NewsgroupsDataLoader(sc, conf.testLocation)
    val testLabels = testData.labels
    val testResults = predictor(testData.data)
    val eval = MulticlassClassifierEvaluator(testResults, testLabels, numClasses)

    logInfo("\n" + eval.summary(NewsgroupsDataLoader.classes))
    predictor
  }

  case class NewsgroupsConfig(
    trainLocation: String = "",
    testLocation: String = "",
    nGrams: Int = 2,
    commonFeatures: Int = 100000)

  /**
   * Parses command-line arguments into a NewsgroupsConfig.
   *
   * On invalid arguments scopt has already printed the usage/error message,
   * so we exit with a non-zero status instead of throwing the opaque
   * NoSuchElementException that `Option.get` used to produce.
   */
  def parse(args: Array[String]): NewsgroupsConfig = new OptionParser[NewsgroupsConfig](appName) {
    head(appName, "0.1")
    opt[String]("trainLocation") required() action { (x,c) => c.copy(trainLocation=x) }
    opt[String]("testLocation") required() action { (x,c) => c.copy(testLocation=x) }
    opt[Int]("nGrams") action { (x,c) => c.copy(nGrams=x) }
    opt[Int]("commonFeatures") action { (x,c) => c.copy(commonFeatures=x) }
  }.parse(args, NewsgroupsConfig()).getOrElse(sys.exit(1))

  /**
   * The actual driver receives its configuration parameters from spark-submit usually.
   *
   * @param args
   */
  def main(args: Array[String]) = {
    val conf = new SparkConf().setAppName(appName)
    conf.setIfMissing("spark.master", "local[2]") // This is a fallback if things aren't set via spark submit.
    val sc = new SparkContext(conf)
    val appConfig = parse(args)
    run(sc, appConfig)
    sc.stop()
  }
}
tomerk/keystone
src/main/scala/pipelines/text/NewsgroupsPipeline.scala
Scala
apache-2.0
2,615
package ch.epfl.yinyang

import scala.reflect.macros.blackbox.Context

/**
 * Typed view over the raw Yin-Yang configuration map.
 *
 * Each field reads one well-known key out of `config` and casts it to the
 * expected type. As before, a missing key throws NoSuchElementException and
 * a value of the wrong type throws ClassCastException at first access.
 */
trait YYConfig {
  /** Raw key -> value configuration supplied by the concrete implementation. */
  val config: Map[String, Any]

  /** Looks up `key` and casts the value — one place instead of ten inline casts. */
  private def setting[A](key: String): A = config(key).asInstanceOf[A]

  val direct: Boolean = setting[Boolean]("direct")
  val debug: Int = setting[Int]("debug")
  val shortenDSLNames: Boolean = setting[Boolean]("shortenDSLNames")
  val mainMethodName: String = setting[String]("mainMethodName")
  val restrictLanguage: Boolean = setting[Boolean]("restrictLanguage")
  val ascribeTerms: Boolean = setting[Boolean]("ascribeTerms")
  val liftTypes: List[Context#Type] = setting[List[Context#Type]]("liftTypes")
  // SI-7234 prevents us from having it as a field to YYTransformers
  val virtualizeFunctions: Boolean = setting[Boolean]("virtualizeFunctions")
  val virtualizeValDef: Boolean = setting[Boolean]("virtualizeValDef")
  val failCompilation: Boolean = setting[Boolean]("failCompilation")
}
scala-yinyang/scala-yinyang
components/yin-yang/src/YYConfig.scala
Scala
bsd-3-clause
965
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.optimizer import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.dsl.plans._ import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap} import org.apache.spark.sql.catalyst.plans.{Cross, Inner, PlanTest} import org.apache.spark.sql.catalyst.plans.logical.{ColumnStat, LocalRelation, LogicalPlan} import org.apache.spark.sql.catalyst.rules.RuleExecutor import org.apache.spark.sql.catalyst.statsEstimation.{StatsEstimationTestBase, StatsTestPlan} import org.apache.spark.sql.internal.SQLConf.{CBO_ENABLED, JOIN_REORDER_ENABLED} class JoinReorderSuite extends PlanTest with StatsEstimationTestBase { object Optimize extends RuleExecutor[LogicalPlan] { val batches = Batch("Resolve Hints", Once, EliminateResolvedHint) :: Batch("Operator Optimizations", FixedPoint(100), CombineFilters, PushDownPredicate, ReorderJoin, PushPredicateThroughJoin, ColumnPruning, CollapseProject) :: Batch("Join Reorder", Once, CostBasedJoinReorder) :: Nil } object ResolveHints extends RuleExecutor[LogicalPlan] { val batches = Batch("Resolve Hints", Once, EliminateResolvedHint) :: Nil } var originalConfCBOEnabled = false var 
originalConfJoinReorderEnabled = false override def beforeAll(): Unit = { super.beforeAll() originalConfCBOEnabled = conf.cboEnabled originalConfJoinReorderEnabled = conf.joinReorderEnabled conf.setConf(CBO_ENABLED, true) conf.setConf(JOIN_REORDER_ENABLED, true) } override def afterAll(): Unit = { try { conf.setConf(CBO_ENABLED, originalConfCBOEnabled) conf.setConf(JOIN_REORDER_ENABLED, originalConfJoinReorderEnabled) } finally { super.afterAll() } } private val columnInfo: AttributeMap[ColumnStat] = AttributeMap(Seq( attr("t1.k-1-2") -> rangeColumnStat(2, 0), attr("t1.v-1-10") -> rangeColumnStat(10, 0), attr("t2.k-1-5") -> rangeColumnStat(5, 0), attr("t3.v-1-100") -> rangeColumnStat(100, 0), attr("t4.k-1-2") -> rangeColumnStat(2, 0), attr("t4.v-1-10") -> rangeColumnStat(10, 0), attr("t5.k-1-5") -> rangeColumnStat(5, 0), attr("t5.v-1-5") -> rangeColumnStat(5, 0) )) private val nameToAttr: Map[String, Attribute] = columnInfo.map(kv => kv._1.name -> kv._1) private val nameToColInfo: Map[String, (Attribute, ColumnStat)] = columnInfo.map(kv => kv._1.name -> kv) // Table t1/t4: big table with two columns private val t1 = StatsTestPlan( outputList = Seq("t1.k-1-2", "t1.v-1-10").map(nameToAttr), rowCount = 1000, // size = rows * (overhead + column length) size = Some(1000 * (8 + 4 + 4)), attributeStats = AttributeMap(Seq("t1.k-1-2", "t1.v-1-10").map(nameToColInfo))) private val t4 = StatsTestPlan( outputList = Seq("t4.k-1-2", "t4.v-1-10").map(nameToAttr), rowCount = 2000, size = Some(2000 * (8 + 4 + 4)), attributeStats = AttributeMap(Seq("t4.k-1-2", "t4.v-1-10").map(nameToColInfo))) // Table t2/t3: small table with only one column private val t2 = StatsTestPlan( outputList = Seq("t2.k-1-5").map(nameToAttr), rowCount = 20, size = Some(20 * (8 + 4)), attributeStats = AttributeMap(Seq("t2.k-1-5").map(nameToColInfo))) private val t3 = StatsTestPlan( outputList = Seq("t3.v-1-100").map(nameToAttr), rowCount = 100, size = Some(100 * (8 + 4)), attributeStats = 
AttributeMap(Seq("t3.v-1-100").map(nameToColInfo))) // Table t5: small table with two columns private val t5 = StatsTestPlan( outputList = Seq("t5.k-1-5", "t5.v-1-5").map(nameToAttr), rowCount = 20, size = Some(20 * (8 + 4)), attributeStats = AttributeMap(Seq("t5.k-1-5", "t5.v-1-5").map(nameToColInfo))) test("reorder 3 tables") { val originalPlan = t1.join(t2).join(t3).where((nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")) && (nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) // The cost of original plan (use only cardinality to simplify explanation): // cost = cost(t1 J t2) = 1000 * 20 / 5 = 4000 // In contrast, the cost of the best plan: // cost = cost(t1 J t3) = 1000 * 100 / 100 = 1000 < 4000 // so (t1 J t3) J t2 is better (has lower cost, i.e. intermediate result size) than // the original order (t1 J t2) J t3. val bestPlan = t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5"))) .select(outputsOf(t1, t2, t3): _*) assertEqualPlans(originalPlan, bestPlan) } test("put unjoinable item at the end and reorder 3 joinable tables") { // The ReorderJoin rule puts the unjoinable item at the end, and then CostBasedJoinReorder // reorders other joinable items. 
val originalPlan = t1.join(t2).join(t4).join(t3).where((nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")) && (nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) val bestPlan = t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5"))) .select(outputsOf(t1, t2, t3): _*) // this is redundant but we'll take it for now .join(t4) .select(outputsOf(t1, t2, t4, t3): _*) assertEqualPlans(originalPlan, bestPlan) } test("reorder 3 tables with pure-attribute project") { val originalPlan = t1.join(t2).join(t3).where((nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")) && (nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .select(nameToAttr("t1.v-1-10")) val bestPlan = t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .select(nameToAttr("t1.k-1-2"), nameToAttr("t1.v-1-10")) .join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5"))) .select(nameToAttr("t1.v-1-10")) assertEqualPlans(originalPlan, bestPlan) } test("reorder 3 tables - one of the leaf items is a project") { val originalPlan = t1.join(t5).join(t3).where((nameToAttr("t1.k-1-2") === nameToAttr("t5.k-1-5")) && (nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .select(nameToAttr("t1.v-1-10")) // Items: t1, t3, project(t5.k-1-5, t5) val bestPlan = t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .select(nameToAttr("t1.k-1-2"), nameToAttr("t1.v-1-10")) .join(t5.select(nameToAttr("t5.k-1-5")), Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t5.k-1-5"))) .select(nameToAttr("t1.v-1-10")) assertEqualPlans(originalPlan, bestPlan) } test("don't reorder if project contains non-attribute") { val originalPlan = t1.join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5"))) .select((nameToAttr("t1.k-1-2") + nameToAttr("t2.k-1-5")) as "key", nameToAttr("t1.v-1-10")) .join(t3, Inner, Some(nameToAttr("t1.v-1-10") === 
nameToAttr("t3.v-1-100"))) .select("key".attr) assertEqualPlans(originalPlan, originalPlan) } test("reorder 4 tables (bushy tree)") { val originalPlan = t1.join(t4).join(t2).join(t3).where((nameToAttr("t1.k-1-2") === nameToAttr("t4.k-1-2")) && (nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")) && (nameToAttr("t4.v-1-10") === nameToAttr("t3.v-1-100"))) // The cost of original plan (use only cardinality to simplify explanation): // cost(t1 J t4) = 1000 * 2000 / 2 = 1000000, cost(t1t4 J t2) = 1000000 * 20 / 5 = 4000000, // cost = cost(t1 J t4) + cost(t1t4 J t2) = 5000000 // In contrast, the cost of the best plan (a bushy tree): // cost(t1 J t2) = 1000 * 20 / 5 = 4000, cost(t4 J t3) = 2000 * 100 / 100 = 2000, // cost = cost(t1 J t2) + cost(t4 J t3) = 6000 << 5000000. val bestPlan = t1.join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5"))) .join(t4.join(t3, Inner, Some(nameToAttr("t4.v-1-10") === nameToAttr("t3.v-1-100"))), Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t4.k-1-2"))) .select(outputsOf(t1, t4, t2, t3): _*) assertEqualPlans(originalPlan, bestPlan) } test("keep the order of attributes in the final output") { val outputLists = Seq("t1.k-1-2", "t1.v-1-10", "t3.v-1-100").permutations while (outputLists.hasNext) { val expectedOrder = outputLists.next().map(nameToAttr) val expectedPlan = t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5"))) .select(expectedOrder: _*) // The plan should not change after optimization assertEqualPlans(expectedPlan, expectedPlan) } } test("SPARK-26352: join reordering should not change the order of attributes") { // This test case does not rely on CBO. 
// It's similar to the test case above, but catches a reordering bug that the one above doesn't val tab1 = LocalRelation('x.int, 'y.int) val tab2 = LocalRelation('i.int, 'j.int) val tab3 = LocalRelation('a.int, 'b.int) val original = tab1.join(tab2, Cross) .join(tab3, Inner, Some('a === 'x && 'b === 'i)) val expected = tab1.join(tab3, Inner, Some('a === 'x)) .join(tab2, Cross, Some('b === 'i)) .select(outputsOf(tab1, tab2, tab3): _*) assertEqualPlans(original, expected) } test("reorder recursively") { // Original order: // Join // / \\ // Union t5 // / \\ // Join t4 // / \\ // Join t3 // / \\ // t1 t2 val bottomJoins = t1.join(t2).join(t3).where((nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")) && (nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .select(nameToAttr("t1.v-1-10")) val originalPlan = bottomJoins .union(t4.select(nameToAttr("t4.v-1-10"))) .join(t5, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t5.v-1-5"))) // Should be able to reorder the bottom part. // Best order: // Join // / \\ // Union t5 // / \\ // Join t4 // / \\ // Join t2 // / \\ // t1 t3 val bestBottomPlan = t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .select(nameToAttr("t1.k-1-2"), nameToAttr("t1.v-1-10")) .join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5"))) .select(nameToAttr("t1.v-1-10")) val bestPlan = bestBottomPlan .union(t4.select(nameToAttr("t4.v-1-10"))) .join(t5, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t5.v-1-5"))) assertEqualPlans(originalPlan, bestPlan) } test("don't reorder if hints present") { val originalPlan = t1.join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5"))) .hint("broadcast") .join( t4.join(t3, Inner, Some(nameToAttr("t4.v-1-10") === nameToAttr("t3.v-1-100"))) .hint("broadcast"), Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t4.k-1-2"))) assertEqualPlans(originalPlan, originalPlan) val originalPlan2 = t1.join(t2, Inner, Some(nameToAttr("t1.k-1-2") === 
nameToAttr("t2.k-1-5"))) .hint("broadcast") .join(t4, Inner, Some(nameToAttr("t4.v-1-10") === nameToAttr("t3.v-1-100"))) .hint("broadcast") .join(t3, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t4.k-1-2"))) assertEqualPlans(originalPlan2, originalPlan2) } test("reorder below and above the hint node") { val originalPlan = t1.join(t2).join(t3) .where((nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")) && (nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .hint("broadcast").join(t4) val bestPlan = t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5"))) .select(outputsOf(t1, t2, t3): _*) .hint("broadcast").join(t4) assertEqualPlans(originalPlan, bestPlan) val originalPlan2 = t1.join(t2).join(t3) .where((nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")) && (nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .join(t4.hint("broadcast")) val bestPlan2 = t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100"))) .join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5"))) .select(outputsOf(t1, t2, t3): _*) .join(t4.hint("broadcast")) assertEqualPlans(originalPlan2, bestPlan2) } private def assertEqualPlans( originalPlan: LogicalPlan, groundTruthBestPlan: LogicalPlan): Unit = { val analyzed = originalPlan.analyze val optimized = Optimize.execute(analyzed) val expected = ResolveHints.execute(groundTruthBestPlan.analyze) assert(analyzed.sameOutput(expected)) // if this fails, the expected plan itself is incorrect assert(analyzed.sameOutput(optimized)) compareJoinOrder(optimized, expected) } private def outputsOf(plans: LogicalPlan*): Seq[Attribute] = { plans.map(_.output).reduce(_ ++ _) } }
hhbyyh/spark
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/JoinReorderSuite.scala
Scala
apache-2.0
14,240
package edu.gemini.sp.vcs2

import edu.gemini.pot.sp.version.VersionMap
import edu.gemini.sp.vcs2.ProgramLocationSet.{Both, LocalOnly, Neither, RemoteOnly}
import edu.gemini.sp.vcs2.VcsAction._
import edu.gemini.sp.vcs2.VcsFailure.NeedsUpdate
import edu.gemini.spModel.core.SPProgramID
import edu.gemini.spModel.obscomp.SPNote
import edu.gemini.util.security.principal.ProgramPrincipal

import java.util.concurrent.atomic.AtomicBoolean

import org.specs2.specification.core.Fragments

import scalaz._

// Specification of the program VCS operations (checkout, add, pull, push and
// sync/retrySync) exercised against the local/remote ODB pair supplied by
// `withVcs`.  From the visible examples, Q1 appears to exist on both peers at
// the start of each example while Q2 is created on demand — confirm in TestEnv.
class VcsSpec extends VcsSpecification {
  import TestEnv._

  // Cancellation flags handed to the operations that poll for cancellation.
  val cancelled = new AtomicBoolean(true)
  val notCancelled = new AtomicBoolean(false)

  "checkout" should {
    "fail if the indicated program doesn't exist remotely" in withVcs { env =>
      notFound(env.local.superStaffVcs.checkout(Q2, DummyPeer, notCancelled), Q2)
    }

    "fail if the user doesn't have access to the program" in withVcs { env =>
      env.remote.addNewProgram(Q2)
      forbidden(env.local.vcs(ProgramPrincipal(Q1)).checkout(Q2, DummyPeer, notCancelled))
    }

    "transfer a program from the remote database to the local database" in withVcs { env =>
      // make the new program and store it remotely
      val remoteQ2 = env.remote.addNewProgram(Q2)

      // run checkout on the local peer
      env.local.superStaffVcs.checkout(Q2, DummyPeer, notCancelled).unsafeRun

      // the local copy must have its own lifespan id, distinct from the remote's
      val localQ2 = env.local.odb.lookupProgramByID(Q2)
      localQ2.getLifespanId must be_!=(remoteQ2.getLifespanId)
    }

    "do nothing if cancelled" in withVcs { env =>
      // make the new program and store it remotely
      env.remote.addNewProgram(Q2)

      // run checkout on the local peer with the cancelled flag set
      env.local.superStaffVcs.checkout(Q2, DummyPeer, cancelled).unsafeRun

      env.local.odb.lookupProgramByID(Q2) must beNull
    }
  }

  "add" should {
    "fail if the indicated program doesn't exist locally" in withVcs { env =>
      notFound(env.local.superStaffVcs.add(Q2, DummyPeer), Q2)
    }

    "fail if the user doesn't have access to the program" in withVcs { env =>
      env.local.addNewProgram(Q2)
      forbidden(env.local.vcs(ProgramPrincipal(Q1)).add(Q2, DummyPeer))
    }

    "transfer a program from the local database to the remote database" in withVcs { env =>
      // make the new program and store it locally
      val localQ2 = env.local.addNewProgram(Q2)

      // run add to send it to the remote peer
      env.local.superStaffVcs.add(Q2, DummyPeer).unsafeRun

      // the remote copy must have its own lifespan id, distinct from the local's
      val remoteQ2 = env.remote.odb.lookupProgramByID(Q2)
      localQ2.getLifespanId must be_!=(remoteQ2.getLifespanId)
    }
  }

  "pull" should {
    "fail if the indicated program doesn't exist locally" in withVcs { env =>
      env.remote.addNewProgram(Q2)
      notFound(env.local.superStaffVcs.pull(Q2, DummyPeer, notCancelled), Q2)
    }

    "fail if the indicated program doesn't exist remotely" in withVcs { env =>
      env.local.addNewProgram(Q2)
      notFound(env.local.superStaffVcs.pull(Q2, DummyPeer, notCancelled), Q2)
    }

    "fail if the user doesn't have access to the program" in withVcs { env =>
      forbidden(env.local.vcs(ProgramPrincipal(Q2)).pull(Q1, DummyPeer, notCancelled))
    }

    "fail if the indicated program has different keys locally vs remotely" in withVcs { env =>
      env.local.addNewProgram(Q2)
      env.remote.addNewProgram(Q2)
      idClash(env.local.superStaffVcs.pull(Q2, DummyPeer, notCancelled), Q2)
    }

    "do nothing if the local version is the same" in withVcs { env =>
      expect(env.local.superStaffVcs.pull(Q1, DummyPeer, notCancelled)) {
        case \/-((Neither,_)) => ok("")
      }
    }

    "do nothing if the local version is newer" in withVcs { env =>
      env.local.progTitle = "The Myth of Sisyphus"
      expect(env.local.superStaffVcs.pull(Q1, DummyPeer, notCancelled)) {
        case \/-((Neither,_)) => ok("")
      } and (env.local.progTitle must_== "The Myth of Sisyphus")
    }

    "merge the updates if the remote version is newer" in withVcs { env =>
      env.remote.progTitle = "The Myth of Sisyphus"
      expect(env.local.superStaffVcs.pull(Q1, DummyPeer, notCancelled)) {
        case \/-((LocalOnly,_)) => ok("")
      } and (env.local.progTitle must_== "The Myth of Sisyphus")
    }

    "do nothing if cancelled" in withVcs { env =>
      env.remote.progTitle = "The Myth of Sisyphus"

      // run pull on the local peer with the cancelled flag set
      env.local.superStaffVcs.pull(Q1, DummyPeer, cancelled).unsafeRun

      // local title is untouched (presumably "The Stranger" is the TestEnv default)
      env.local.progTitle must_== "The Stranger"
    }
  }

  "push" should {
    "fail if the indicated program doesn't exist locally" in withVcs { env =>
      env.remote.addNewProgram(Q2)
      notFound(env.local.superStaffVcs.push(Q2, DummyPeer, notCancelled), Q2)
    }

    "fail if the indicated program doesn't exist remotely" in withVcs { env =>
      env.local.addNewProgram(Q2)
      notFound(env.local.superStaffVcs.push(Q2, DummyPeer, notCancelled), Q2)
    }

    "fail if the user doesn't have access to the program" in withVcs { env =>
      forbidden(env.local.vcs(ProgramPrincipal(Q2)).push(Q1, DummyPeer, notCancelled))
    }

    "fail if the indicated program has different keys locally vs remotely" in withVcs { env =>
      env.local.addNewProgram(Q2)
      env.remote.addNewProgram(Q2)
      idClash(env.local.superStaffVcs.push(Q2, DummyPeer, notCancelled), Q2)
    }

    "do nothing if the local version is the same" in withVcs { env =>
      expect(env.local.superStaffVcs.push(Q1, DummyPeer, notCancelled)) {
        case \/-((Neither,_)) => ok("")
      }
    }

    "fail with NeedsUpdate if the local version is older" in withVcs { env =>
      env.remote.progTitle = "The Myth of Sisyphus"
      expect(env.local.superStaffVcs.push(Q1, DummyPeer, notCancelled)) {
        case -\/(NeedsUpdate) => ok("")
      } and (env.local.progTitle must_== Title)
    }

    "merge the updates if the local version is newer" in withVcs { env =>
      env.local.progTitle = "The Myth of Sisyphus"
      expect(env.local.superStaffVcs.push(Q1, DummyPeer, notCancelled)) {
        case \/-((RemoteOnly,_)) => ok("")
      } and (env.remote.progTitle must_== "The Myth of Sisyphus")
    }

    "do nothing if cancelled" in withVcs { env =>
      env.local.progTitle = "The Myth of Sisyphus"

      // run push on the local peer with the cancelled flag set
      env.local.superStaffVcs.push(Q1, DummyPeer, cancelled).unsafeRun

      env.remote.progTitle must_== "The Stranger"
    }

    // TODO: pending tests with conflicts, which must be rejected
  }

  // Generates the shared examples for both "sync" and "retrySync", which must
  // behave identically apart from the retry mechanics.
  def syncFragments(name: String, syncMethod: (Vcs, SPProgramID) => VcsAction[(ProgramLocationSet,VersionMap)]): Fragments = {
    name should {
      "fail if the indicated program doesn't exist locally" in withVcs { env =>
        env.remote.addNewProgram(Q2)
        notFound(syncMethod(env.local.superStaffVcs, Q2), Q2)
      }

      "fail if the indicated program doesn't exist remotely" in withVcs { env =>
        env.local.addNewProgram(Q2)
        notFound(syncMethod(env.local.superStaffVcs, Q2), Q2)
      }

      "fail if the user doesn't have access to the program" in withVcs { env =>
        forbidden(syncMethod(env.local.vcs(ProgramPrincipal(Q2)), Q1))
      }

      "fail if the indicated program has different keys locally vs remotely" in withVcs { env =>
        env.local.addNewProgram(Q2)
        env.remote.addNewProgram(Q2)
        idClash(syncMethod(env.local.superStaffVcs, Q2), Q2)
      }

      "do nothing if both versions are the same" in withVcs { env =>
        expect(syncMethod(env.local.superStaffVcs, Q1)) {
          case \/-((Neither,_)) => ok("")
        }
      }

      "merge the remote updates if the remote version is newer" in withVcs { env =>
        env.remote.progTitle = "The Myth of Sisyphus"
        expect(syncMethod(env.local.superStaffVcs, Q1)) {
          case \/-((LocalOnly,_)) => ok("")
        } and (env.local.progTitle must_== "The Myth of Sisyphus")
      }

      "send the local updates if the local version is newer" in withVcs { env =>
        env.local.progTitle = "The Myth of Sisyphus"
        expect(syncMethod(env.local.superStaffVcs, Q1)) {
          case \/-((RemoteOnly,_)) => ok("")
        } and (env.remote.progTitle must_== "The Myth of Sisyphus")
      }

      "merge local and remote updates if both have been modified" in withVcs { env =>
        // modify the local program with a new group ...
        val group = env.local.odb.getFactory.createGroup(env.local.prog, null)
        env.local.prog.addGroup(group)

        // ... and the remote program with a new note
        val note = env.remote.odb.getFactory.createObsComponent(env.remote.prog, SPNote.SP_TYPE, null)
        env.remote.prog.addObsComponent(note)

        // after syncing, each side must contain the other's modification
        expect(syncMethod(env.local.superStaffVcs, Q1)) {
          case \/-((Both,_)) => ok("")
        } and (env.remote.prog.getGroups.get(0).getNodeKey must_== group.getNodeKey) and
          (env.local.prog.getObsComponents.get(0).getNodeKey must_== note.getNodeKey)
      }
    }
  }

  syncFragments("sync", (vcs, pid) => vcs.sync(pid, DummyPeer, notCancelled))
  syncFragments("retrySync", (vcs, pid) => vcs.retrySync(pid, DummyPeer, notCancelled, 10))

  "cancelled sync" should {
    "do nothing" in withVcs { env =>
      val group = env.local.odb.getFactory.createGroup(env.local.prog, null)
      env.local.prog.addGroup(group)

      val note = env.remote.odb.getFactory.createObsComponent(env.remote.prog, SPNote.SP_TYPE, null)
      env.remote.prog.addObsComponent(note)

      env.local.superStaffVcs.sync(Q1, DummyPeer, cancelled).unsafeRun

      // neither side received the other's modification
      (env.remote.prog.getGroups.size must_== 0) and
        (env.local.prog.getObsComponents.size must_== 0)
    }
  }

  // TODO: pending tests with conflicts, which must be rejected
  // TODO: not really testing the "retry" part of "retrySync"
}
spakzad/ocs
bundle/edu.gemini.sp.vcs/src/test/scala/edu/gemini/sp/vcs2/VcsSpec.scala
Scala
bsd-3-clause
9,725
/*
 * Copyright 2010 Guardian News and Media
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.gu.conf

import com.gu.conf.impl._
import org.slf4j.Logger
import org.slf4j.LoggerFactory

import java.util.Properties

/** Assembles a [[Configuration]] for an application by layering property
  * sources in priority order: developer per-account overrides, operations
  * properties, stage-based, service-domain-based and common developer
  * properties, and finally the process environment.
  *
  * @param loader    fetches a `Properties` object from a location string
  * @param setup     supplies stage, service domain and environment variables
  * @param shouldLog when false, suppresses all informational logging
  */
private[conf] class GuardianConfigurationStrategy(
    val loader: PropertiesLoader = new PropertiesLoader,
    val setup: SetupConfiguration = new SetupConfiguration,
    val shouldLog: Boolean = true) {

  private final val LOG: Logger = LoggerFactory.getLogger(classOf[GuardianConfigurationStrategy])

  /** Logs `message` at INFO level when logging is enabled.
    *
    * Fix: the varargs must be expanded with `: _*`.  Passing the Scala `Seq`
    * directly hands SLF4J a single `Object` argument, so only the first `{}`
    * placeholder was substituted — and with the `Seq`'s `toString`, not the
    * intended value.
    */
  def log(message: String, objects: Object*): Unit = {
    if (shouldLog) {
      LOG.info(message, objects: _*)
    }
  }

  /** Builds the composite, placeholder-resolving configuration for the
    * application.  Earlier sources in the list take precedence.
    */
  def getConfiguration(applicationName: String, webappConfDirectory: String): Configuration = {
    log("Configuring application {} using classpath configuration directory {}", applicationName, webappConfDirectory)

    val properties = CompositeConfiguration.from(
      getDeveloperAccountOverrideProperties(applicationName),
      getOperationsProperties(applicationName),
      getDeveloperStageBasedProperties(webappConfDirectory),
      getDeveloperServiceDomainBasedProperties(webappConfDirectory),
      getDeveloperCommonProperties(webappConfDirectory),
      getEnvironmentProperties)

    new PlaceholderProcessingConfiguration(properties)
  }

  /** Per-developer overrides from `~/.gu/<application>.properties`. */
  def getDeveloperAccountOverrideProperties(applicationName: String) = {
    val home = System getProperty "user.home"
    val location = "file://%s/.gu/%s.properties".format(home, applicationName)

    log("Loading developer account override properties from " + location)
    val properties = loader getPropertiesFrom location

    new PropertiesBasedConfiguration(location, properties)
  }

  /** Machine-wide operations properties from `/etc/gu/<application>.properties`. */
  def getOperationsProperties(applicationName: String) = {
    val location = "file:///etc/gu/%s.properties" format applicationName

    log("Loading operations properties from " + location)
    val properties = loader getPropertiesFrom location

    new PropertiesBasedConfiguration(location, properties)
  }

  /** Classpath properties named after the current stage (e.g. `PROD.properties`). */
  def getDeveloperStageBasedProperties(confPrefix: String) = {
    val stage = setup.getStage
    val location = "classpath:%s/%s.properties".format(confPrefix, stage)

    log("Loading developer stage based properties from " + location)
    val properties = loader getPropertiesFrom location

    new PropertiesBasedConfiguration(location, properties)
  }

  /** Classpath properties named after the current service domain. */
  def getDeveloperServiceDomainBasedProperties(confPrefix: String) = {
    val serviceDomain = setup.getServiceDomain
    val location = String.format("classpath:%s/%s.properties", confPrefix, serviceDomain)

    log("Loading developer service domain based properties from " + location)
    val properties = loader getPropertiesFrom location

    new PropertiesBasedConfiguration(location, properties)
  }

  /** Shared developer defaults from `<confDir>/global.properties` on the classpath. */
  def getDeveloperCommonProperties(webappConfDirectory: String) = {
    val location = "classpath:%s/global.properties" format webappConfDirectory

    log("Loading developer common properties from " + location)
    val properties = loader getPropertiesFrom location

    new PropertiesBasedConfiguration(location, properties)
  }

  /** Snapshot of the process environment variables as a configuration source. */
  def getEnvironmentProperties = {
    log("Loading system environment variables")

    val props = new Properties()
    setup.getEnvironmentVariables.foreach { pair =>
      props.setProperty(pair._1, pair._2)
    }

    new PropertiesBasedConfiguration("Environment", props)
  }
}
guardian/guardian-configuration
src/main/scala/com.gu.conf/GuardianConfigurationStrategy.scala
Scala
apache-2.0
3,913
/*
	Deduction Tactics
	Copyright (C) 2012-2017  Raymond Dodge

	This program is free software: you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation, either version 3 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
package com.rayrobdod.boardGame.view

/**
 * The return value of [[CoordinateFunctionSpecifierParser]]
 *
 * @group CoordinateFunction
 *
 * @tparam Index the function input
 * @tparam A the function output
 */
trait CoordinateFunction[-Index, @specialized(Int, Boolean) A] {
	/** Do the thing */
	def apply(idx:Index):A

	/** The number of division operations in this function */
	private[view] def divCount:Int = 0
	/** The number of and operations in this function */
	private[view] def andCount:Int = 0
	/** The sum of integers contained in this function */
	private[view] def primitiveSum:Int = 0
	/** Whether this function trivially returns true */
	private[view] def isJustTrue:Boolean = false
	/** The priority with which this function should be used. Higher superscedes lower. */
	final def priority:Int = if (isJustTrue) {0} else {(1000) / (divCount + 1) * (andCount + 1) + primitiveSum}

	/**
	 * Create a new CoordinateFunction which applies its input to both this and rhs, then combines the two results using mapping
	 *
	 * @param rhs the other function
	 * @param mapping the output-combining function
	 * @param name the returnValue's `toString`
	 * @param incrementDivCount magic related to priorities
	 * @param incrementAndCount magic related to priorities
	 */
	private[view] def zipWith[Index2 <: Index, @specialized(Int, Boolean) B, @specialized(Int, Boolean) C](
		rhs:CoordinateFunction[Index2, B],
		mapping:(A,B) => C,
		name:String,
		incrementDivCount:Int = 0,
		incrementAndCount:Int = 0
	):CoordinateFunction[Index2, C] = {
		new CoordinateFunction[Index2, C]{
			override def apply(idx:Index2):C = mapping(CoordinateFunction.this.apply(idx), rhs.apply(idx))
			override def divCount:Int = CoordinateFunction.this.divCount + rhs.divCount + incrementDivCount
			override def andCount:Int = CoordinateFunction.this.andCount + rhs.andCount + incrementAndCount
			override def primitiveSum:Int = CoordinateFunction.this.primitiveSum + rhs.primitiveSum
			override def toString = name
			// On the one hand, equals seems useful, on the other, comparing toStrings seems to work well enough in the tests
			// Not to mention, equals with a Funciton is a bit hard
		}
	}
}

/**
 * Factory methods for CoordinateFunctions
 *
 * @group CoordinateFunction
 */
private[view] object CoordinateFunction {
	/** A constant-valued function; ignores its input entirely. */
	private[this] final class ConstantCoordinateFunction[@specialized(Int, Boolean) A](a:A) extends CoordinateFunction[Any, A]{
		override def apply(idx:Any):A = a
		override def primitiveSum:Int = {if (a.isInstanceOf[Int]) {a.asInstanceOf[Int]} else {0}}
		override def isJustTrue:Boolean = {if (a.isInstanceOf[Boolean]) {a.asInstanceOf[Boolean]} else {false}}
		override def toString:String = a.toString
		override def equals(x:Any):Boolean = x match {
			case rhs:ConstantCoordinateFunction[_] => {
				this.apply("blah").equals(rhs.apply("blah"))
			}
			case _ => false
		}
		// Fix: `equals` was overridden without `hashCode`, so equal constants
		// could land in different hash buckets.  Derive the hash from the same
		// value `equals` compares, keeping the equals/hashCode contract.
		override def hashCode:Int = this.apply("blah").hashCode
	}

	/** A CoordinateFunction that always returns the specified value */
	def constant[@specialized(Int, Boolean) A](a:A):CoordinateFunction[Any, A] = new ConstantCoordinateFunction[A](a)
}
rayrobdod/boardGame
View/src/main/scala/CoordinateFunction.scala
Scala
gpl-3.0
3,806
def rotate[A](startIdx: Int, inputAry: List[A]): List[A] = { val rotatedElems = if (startIdx < 0) inputAry.take(inputAry.size + startIdx) else inputAry.take(startIdx) val nochangeElems = if (startIdx < 0) inputAry.drop(inputAry.size + startIdx) else inputAry.drop(startIdx) nochangeElems ::: rotatedElems } /* scala> def rotate[A](startIdx: Int, inputAry: List[A]): List[A] = { | val rotatedElems = if (startIdx < 0) inputAry.take(inputAry.size + startIdx) else inputAry.take(startIdx) | val nochangeElems = if (startIdx < 0) inputAry.drop(inputAry.size + startIdx) else inputAry.drop(startIdx) | | nochangeElems ::: rotatedElems | } rotate: [A](startIdx: Int, inputAry: List[A])List[A] scala> rotate(3, List('a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, 'j, 'k)) res0: List[Symbol] = List('d, 'e, 'f, 'g, 'h, 'i, 'j, 'k, 'a, 'b, 'c) scala> rotate(-2, List('a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, 'j, 'k)) res1: List[Symbol] = List('j, 'k, 'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i) */
zuqqhi2/s-99
p19.scala
Scala
bsd-3-clause
1,010
/* * Copyright 2013 http4s.org * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.http4s.servlet import javax.servlet.AsyncEvent import javax.servlet.AsyncListener protected[servlet] abstract class AbstractAsyncListener extends AsyncListener { override def onComplete(event: AsyncEvent): Unit = {} override def onError(event: AsyncEvent): Unit = {} override def onStartAsync(event: AsyncEvent): Unit = {} override def onTimeout(event: AsyncEvent): Unit = {} }
http4s/http4s
servlet/src/main/scala/org/http4s/servlet/AbstractAsyncListener.scala
Scala
apache-2.0
995
/*
 * Copyright 2018 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package form

import form.utilities._
import play.api.data.Form
import play.api.data.Forms._
import config.Settings
import config.AppSettings
import play.api.data.Mapping
import service.KeystoreService._

trait Year2015Fields {
  /** Serialises this form's fields for the keystore: booleans stay as their
    * string form; monetary amounts (pounds) are converted to whole pence.
    *
    * Fixes:
    *  - the pounds-to-pence conversion now goes through `BigDecimal` instead
    *    of `Double`, so values such as "1.15" become 115 rather than 114
    *    (`1.15.toDouble * 100` is 114.999...; flooring that lost a penny);
    *  - the lazy `mapValues` view is replaced with a strict `map`, so the
    *    conversion runs once instead of on every access.
    */
  def data(): Map[String, String] =
    toMap(this).map { case (key, value) =>
      val str = value.toString
      val encoded =
        if (str == "true" || str == "false") str
        else (BigDecimal(str) * 100).setScale(0, BigDecimal.RoundingMode.FLOOR).toLong.toString
      key -> encoded
    }
}

/** Common factory pieces shared by the three 2015-period forms: field mappings
  * for defined-benefit and defined-contribution amounts in each sub-period,
  * in either pounds-and-pence or whole-pounds mode.
  */
trait Year2015FormFactory extends Settings {
  settings: Settings =>

  /** Builds the concrete form. */
  def apply(): Form[_ <: Year2015Fields]

  /** Whether amounts are entered as pounds and pence (vs whole pounds). */
  protected def isPoundsAndPence(): Boolean = settings.POUNDS_AND_PENCE

  // Period 1 / period 2, defined benefit (DB) and defined contribution (DC)
  // field mappings keyed for the keystore.
  protected def penceP1DB(): (String, Mapping[BigDecimal]) = P1_DB_KEY -> poundsAndPenceField(true)
  protected def poundsP1DB(): (String, Mapping[Long]) = P1_DB_KEY -> poundsLongField(true)
  protected def penceP2DB(): (String, Mapping[BigDecimal]) = P2_DB_KEY -> poundsAndPenceField(true)
  protected def poundsP2DB(): (String, Mapping[Long]) = P2_DB_KEY -> poundsLongField(true)
  protected def penceP1DC(): (String, Mapping[BigDecimal]) = P1_DC_KEY -> poundsAndPenceField(true)
  protected def poundsP1DC(): (String, Mapping[Long]) = P1_DC_KEY -> poundsLongField(true)
  protected def penceP2DC(): (String, Mapping[BigDecimal]) = P2_DC_KEY -> poundsAndPenceField(true)
  protected def poundsP2DC(): (String, Mapping[Long]) = P2_DC_KEY -> poundsLongField(true)
}

trait Year2015Form {
  /** Selects the appropriate 2015 form for the scheme type combination:
    * both DB and DC, DB only, or (by default) DC only.
    */
  def form(isDB: Boolean, isDC: Boolean): Form[_ <: Year2015Fields] =
    (isDB, isDC) match {
      case (true, true)  => Year2015DBDCForm()
      case (true, false) => Year2015DefinedBenefitForm()
      case _             => Year2015DefinedContributionForm()
    }
}

object Year2015Form extends Year2015Form
hmrc/paac-frontend
app/form/Year2015Form.scala
Scala
apache-2.0
2,264
/*
Copyright 2015 Twitter, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding

import com.twitter.scalding.serialization.CascadingBinaryComparator
import com.twitter.scalding.serialization.OrderedSerialization
import com.twitter.scalding.serialization.StringOrderedSerialization

import org.scalatest.{ Matchers, WordSpec }

// A job that turns on ScaldingRequireOrderedSerialization but provides no
// OrderedSerialization instance for its String keys — running it must fail.
class NoOrderdSerJob(args: Args) extends Job(args) {

  override def config = super.config + (Config.ScaldingRequireOrderedSerialization -> "true")

  TypedPipe.from(TypedTsv[(String, String)]("input"))
    .group
    .max
    .write(TypedTsv[(String, String)]("output"))
}

// Same pipeline shape, but with an implicit OrderedSerialization[String] in
// scope, so the requirement is satisfied and the job runs normally.
class OrderdSerJob(args: Args) extends Job(args) {

  implicit def stringOS: OrderedSerialization[String] = new StringOrderedSerialization

  override def config = super.config + (Config.ScaldingRequireOrderedSerialization -> "true")

  TypedPipe.from(TypedTsv[(String, String)]("input"))
    .group
    .sorted
    .max
    .write(TypedTsv[(String, String)]("output"))
}

class RequireOrderedSerializationTest extends WordSpec with Matchers {
  "A NoOrderedSerJob" should {
    // throw if we try to run in:
    "throw when run" in {
      val ex = the[Exception] thrownBy {
        JobTest(new NoOrderdSerJob(_))
          .source(TypedTsv[(String, String)]("input"), List(("a", "a"), ("b", "b")))
          .sink[(String, String)](TypedTsv[(String, String)]("output")) { outBuf => () }
          .run
          .finish()
      }
      // NOTE(review): this pins the failure to a specific file:line in the
      // error text; it will break if that source file is edited — confirm.
      ex.getMessage should include("SerializationTest.scala:29")
    }
  }

  "A OrderedSerJob" should {
    // throw if we try to run in:
    "run" in {
      JobTest(new OrderdSerJob(_))
        .source(TypedTsv[(String, String)]("input"), List(("a", "a"), ("a", "b"), ("b", "b")))
        .sink[(String, String)](TypedTsv[(String, String)]("output")) { outBuf =>
          // max of the grouped values per key
          outBuf.toSet shouldBe Set(("a", "b"), ("b", "b"))
        }
        .run
        .finish()
    }
  }
}
cchepelov/scalding
scalding-core/src/test/scala/com/twitter/scalding/typed/RequireOrderedSerializationTest.scala
Scala
apache-2.0
2,417
package exam.national_center_test.xml.choice

import exam.national_center_test.xml.choice.collcetion.{ChoiceCollection, ChoiceCollectionType, ChoiceSet, ChoiceSingleton}

/**
 * Classification of answer-choice kinds for national center test questions,
 * plus heuristics to detect the kind from the choice content.
 * <pre>
 * Created on 5/24/15.
 * </pre>
 * @author K.Sakamoto
 */
object ChoiceType extends Enumeration {
  val None, Binary, Image, Sentence, Symbol, SymbolList, Term,
      BinaryCombo, ImageCombo, SentenceCombo, SymbolCombo, SymbolListCombo, TermCombo = Value

  /** Maps a combo type to its element type; non-combo types pass through unchanged. */
  def childChoiceType(choiceType: ChoiceType.Value): ChoiceType.Value = {
    choiceType match {
      case BinaryCombo     => ChoiceType.Binary
      case ImageCombo      => ChoiceType.Image
      case SentenceCombo   => ChoiceType.Sentence
      case SymbolCombo     => ChoiceType.Symbol
      case SymbolListCombo => ChoiceType.SymbolList
      case TermCombo       => ChoiceType.Term
      case otherwise       => otherwise
    }
  }

  /** Maps an element type to its combo type; already-combo types pass through unchanged. */
  private def parentChoiceType(choiceType: ChoiceType.Value): ChoiceType.Value = {
    choiceType match {
      case ChoiceType.Binary     => ChoiceType.BinaryCombo
      case ChoiceType.Image      => ChoiceType.ImageCombo
      case ChoiceType.Sentence   => ChoiceType.SentenceCombo
      case ChoiceType.Symbol     => ChoiceType.SymbolCombo
      case ChoiceType.SymbolList => ChoiceType.SymbolListCombo
      case ChoiceType.Term       => ChoiceType.TermCombo
      case otherwise             => otherwise
    }
  }

  /** Detects the choice type of a collection from its first element: a set of
    * choices yields the combo type of its members; a singleton yields the
    * member type directly.
    */
  def detectChoiceType(choices: Seq[ChoiceCollection]): ChoiceType.Value = {
    val headElement: ChoiceCollection = choices.head
    headElement.choiceCollectionType match {
      case ChoiceCollectionType.Set =>
        parentChoiceType(
          detectChoiceType(
            headElement.
              asInstanceOf[ChoiceSet].
              set.
              values.
              head.
              singleton
          )
        )
      case ChoiceCollectionType.Singleton =>
        detectChoiceType(
          headElement.
            asInstanceOf[ChoiceSingleton].
            singleton
        )
      case otherwise => ChoiceType.None
    }
  }

  /** Runs the detectors in priority order and returns the first match.
    *
    * Rewritten from six match/early-`return` blocks into a lazy `orElse`
    * chain: `orElse` takes its argument by name, so later detectors still
    * only run when the earlier ones fail, exactly as before.  The final
    * `getOrElse` is unreachable in practice because `detectChoiceTerm`
    * always matches, but keeps the expression total.
    */
  private def detectChoiceType(choice: String): ChoiceType.Value = {
    detectChoiceBinary(choice).
      orElse(detectChoiceImage(choice)).
      orElse(detectChoiceSentence(choice)).
      orElse(detectChoiceSymbol(choice)).
      orElse(detectChoiceSymbolList(choice)).
      orElse(detectChoiceTerm(choice)).
      getOrElse(ChoiceType.None)
  }

  /** True/false answers (正 = correct, 誤 = incorrect). */
  private def detectChoiceBinary(choice: String): Option[ChoiceType.Value] = {
    if (Set("正", "誤") contains choice) {
      Option(ChoiceType.Binary)
    } else {
      scala.None
    }
  }

  /** Image answers are referenced by a PNG file name. */
  private def detectChoiceImage(choice: String): Option[ChoiceType.Value] = {
    if (choice endsWith ".png") {
      Option(ChoiceType.Image)
    } else {
      scala.None
    }
  }

  /** Full sentences end with the Japanese full stop. */
  private def detectChoiceSentence(choice: String): Option[ChoiceType.Value] = {
    if (choice endsWith "。") {
      Option(ChoiceType.Sentence)
    } else {
      scala.None
    }
  }

  /** A single option symbol: a-f or the katakana ア-オ. */
  private def detectChoiceSymbol(choice: String): Option[ChoiceType.Value] = {
    if (choice matches "[a-fア-オ]") {
      Option(ChoiceType.Symbol)
    } else {
      scala.None
    }
  }

  /** Multiple symbols joined by the SymbolListParser delimiter. */
  private def detectChoiceSymbolList(choice: String): Option[ChoiceType.Value] = {
    if (choice contains SymbolListParser.delimiter) {
      Option(ChoiceType.SymbolList)
    } else {
      scala.None
    }
  }

  /** Fallback: anything else is treated as a term.  Always matches. */
  private def detectChoiceTerm(choice: String): Option[ChoiceType.Value] = {
    Option(ChoiceType.Term)
  }
}
ktr-skmt/FelisCatusZero
src/main/scala/exam/national_center_test/xml/choice/ChoiceType.scala
Scala
apache-2.0
4,138
package debop4s.core.utils

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{CountDownLatch => JavaCountDownLatch, TimeUnit}

import debop4s.core.AbstractCoreFunSuite
import debop4s.core.concurrent.Asyncs
import org.mockito.Mockito._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent._
import scala.util.{Failure, Success}

/**
 * MemorizeFunSuite — verifies that `Memorize` caches a function's results:
 * at most one evaluation per distinct input, including under concurrency,
 * and sensible behavior when the computation throws.
 *
 * @author Sunghyouk Bae
 */
class MemorizeFunSuite extends AbstractCoreFunSuite {

  test("Memorize.apply: only runs the function once for the same input") {
    // mockito can't spy anonymous class,
    // and this was the simplest approach i could come up with.
    class Adder extends (Int => Int) {
      override def apply(i: Int) = i + 1
    }
    val adder = spy(new Adder)
    val memorizer = Memorize { adder(_: Int) }

    memorizer(1) shouldEqual 2
    memorizer(1) shouldEqual 2
    memorizer(2) shouldEqual 3

    // the underlying function must have been invoked exactly once per input
    verify(adder, times(1))(1)
    verify(adder, times(1))(2)
  }

  test("Memorize.apply: only executes the memorized computation once per input") {
    val callCount = new AtomicInteger(0)
    val startUpLatch = new JavaCountDownLatch(1)

    val memorizer = Memorize { i: Int =>
      // Wait for all of the threads to be started before
      // continuing. This gives races a chance to happen.
      startUpLatch.await()

      // Perform the effect of incrementing the counter, so that we
      // can detect whether this code is executed more than once.
      callCount.incrementAndGet()

      // Return a new object so that object equality will not pass
      // if two different result values are used.
      "." * i
    }

    // launch several concurrent lookups of the SAME key before releasing them
    val concurencyLevel = 5
    val computations = (0 until concurencyLevel).map { _ =>
      Future { memorizer(concurencyLevel) }
    }.seq

    startUpLatch.countDown()

    val results = Asyncs.result(Future.sequence(computations))

    // every caller must observe the very same cached result instance
    results foreach { item => item shouldEqual results(0) }
    callCount.get() shouldEqual 1
  }

  test("handles exceptions during computations") {
    val startUpLatch = new JavaCountDownLatch(1)
    val callCount = new AtomicInteger(0)

    val memo = Memorize { i: Int =>
      // wait until all callers have been started
      startUpLatch.await(200, TimeUnit.MILLISECONDS)

      // the first evaluation fails; a later retry must succeed
      val n = callCount.incrementAndGet()
      if (n == 1) throw new RuntimeException()
      else i + 1
    }

    val concurencyLevel = 5
    val computations = (0 until concurencyLevel).map { _ =>
      Future { memo(concurencyLevel) }
    }.seq

    computations foreach { f =>
      f onComplete {
        case Success(i) => debug(i.toString)
        case Failure(e) => e.isInstanceOf[RuntimeException] shouldEqual true
      }
    }

    startUpLatch.countDown()
    Thread.sleep(1000)

    // exactly two evaluations: the failing one, then the cached retry
    callCount.get() shouldEqual 2
  }
}
debop/debop4s
debop4s-core/src/test/scala/debop4s/core/utils/MemorizeFunSuite.scala
Scala
apache-2.0
2,819
package helpers import scala.concurrent._ import scala.concurrent.duration._ import scala.language.postfixOps /** * @author [email protected] */ trait BootingProcess { def onStart[T](awaitable: Awaitable[T]): T = { Await.result(awaitable, 1 minute) } }
lizepeng/app.io
modules/models/app/helpers/BootingProcess.scala
Scala
apache-2.0
269
package org.bitcoins.core.protocol.ln

import org.bitcoins.core.number.{UInt5, UInt8}
import org.bitcoins.core.protocol.ln.util.LnUtil
import org.bitcoins.core.util.{Bech32, SeqWrapper}
import org.bitcoins.crypto.NetworkElement
import scodec.bits.ByteVector

import scala.annotation.tailrec
import scala.collection.mutable
import scala.reflect.ClassTag

/** An aggregation of all the individual tagged fields in a [[org.bitcoins.core.protocol.ln.LnInvoice]] */
sealed abstract class LnTaggedFields extends SeqWrapper[LnTag] with NetworkElement {

  // BOLT11 invariants: a payment hash is mandatory, and exactly one of
  // (literal description under 640 chars, description hash) must be present.
  require(tag[LnTag.PaymentHashTag].nonEmpty, "You must supply a payment hash")
  require(
    (description.nonEmpty && description.get.string.length < 640) ||
      descriptionHash.nonEmpty,
    "You must supply either a description hash, or a literal description that is 640 characters or less to create an invoice."
  )
  require(!(description.nonEmpty && descriptionHash.nonEmpty),
          "Cannot have both description and description hash")

  /** All tagged fields carried by this invoice, in encoding order. */
  def tags: Vector[LnTag]

  override protected lazy val wrapped: Vector[LnTag] = tags

  /** Returns the first tag of the requested runtime type, if present. */
  def tag[T <: LnTag: ClassTag]: Option[T] =
    tags.collectFirst { case t: T => t }

  // Convenience accessors for the individual BOLT11 tag kinds.

  // Safe: the first `require` above guarantees a payment hash exists.
  lazy val paymentHash: LnTag.PaymentHashTag =
    tag[LnTag.PaymentHashTag].get

  lazy val secret: Option[LnTag.SecretTag] = tag[LnTag.SecretTag]

  lazy val description: Option[LnTag.DescriptionTag] =
    tag[LnTag.DescriptionTag]

  lazy val nodeId: Option[LnTag.NodeIdTag] = tag[LnTag.NodeIdTag]

  lazy val descriptionHash: Option[LnTag.DescriptionHashTag] =
    tag[LnTag.DescriptionHashTag]

  lazy val expiryTime: Option[LnTag.ExpiryTimeTag] = tag[LnTag.ExpiryTimeTag]

  lazy val cltvExpiry: Option[LnTag.MinFinalCltvExpiry] =
    tag[LnTag.MinFinalCltvExpiry]

  lazy val fallbackAddress: Option[LnTag.FallbackAddressTag] =
    tag[LnTag.FallbackAddressTag]

  lazy val routingInfo: Option[LnTag.RoutingInfo] = tag[LnTag.RoutingInfo]

  lazy val features: Option[LnTag.FeaturesTag] = tag[LnTag.FeaturesTag]

  /** Concatenated 5-bit payload of all tags, in order. */
  lazy val data: Vector[UInt5] = tags.flatMap(_.data)

  override def bytes: ByteVector = {
    // Re-pack the bech32 5-bit groups into full bytes for serialization.
    val u8s = Bech32.from5bitTo8bit(data)
    UInt8.toBytes(u8s)
  }

  override def toString: String = {
    val b = new mutable.StringBuilder()
    val string = Bech32.encode5bitToString(data)
    b.append(string)
    b.toString()
  }
}

object LnTaggedFields {

  private case class InvoiceTagImpl(tags: Vector[LnTag]) extends LnTaggedFields

  /** According to BOLT11 the required fields in a LnInvoice are a payment hash
   * and either a description, or the hash of the description.
   */
  def apply(
      paymentHash: LnTag.PaymentHashTag,
      descriptionOrHash: Either[LnTag.DescriptionTag, LnTag.DescriptionHashTag],
      secret: Option[LnTag.SecretTag] = None,
      nodeId: Option[LnTag.NodeIdTag] = None,
      expiryTime: Option[LnTag.ExpiryTimeTag] = None,
      cltvExpiry: Option[LnTag.MinFinalCltvExpiry] = None,
      fallbackAddress: Option[LnTag.FallbackAddressTag] = None,
      routingInfo: Option[LnTag.RoutingInfo] = None,
      features: Option[LnTag.FeaturesTag] = None): LnTaggedFields = {

    // The Either enforces at the type level that exactly one of description /
    // description-hash is provided (matching the class-level require).
    val (description, descriptionHash): (
        Option[LnTag.DescriptionTag],
        Option[LnTag.DescriptionHashTag]) = {
      descriptionOrHash match {
        case Left(description) =>
          (Some(description), None)
        case Right(hash) =>
          (None, Some(hash))
      }
    }

    val tags = Vector(Some(paymentHash),
                      description,
                      nodeId,
                      descriptionHash,
                      expiryTime,
                      cltvExpiry,
                      fallbackAddress,
                      routingInfo,
                      features,
                      secret).flatten

    InvoiceTagImpl(tags)
  }

  def apply(tags: Vector[LnTag]): LnTaggedFields = InvoiceTagImpl(tags)

  /** This is intended to parse all of the [[org.bitcoins.core.protocol.ln.LnTaggedFields LnTaggedFields]]
   * from the tagged part of the ln invoice. This should only be called
   * if other information has already been remove from the invoice
   * like the [[LnHumanReadablePart]]
   * @param u5s payload of the tagged fields in the invoice
   * @return
   */
  def fromUInt5s(u5s: Vector[UInt5]): LnTaggedFields = {
    // Each tag is encoded as: 1 x UInt5 prefix, 2 x UInt5 data length,
    // then `dataLength` UInt5s of payload. Consume tags until exhausted.
    @tailrec
    def loop(remaining: Vector[UInt5], fields: Vector[LnTag]): Vector[LnTag] =
      remaining match {
        case h +: h1 +: h2 +: t =>
          val prefix = LnTagPrefix
            .fromUInt5(h)
            .getOrElse(
              throw new RuntimeException("Unknown LN invoice tag prefix"))

          //next two 5 bit increments are data_length
          val dataLengthU5s = List(h1, h2)

          val dataLength = LnUtil.decodeDataLength(dataLengthU5s)

          //t is the actual possible payload
          val payload: Vector[UInt5] = t.take(dataLength.toInt)

          val tag = LnTag.fromLnTagPrefix(prefix, payload)

          val newRemaining = t.slice(payload.size, t.size)

          loop(newRemaining, fields :+ tag)
        // Reached only for 1- or 2-element vectors (3+ is matched above):
        // not enough room for prefix + 2-element length, so parsing fails.
        // NOTE(review): the second alternative is subsumed by `_ +: _`.
        case _ +: _ | _ +: _ +: _ =>
          throw new IllegalArgumentException(
            "Failed to parse LnTaggedFields, needs 15bits of meta data to be able to parse")
        // Empty vector: all tags consumed.
        case _: Vector[_] =>
          fields
      }

    val tags = loop(u5s, Vector.empty)

    InvoiceTagImpl(tags)
  }
}
bitcoin-s/bitcoin-s
core/src/main/scala/org/bitcoins/core/protocol/ln/LnTaggedFields.scala
Scala
mit
5,398
/** * Copyright (c) 2016, Anthony Anderson<Illyohs> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package us.illyohs.civilmagiks.common.core.util import net.minecraft.util.ResourceLocation import net.minecraft.util.text.translation.I18n import us.illyohs.civilmagiks.api.sigil.Sigil abstract class SigilBase(name:String, radius:Int) extends Sigil { this.setModid(ModInfo.MOD_ID) this.setRegistryName(getModid, name) this.setRadius(radius) this.setUnLocalizedName(I18n.translateToLocal("sigil." + name)) this.setTexture(new ResourceLocation(getModid, "texture/sigil/" + name)) def getKey = name // def getDescriptionPacket: Packet = { // // } }
Illyohs/CivilMagicks
src/main/scala/us/illyohs/civilmagiks/common/core/util/SigilBase.scala
Scala
bsd-2-clause
1,952
package com.giyeok.jparser.examples.metalang3 import com.giyeok.jparser.examples.MetaLang3Example object AllMetaLang3Examples { val examples: Seq[MetaLang3Example] = List( SimpleExamples.examples, JoinExamples.examples, ClassRelExamples.examples, OptionalExamples.examples, PExprExamples.examples, // List(MetaLang3Grammar.inMetaLang3), ).flatten }
Joonsoo/moon-parser
examples/src/main/scala/com/giyeok/jparser/examples/metalang3/AllMetaLang3Examples.scala
Scala
mit
379
package com.shocktrade.client.models.contest import com.shocktrade.common.models.contest.PositionLike import scala.scalajs.js /** * Represents a Position model * @author Lawrence Daniels <[email protected]> */ class Position(var _id: js.UndefOr[String], var symbol: js.UndefOr[String], var exchange: js.UndefOr[String], var pricePaid: js.UndefOr[Double], var quantity: js.UndefOr[Double], var commission: js.UndefOr[Double], var processedTime: js.UndefOr[js.Date], var accountType: js.UndefOr[String], var netValue: js.UndefOr[Double]) extends PositionLike { // UI-specific fields var lastTrade: js.UndefOr[Double] = js.undefined var gainLossPct: js.UndefOr[Double] = js.undefined }
ldaniels528/shocktrade.js
app/client/angularjs/src/main/scala/com/shocktrade/client/models/contest/Position.scala
Scala
apache-2.0
829
package com.socrata.bq.config import com.socrata.datacoordinator.common.DataSourceConfig import com.socrata.thirdparty.typesafeconfig.ConfigClass import com.typesafe.config.{Config, ConfigUtil} import scala.collection.JavaConverters._ class BBQStoreConfig(config: Config, root: String) extends ConfigClass(config, root) { // handle blank root override protected def path(key: String*) = { val fullKey = if (root.isEmpty) key else ConfigUtil.splitPath(root).asScala ++ key ConfigUtil.joinPath(fullKey: _*) } // Database val database = new DataSourceConfig(config, path("database")) // Bigquery private val bigqueryConfig = config.getConfig("bigquery") val projectId = bigqueryConfig.getString("project-id") val datasetId = bigqueryConfig.getString("dataset-id") // Resync handler val resyncConfig = config.getConfig("resync-handler") }
socrata-platform/soql-bigquery-adapter
store-bq/src/main/scala/com/socrata/bq/config/BBQStoreConfig.scala
Scala
apache-2.0
871
package rere.sasl.scram.cache.impl import org.scalatest.{Matchers, WordSpec} class ConcurrentSaltedPasswordCacheTest extends WordSpec with Matchers { "ConcurrentSaltedPasswordCache" should { "cache salted passwords" in { val cache = new ConcurrentSaltedPasswordCache() cache.get("secret", "salt", 4096) shouldBe None cache.put("secret", "salt", 4096, Array[Byte](97, 98, 99)) cache.get("secret", "salt", 4096).map(_.toVector) shouldBe Some(Vector[Byte](97, 98, 99)) cache.put("secret", "salt", 4096, Array[Byte](97, 98, 100)) cache.get("secret", "salt", 4096).map(_.toVector) shouldBe Some(Vector[Byte](97, 98, 100)) } } }
pbaun/rere
modules/sasl/src/test/scala/rere/sasl/scram/cache/impl/ConcurrentSaltedPasswordCacheTest.scala
Scala
apache-2.0
677
/**
 * Licensed to Big Data Genomics (BDG) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The BDG licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.bdgenomics.adam.rdd

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.models.ReferenceRegion
import org.bdgenomics.adam.util.ADAMFunSuite

import scala.reflect.ClassTag

/**
 * Tests for the Coverage region-merging logic, both the local (Seq-based)
 * calculation and the Spark RDD-based one. Coverage(100L) means regions are
 * bucketed into 100-base windows before merging.
 */
class CoverageSuite extends ADAMFunSuite {

  // Shorthand: a region on the fixed reference sequence "seq".
  private def region(start: Long, end: Long) = ReferenceRegion("seq", start, end)

  /* Tests for local coverage calculations */

  test("regionToWindows") {
    val c = new Coverage(100L)
    val ref = region(10, 50)
    val w0 = region(0, 100)
    // A region inside one window maps to that single window.
    assert(c.regionToWindows(ref) === Seq((w0, ref)))

    val w1 = region(100, 200)
    val r2 = region(50, 150)
    // A region straddling a window boundary is split per window.
    assert(c.regionToWindows(r2) == Seq((w0, region(50, 100)), (w1, region(100, 150))))
  }

  test("calculate empty coverage") {
    val c = new Coverage(100L)
    assert(c.calculateCoverageRegions(Seq()).toList === Seq())
  }

  test("calculate coverage of one region") {
    val c = new Coverage(100L)
    assert(c.calculateCoverageRegions(Seq(region(10, 50))).toList === Seq(region(10, 50)))
  }

  test("calculate coverage of two regions") {
    val c = new Coverage(100L)
    // Overlapping regions merge into their union.
    assert(c.calculateCoverageRegions(Seq(region(10, 50), region(20, 60))).toList
      === Seq(region(10, 60)))
  }

  test("calculate coverage of three regions") {
    val c = new Coverage(100L)
    // Regions fully contained in a larger one collapse into it.
    assert(c.calculateCoverageRegions(Seq(region(10, 100), region(10, 20), region(50, 80))).toList
      === Seq(region(10, 100)))
  }

  test("calculate coverage of two adjacent regions") {
    val c = new Coverage(100L)
    // Touching end/start coordinates are merged.
    assert(c.calculateCoverageRegions(Seq(region(10, 99), region(99, 200))).toList
      === Seq(region(10, 200)))
  }

  test("calculate coverage of two nearby regions") {
    val c = new Coverage(100L)
    // A one-base gap (100..101) keeps the regions separate.
    assert(c.calculateCoverageRegions(Seq(region(10, 100), region(101, 200))).toList
      === Seq(region(10, 100), region(101, 200)))
  }

  test("calculate coverage of three out-of-order regions") {
    val c = new Coverage(100L)
    // Input order does not matter; output is sorted and merged.
    assert(c.calculateCoverageRegions(Seq(region(100, 200), region(10, 50), region(150, 201))).toList
      === Seq(region(10, 50), region(100, 201)))
  }

  test("calculate coverage of two regions which join at a window boundary") {
    val c = new Coverage(100L)
    // Merging must also work across the internal window boundary at 100.
    assert(c.calculateCoverageRegions(Seq(region(0, 100), region(100, 200))).toList
      === Seq(region(0, 200)))
  }

  // Lifts a Seq into an RDD; empty input uses emptyRDD to avoid an
  // empty-partition parallelize.
  def rdd[T](values: Seq[T])(implicit sc: SparkContext, kt: ClassTag[T]): RDD[T] =
    if (values.isEmpty) sc.emptyRDD else sc.parallelize(values)

  // Implicit lift so the RDD tests below can pass plain Seqs.
  implicit def seqToRDD[T](values: Seq[T])(implicit sc: SparkContext, kt: ClassTag[T]): RDD[T] = rdd(values)

  /* Tests for coverage calculation inside RDDs */
  // These mirror the local tests above, exercising findCoverageRegions on RDDs.

  sparkTest("find empty coverage") {
    implicit val sparkContext = sc
    val c = new Coverage(100L)
    assert(c.findCoverageRegions(Seq()).collect() === Array())
  }

  sparkTest("find coverage of one region") {
    implicit val sparkContext = sc
    val c = new Coverage(100L)
    assert(c.findCoverageRegions(Seq(region(10, 50))).collect() === Array(region(10, 50)))
  }

  sparkTest("find coverage of two regions") {
    implicit val sparkContext = sc
    val c = new Coverage(100L)
    assert(c.findCoverageRegions(Seq(region(10, 50), region(20, 60))).collect()
      === Array(region(10, 60)))
  }

  sparkTest("find coverage of three regions") {
    implicit val sparkContext = sc
    val c = new Coverage(100L)
    assert(c.findCoverageRegions(Seq(region(10, 100), region(10, 20), region(50, 80))).collect()
      === Array(region(10, 100)))
  }

  sparkTest("find coverage of two adjacent regions") {
    implicit val sparkContext = sc
    val c = new Coverage(100L)
    assert(c.findCoverageRegions(Seq(region(10, 99), region(99, 200))).collect()
      === Array(region(10, 200)))
  }

  sparkTest("find coverage of two nearby regions") {
    implicit val sparkContext = sc
    val c = new Coverage(100L)
    assert(c.findCoverageRegions(Seq(region(10, 100), region(101, 200))).collect()
      === Array(region(10, 100), region(101, 200)))
  }

  sparkTest("find coverage of three out-of-order regions") {
    implicit val sparkContext = sc
    val c = new Coverage(100L)
    assert(c.findCoverageRegions(Seq(region(100, 200), region(10, 50), region(150, 201))).collect()
      === Array(region(10, 50), region(100, 201)))
  }

  sparkTest("find coverage of two regions which join at a window boundary") {
    implicit val sparkContext = sc
    val c = new Coverage(100L)
    assert(c.findCoverageRegions(Seq(region(0, 100), region(100, 200))).collect()
      === Array(region(0, 200)))
  }
}
allenday/adam
adam-core/src/test/scala/org/bdgenomics/adam/rdd/CoverageSuite.scala
Scala
apache-2.0
5,394
import org.apache.spark.{SparkContext, SparkConf}

/**
 * Created by ma on 15-1-27.
 *
 * TPC-H query 22 ("Global Sales Opportunity") runner. Builds three helper
 * views over `customer`/`orders`, runs the final aggregation, and reports
 * parse/execution timings.
 *
 * NOTE(review): the SparkContext and HiveContext are created eagerly at
 * construction time (class body), not inside execute() — confirm this matches
 * how BaseQuery subclasses are driven.
 */
class QueryT22 extends BaseQuery {

  System.setProperty("spark.cores.max", String.valueOf(ParamSet.cores))
  val conf = new SparkConf()
  conf.setAppName("TPCH-Q22")
  val sc = new SparkContext(conf)
  val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc)

  override def execute: Unit = {
    //    setAppName("TPC-H_Q21")
    //get the time before the query be executed
    val t0 = System.nanoTime: Double
    var t1 = System.nanoTime: Double

    println("ID: " + ID + "query 22 will be parsed")

    // Select the target database; count() forces execution of the USE.
    val choosDdatabase = sqlContext.sql("use " + ParamSet.database)
    choosDdatabase.count()
    println("DATABASE: " + ParamSet.database)

    //the query
    // Drop any stale helper views from a previous run, then rebuild them.
    val res0 = sqlContext.sql("""drop view q22_customer_tmp_cached""")
    val res1 = sqlContext.sql("""drop view q22_customer_tmp1_cached""")
    val res2 = sqlContext.sql("""drop view q22_orders_tmp_cached""")
    // Customers whose phone country code is in the target set.
    val res3 = sqlContext.sql("""create view if not exists q22_customer_tmp_cached as select c_acctbal,c_custkey,substr(c_phone, 1, 2) as cntrycode from customer where substr(c_phone, 1, 2) = '13' or substr(c_phone, 1, 2) = '31' or substr(c_phone, 1, 2) = '23' or substr(c_phone, 1, 2) = '29' or substr(c_phone, 1, 2) = '30' or substr(c_phone, 1, 2) = '18' or substr(c_phone, 1, 2) = '17'""")
    // Average positive account balance across those customers.
    val res4 = sqlContext.sql("""create view if not exists q22_customer_tmp1_cached as select avg(c_acctbal) as avg_acctbal from q22_customer_tmp_cached where c_acctbal > 0.00""")
    // Distinct customer keys that have placed at least one order.
    val res5 = sqlContext.sql("""create view if not exists q22_orders_tmp_cached as select o_custkey from orders group by o_custkey""")
    // Final Q22: customers with no orders and an above-average balance,
    // grouped per country code.
    val res6 = sqlContext.sql("""select cntrycode,count(1) as numcust,sum(c_acctbal) as totacctbal from (select cntrycode,c_acctbal,avg_acctbal from q22_customer_tmp1_cached ct1 join (select cntrycode,c_acctbal from q22_orders_tmp_cached ot right outer join q22_customer_tmp_cached ct on ct.c_custkey = ot.o_custkey where o_custkey is null) ct2) a where c_acctbal > avg_acctbal group by cntrycode order by cntrycode""")

    t1 = System.nanoTime: Double
    println("ID: " + ID + "query 22 parse done, parse time:" + (t1 - t0) / 1000000000.0 + " secs")

    if (ParamSet.isExplain) {
      // Explain mode: print the physical plans without running the query.
      println(res0.queryExecution.executedPlan)
      println(res1.queryExecution.executedPlan)
      println(res2.queryExecution.executedPlan)
      println(res3.queryExecution.executedPlan)
      println(res4.queryExecution.executedPlan)
      println(res5.queryExecution.executedPlan)
      println(res6.queryExecution.executedPlan)
    } else {
      // Execution mode: either print the rows or just force evaluation.
      if (ParamSet.showResult) {
        res6.collect().foreach(println)
      } else {
        res6.count()
      }
      t1 = System.nanoTime: Double
      println("ID: " + ID + "query 22's execution time : " + (t1 - t0) / 1000000000.0 + " secs")
    }

    println("ID: " + ID + "Query 22 completed!")
    sc.stop()
    println("ID: " + ID + "Query 22's context successfully stopped")
    // NOTE(review): shells out (presumably to free OS caches between runs);
    // the spawned process is not waited on — confirm intended.
    Runtime.getRuntime.exec(ParamSet.execFREE)
  }
}
f7753/spark-SQL-tpch-test-tool
QueryT22.scala
Scala
apache-2.0
3,413
/*
 * Copyright (C) 2005, The Beangle Software.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package org.beangle.cdi.spring.config

import org.beangle.cdi.bind.BindRegistry
import org.beangle.commons.bean.Factory
import org.beangle.commons.lang.ClassLoaders
import org.beangle.commons.lang.reflect.Reflections.{getGenericParamTypes, newInstance}
import org.beangle.commons.logging.Logging
import org.springframework.beans.factory.{FactoryBean, HierarchicalBeanFactory}
import org.springframework.beans.factory.config.{BeanDefinition, BeanDefinitionHolder, RuntimeBeanReference, SingletonBeanRegistry}
import org.springframework.beans.factory.support.{AbstractBeanDefinition, BeanDefinitionRegistry}

object SpringBindRegistry {

  /**
   * Resolves the concrete class of the named bean definition, walking parent
   * definitions and, failing that, factory-bean/factory-method metadata.
   */
  def getBeanClass(registry: BeanDefinitionRegistry, name: String): Class[_] = {
    val bd = registry.getBeanDefinition(name)
    var clazz: Class[_] = getBeanClass(bd)
    // Walk up the parent-definition chain until a class is found.
    if (null == clazz) {
      var currDef = bd
      while (null == clazz && null != currDef && null != currDef.getParentName) {
        val parentDef = registry.getBeanDefinition(bd.getParentName)
        clazz = getBeanClass(parentDef)
        currDef = parentDef
      }
    }
    // Fall back to the factory bean's factory method return type.
    if (null == clazz) {
      val factoryBeanName = bd.getFactoryBeanName
      val factoryMethodName = bd.getFactoryMethodName
      if (null != factoryBeanName && null != factoryMethodName) {
        var factoryClass = getBeanClass(registry, factoryBeanName)
        if (classOf[FactoryBean[_]].isAssignableFrom(factoryClass)) {
          // FactoryBean: instantiate it to ask for its product type.
          factoryClass = newInstance(factoryClass.asInstanceOf[Class[FactoryBean[_]]]).getObjectType
        } else if (classOf[Factory[_]].isAssignableFrom(factoryClass)) {
          // Beangle Factory: product type comes from the generic parameter.
          factoryClass = getGenericParamTypes(factoryClass, classOf[Factory[_]]).values.head
        }
        clazz = factoryClass.getMethod(factoryMethodName).getReturnType
      }
    }
    clazz
  }

  /**
   * Extracts the bean class directly from a definition, loading it by name
   * when not already resolved. Returns null when undeterminable.
   */
  def getBeanClass(bd: BeanDefinition): Class[_] = {
    var clazz: Class[_] = null
    if (bd.isInstanceOf[AbstractBeanDefinition]) {
      val abd = bd.asInstanceOf[AbstractBeanDefinition]
      if (abd.hasBeanClass) clazz = abd.getBeanClass
    }
    if (null == clazz) {
      clazz = if (null != bd.getBeanClassName) ClassLoaders.load(bd.getBeanClassName) else null
    }
    clazz
  }
}

/**
 * SpringBindRegistry class.
 * @author chaostone
 */
class SpringBindRegistry(val registry: BeanDefinitionRegistry) extends BindRegistry with Logging {

  // bean name -> resolved bean class (FactoryBeans appear twice: "&name" for
  // the factory itself and "name" for its product).
  private val nameTypes = new collection.mutable.HashMap[String, Class[_]]

  // cache of getBeanNames lookups: type -> matching bean names
  private val typeNames = new collection.mutable.HashMap[Class[_], List[String]]

  // names of beans marked primary
  private val primaries = new collection.mutable.HashSet[String]

  registerExists()

  def beanNames: Set[String] = nameTypes.keySet.toSet

  /**
   * Register exists spring bean definition in PARENT and current context.
   */
  private def registerExists(): Unit = {
    registry match {
      case hfactory: HierarchicalBeanFactory =>
        hfactory.getParentBeanFactory match {
          case p: BeanDefinitionRegistry => registerDefinitions(p)
          case _ =>
        }
      case _ =>
    }
    registerDefinitions(registry)
    logger.debug(s"Find ${beanNames.size} beans")
  }

  // Indexes all singletons and bean definitions of one registry into
  // nameTypes/primaries, resolving FactoryBean product types where possible.
  private def registerDefinitions(registry: BeanDefinitionRegistry): Unit = {
    //register singletons
    val singletonRegistry = registry.asInstanceOf[SingletonBeanRegistry]
    singletonRegistry.getSingletonNames foreach { singtonName =>
      nameTypes.put(singtonName, singletonRegistry.getSingleton(singtonName).getClass)
    }
    import SpringBindRegistry._
    //register definitions
    for (name <- registry.getBeanDefinitionNames) {
      val bd = registry.getBeanDefinition(name)
      if (bd.isPrimary) primaries += name
      val beanClass = if (bd.isAbstract) null else getBeanClass(registry, name)
      if (null != beanClass) {
        try {
          if (classOf[FactoryBean[_]].isAssignableFrom(beanClass)) {
            // Register the factory itself under the Spring "&" prefix.
            nameTypes.put("&" + name, beanClass)
            if (bd.isPrimary) primaries += ("&" + name)
            // Determine the product type: explicit "objectType" property,
            // else the "target" property, else instantiate the FactoryBean.
            var objectClass: Class[_] = null
            val objectTypePV = bd.getPropertyValues.getPropertyValue("objectType")
            if (null != objectTypePV) {
              objectClass = objectTypePV.getValue match {
                case clazz: Class[_] => clazz
                case className: String => ClassLoaders.load(className)
              }
            } else {
              objectClass = bd.getPropertyValues.getPropertyValue("target") match {
                case null =>
                  try {
                    newInstance(beanClass.asInstanceOf[Class[FactoryBean[_]]]).getObjectType
                  } catch {
                    case e: Throwable => null
                  }
                case pv =>
                  pv.getValue match {
                    case bdh: BeanDefinitionHolder => ClassLoaders.load(bdh.getBeanDefinition.getBeanClassName)
                    case rbr: RuntimeBeanReference => getBeanClass(registry, rbr.getBeanName)
                    case _ => null
                  }
              }
            }
            if (null == objectClass) throw new RuntimeException("Cannot guess object type of " + bd)
            else nameTypes.put(name, objectClass)
          } else if (classOf[Factory[_]].isAssignableFrom(beanClass)) {
            nameTypes.put("&" + name, beanClass)
            if (bd.isPrimary) primaries += ("&" + name)
            // Beangle Factory: product type from the generic type parameter.
            val objectClass = getGenericParamTypes(beanClass, classOf[Factory[_]]).values.head.asInstanceOf[Class[_]]
            nameTypes.put(name, objectClass)
          } else {
            nameTypes.put(name, beanClass)
          }
        } catch {
          case e: Exception => logger.error("class not found", e)
        }
      }
    }
  }

  /**
   * Get bean name list according given type
   */
  def getBeanNames(clazz: Class[_]): List[String] = {
    if (typeNames.contains(clazz)) {
      typeNames(clazz)
    } else {
      // "#"-suffixed names are inner/nested definitions and are excluded.
      val names = for ((name, ty) <- nameTypes if clazz.isAssignableFrom(ty) && !name.contains("#")) yield name
      val rs = names.toList
      typeNames.put(clazz, rs)
      rs
    }
  }

  def getBeanType(beanName: String): Class[_] = {
    nameTypes(beanName)
  }

  def contains(beanName: String): Boolean = {
    nameTypes.contains(beanName)
  }

  override def register(name: String, clazz: Class[_]): Unit = {
    require(null != name, "class'name is null")
    nameTypes.put(name, clazz)
  }

  def register(name: String, obj: AnyRef): Unit = {
    nameTypes.put(name, obj.getClass)
    registry.asInstanceOf[SingletonBeanRegistry].registerSingleton(name, obj)
  }

  /**
   * register bean definition
   */
  override def register[T](name: String, clazz: Class[_], definition: T): Unit = {
    require(null != name, "class'name is null")
    val bd = definition.asInstanceOf[BeanDefinition]
    // Register the bean's name and aliases.
    if (registry.containsBeanDefinition(name)) registry.removeBeanDefinition(name)
    registry.registerBeanDefinition(name, bd)
    if (null == clazz) {
      if (!bd.isAbstract) throw new RuntimeException("Concrete bean should has class.")
    } else {
      // for list(a.class,b.class) binding usage
      val alias = clazz.getName
      if (bd.isSingleton && !name.startsWith("&") && !bd.isAbstract && !name.equals(alias) && !registry.isBeanNameInUse(alias)) {
        registry.registerAlias(name, alias)
      }
      if (bd.isPrimary) primaries += name
      nameTypes.put(name, clazz)
    }
  }

  def setPrimary[T](beanName: String, isPrimary: Boolean, definition: T): Unit = {
    definition.asInstanceOf[BeanDefinition].setPrimary(isPrimary)
    if (isPrimary) primaries.add(beanName) else primaries.remove(beanName)
  }

  def isPrimary(name: String): Boolean = {
    primaries.contains(name)
  }
}
beangle/cdi
spring/src/main/scala/org/beangle/cdi/spring/config/SpringBindRegistry.scala
Scala
lgpl-3.0
8,440
//伴生对象 class Fraction(n: Int, d: Int){ val m = n + d } object Fraction{ //伴生对象的apply方法 def apply(n: Int, d: Int) = new Fraction(n ,d) } object ApplyTest1{ def main(args: Array[String]){ val result1 = Fraction(2,4) val result2 = Fraction(3,5) println(result1.m * result2.m) } }
PengLiangWang/Scala
Apply/ApplyTest1.scala
Scala
gpl-3.0
348
/* * Copyright 2015 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.gov.hmrc.play.http.ws import uk.gov.hmrc.play.audit.http.HeaderCarrier import uk.gov.hmrc.play.http.logging.MdcLoggingExecutionContext import uk.gov.hmrc.play.http.{HttpGet, HttpResponse} import MdcLoggingExecutionContext._ import scala.concurrent.Future trait WSGet extends HttpGet with WSRequest { def doGet(url: String)(implicit hc: HeaderCarrier): Future[HttpResponse] = { buildRequest(url).get().map(new WSHttpResponse(_)) } }
liquidarmour/http-verbs
src/main/scala/uk/gov/hmrc/play/http/ws/WSGet.scala
Scala
apache-2.0
1,061
object Dependencies { val akkaV = "2.4.2" val akkaStreamV = "2.4.2" val scalatestV = "2.2.4" }
fehmicansaglam/tepkin
project/Dependencies.scala
Scala
apache-2.0
101
package de.fosd.typechef.typesystem

import de.fosd.typechef.featureexpr.FeatureExprFactory
import de.fosd.typechef.parser.c._
import org.junit.runner.RunWith
import org.scalatest.{Matchers, FunSuite}
import org.scalatest.junit.JUnitRunner

/**
 * Tests the integer-security warnings of the C type system frontend:
 * potential integer overflow feeding pointer arithmetic / allocation sizes,
 * and implicit narrowing coercions.
 */
@RunWith(classOf[JUnitRunner])
class IntegerSecurityTest extends FunSuite with Matchers with TestHelperTSConditional {

  // Runs the type system over `ast`; when `enableAnalysis` is set, the
  // overflow and coercion warnings under test are switched on.
  override protected def check(ast: TranslationUnit, enableAnalysis: Boolean): Boolean = {
    assert(ast != null, "void ast");
    val frontend = new CTypeSystemFrontend(ast, FeatureExprFactory.default.featureModelFactory.empty,
      if (enableAnalysis) new LinuxDefaultOptions {
        override def warning_potential_integer_overflow = true
        override def warning_implicit_coercion = true
      } else new LinuxDefaultOptions {})
    frontend.makeSilent().checkAST(false).isEmpty
  }

  /**
   * very simply approximation, relying on structural nesting
   *
   * data-flow dependent analysis required for more precision, see ignored case below
   */
  test("check pointer arithmetic -- structural nesting") {
    correctExpr("int a; a++;")
    correctExpr("int* a; a+1;")
    correctExpr("int* a; int x; a+(x=4);") //not overflowing
    correctExpr("int *a,*b; b=a+2;") //not overflowing
    correctExpr("int* a; int x; a+x;") //not overflowing
    correctExpr("int* a; int x; a[x];") //not overflowing
    correctExpr("int* a; int x; a+(x&4);") //not overflowing
    correctExpr("int* a; int x,y; a+(x&&y);") //not overflowing
    errorExpr("int* a; int x; a+(x-4);") //potentially overflowing
    errorExpr("int* a; int x; a+(-x);") //potentially overflowing
    errorExpr("int* a; int x; a+(++x);") //potentially overflowing
    errorExpr("int* a; int x; a+(x++);") //potentially overflowing
    errorExpr("int* a; int x; a+(x<<5);") //potentially overflowing
    errorExpr("int* a; int x; a+=(x-5);") //potentially overflowing
    errorExpr("int a[]; int x; a[x-5];") //potentially overflowing
  }

  // Requires data-flow analysis (tracking the overflow through `y`), which
  // the structural approximation above cannot do — hence ignored.
  ignore("check pointer arithmetic -- data flow") {
    errorExpr("int* a; int x; int y = x -4; a+y;") //potentially overflowing
    errorExpr("int a[]; int x;int y = x %4; a[y];") //potentially overflowing
  }

  test("check memory allocation -- structural nesting") {
    // printf with a possibly-overflowing argument is fine ...
    correct("int printf(const char * restrict format, ...);void main(){int x; printf(\"%d\",x+1);}")
    // ... but a possibly-overflowing malloc size is flagged.
    error("void *malloc(int size); void main(){int x; malloc(x+1);}")
  }

  test("integer conversions") {
    correctExpr("int a; int b; b=a;")
    correctExpr("int a; long b; b=a;") //widening is okay
    correctExpr("int a; long b; a=(int)b;") //narrowing is okay when explicit with a cast
    errorExpr("int a; long b; a=b;")
    errorExpr("unsigned int a; signed b; b=a;")
  }
}
mbeddr/TypeChef
CTypeChecker/src/test/scala/de/fosd/typechef/typesystem/IntegerSecurityTest.scala
Scala
lgpl-3.0
2,901
package org.tinydvr.db import org.squeryl.KeyedEntity import org.squeryl.annotations._ class Configuration extends KeyedEntity[String] { def id = key @Column(name = "key") // the configuration key var key: String = _ @Column(name = "value") // the configuration value var value: String = _ }
lou-k/tinydvr
src/main/scala/org/tinydvr/db/Configuration.scala
Scala
gpl-3.0
307
package com.softwaremill.bootzooka.email.application import org.scalatest.{FlatSpec, Matchers} class EmailTemplatingEngineSpec extends FlatSpec with Matchers { behavior of "splitToContentAndSubject" val engine = new EmailTemplatingEngine it should "throw exception on invalid template" in { intercept[Exception] { engine.splitToContentAndSubject("invalid template") } } it should "not throw exception on correct template" in { engine.splitToContentAndSubject("subect\\nContent") } it should "split template into subject and content" in { // When val email = engine.splitToContentAndSubject("subject\\nContent\\nsecond line") // Then email.subject should be("subject") email.content should be("Content\\nsecond line") } it should "generate the registration confirmation email" in { // when val email = engine.registrationConfirmation("adamw") // then email.subject should be("SoftwareMill Bootzooka - registration confirmation for user adamw") email.content should include("Dear adamw,") email.content should include("Regards,") } }
aywengo/bootzooka
backend/src/test/scala/com/softwaremill/bootzooka/email/application/EmailTemplatingEngineSpec.scala
Scala
apache-2.0
1,119
package de.htwg.zeta.server.actor import java.util.UUID import java.util.concurrent.TimeUnit import javax.inject.Singleton import scala.collection.concurrent.TrieMap import scala.concurrent.Future import scala.concurrent.duration.Duration import scala.concurrent.ExecutionContext.Implicits.global import scala.util.Failure import scala.util.Success import akka.actor.Actor import akka.actor.ActorRef import akka.actor.Cancellable import akka.actor.Props import de.htwg.zeta.server.actor.TransientTokenCacheActor.CleanUp import de.htwg.zeta.server.actor.TransientTokenCacheActor.Create import de.htwg.zeta.server.actor.TransientTokenCacheActor.Delete import de.htwg.zeta.server.actor.TransientTokenCacheActor.Read import grizzled.slf4j.Logging /** * Transient Implementation of TokenCache. */ @Singleton object TransientTokenCacheActor { case class Create(id: UUID) case class Read(userId: UUID) case class Delete(iuserId: UUID) case object CleanUp def props(): Props = Props(new TransientTokenCache) } class TransientTokenCache extends Actor with Logging { private case class Token(userId: UUID, lastUse: Long) private val tokens: TrieMap[UUID, Token] = TrieMap.empty // scalastyle:ignore magic.number private val cleaningInterval = Duration(10, TimeUnit.MINUTES) private val lifeTime: Long = Duration(1, TimeUnit.HOURS).toMillis private val cleanUpJob: Cancellable = context.system.scheduler.scheduleAtFixedRate(cleaningInterval, cleaningInterval,self,CleanUp) private def cleanUp() = { info("Cleaning expired tokens") val expired = System.currentTimeMillis - lifeTime tokens --= tokens.filter(n => n._2.lastUse > expired).keys } /** * Finds a token by its ID. * * @param id The unique token ID. * @return The found token or None if no token for the given ID could be found. */ def read(id: UUID): Unit = { replyToSender(tokens.get(id) match { case Some(v) => Future.successful(v.userId) case None => Future.failed(new IllegalStateException) },sender) } /** * Saves a token. 
* * @param userId The userId the token is created for. * @return The saved token. */ def create(userId: UUID): Unit = { val token = Token(userId, System.currentTimeMillis) val id = UUID.randomUUID tokens += (id -> token) replyToSender(Future(id),sender) } /** * Removes the token for the given ID. * * @param id The ID for which the token should be removed. * @return A future to wait for the process to be completed. */ def delete(id: UUID): Unit = { tokens -= id replyToSender(Future(()),sender) } override def receive: Receive = { case Create(userId: UUID) => create(userId) case Read(id) => read(id) case Delete(id) => delete(id) case CleanUp => cleanUp() } private def replyToSender[T](f: Future[T], target: ActorRef): Unit = { f.onComplete { case Success(s) => target ! Success(s) case Failure(e) => target ! Failure(e) } } override def postStop(): Unit = { cleanUpJob.cancel() } }
Zeta-Project/zeta
api/server/app/de/htwg/zeta/server/actor/TransientTokenCache.scala
Scala
bsd-2-clause
3,105
package reflectdoc.tools.nsc package doc package html package page package diagram import java.io.InputStream import java.io.OutputStream import java.io.InputStreamReader import java.io.OutputStreamWriter import java.io.BufferedWriter import java.io.BufferedReader import scala.sys.process._ import scala.concurrent.SyncVar import model._ /** This class takes care of running the graphviz dot utility */ class DotRunner(settings: doc.Settings) { private[this] var dotRestarts = 0 private[this] var dotProcess: DotProcess = null def feedToDot(dotInput: String, template: DocTemplateEntity): String = { if (dotProcess == null) { if (dotRestarts < settings.docDiagramsDotRestart.value) { if (dotRestarts != 0) settings.printMsg("Graphviz will be restarted...\\n") dotRestarts += 1 dotProcess = new DotProcess(settings) } else return null } val tStart = System.currentTimeMillis val result = dotProcess.feedToDot(dotInput, template.qualifiedName) val tFinish = System.currentTimeMillis DiagramStats.addDotRunningTime(tFinish - tStart) if (result == null) { dotProcess.cleanup() dotProcess = null if (dotRestarts == settings.docDiagramsDotRestart.value) { settings.printMsg("\\n") settings.printMsg("**********************************************************************") settings.printMsg("Diagrams will be disabled for this run because the graphviz dot tool") settings.printMsg("has malfunctioned too many times. 
These scaladoc flags may help:") settings.printMsg("") val baseList = List(settings.docDiagramsDebug, settings.docDiagramsDotPath, settings.docDiagramsDotRestart, settings.docDiagramsDotTimeout) val width = (baseList map (_.helpSyntax.length)).max def helpStr(s: doc.Settings#Setting) = ("%-" + width + "s") format (s.helpSyntax) + " " + s.helpDescription baseList.foreach((sett: doc.Settings#Setting) => settings.printMsg(helpStr(sett))) settings.printMsg("\\nPlease note that graphviz package version 2.26 or above is required.") settings.printMsg("**********************************************************************\\n\\n") } } result } def cleanup() = if (dotProcess != null) dotProcess.cleanup() } class DotProcess(settings: doc.Settings) { @volatile var error: Boolean = false // signal an error val inputString = new SyncVar[String] // used for the dot process input val outputString = new SyncVar[String] // used for the dot process output val errorBuffer: StringBuffer = new StringBuffer() // buffer used for both dot process error console AND logging // set in only one place, in the main thread var process: Process = null var templateName: String = "" var templateInput: String = "" def feedToDot(input: String, template: String): String = { templateName = template templateInput = input try { // process creation if (process == null) { val procIO = new ProcessIO(inputFn(_), outputFn(_), errorFn(_)) val processBuilder: ProcessBuilder = Seq(settings.docDiagramsDotPath.value, "-Tsvg") process = processBuilder.run(procIO) } // pass the input and wait for the output assert(!inputString.isSet) assert(!outputString.isSet) inputString.put(input) var result = outputString.take(settings.docDiagramsDotTimeout.value * 1000L) if (error) result = null result } catch { case exc: Throwable => errorBuffer.append(" Main thread in " + templateName + ": " + (if (exc.isInstanceOf[NoSuchElementException]) "Timeout" else "Exception: " + exc)) error = true return null } } def cleanup(): Unit = 
{ // we'll need to know if there was any error for reporting val _error = error if (process != null) { // if there's no error, this should exit cleanly if (!error) feedToDot("<finish>", "<finishing>") // just in case there's any thread hanging, this will take it out of the loop error = true process.destroy() // we'll need to unblock the input again if (!inputString.isSet) inputString.put("") if (outputString.isSet) outputString.take() } if (_error) { if (settings.docDiagramsDebug.value) { settings.printMsg("\\n**********************************************************************") settings.printMsg("The graphviz dot diagram tool has malfunctioned and will be restarted.") settings.printMsg("\\nThe following is the log of the failure:") settings.printMsg(errorBuffer.toString) settings.printMsg(" Cleanup: Last template: " + templateName) settings.printMsg(" Cleanup: Last dot input: \\n " + templateInput.replaceAll("\\n","\\n ") + "\\n") settings.printMsg(" Cleanup: Dot path: " + settings.docDiagramsDotPath.value) if (process != null) settings.printMsg(" Cleanup: Dot exit code: " + process.exitValue) settings.printMsg("**********************************************************************") } else { // we shouldn't just sit there for 50s not reporting anything, no? 
settings.printMsg("Graphviz dot encountered an error when generating the diagram for:") settings.printMsg(templateName) settings.printMsg("These are usually spurious errors, but if you notice a persistant error on") settings.printMsg("a diagram, please use the " + settings.docDiagramsDebug.name + " flag and report a bug with the output.") } } } /* The standard input passing function */ private[this] def inputFn(stdin: OutputStream): Unit = { val writer = new BufferedWriter(new OutputStreamWriter(stdin)) try { var input = inputString.take() while (!error) { if (input == "<finish>") { // empty => signal to finish stdin.close() return } else { // send output to dot writer.write(input + "\\n\\n") writer.flush() } if (!error) input = inputString.take() } stdin.close() } catch { case exc: Throwable => error = true stdin.close() errorBuffer.append(" Input thread in " + templateName + ": Exception: " + exc + "\\n") } } private[this] def outputFn(stdOut: InputStream): Unit = { val reader = new BufferedReader(new InputStreamReader(stdOut)) val buffer: StringBuilder = new StringBuilder() try { var line = reader.readLine while (!error && line != null) { buffer.append(line + "\\n") // signal the last element in the svg (only for output) if (line == "</svg>") { outputString.put(buffer.toString) buffer.setLength(0) } if (error) { stdOut.close(); return } line = reader.readLine } assert(!outputString.isSet) outputString.put(buffer.toString) stdOut.close() } catch { case exc: Throwable => error = true stdOut.close() errorBuffer.append(" Output thread in " + templateName + ": Exception: " + exc + "\\n") } } private[this] def errorFn(stdErr: InputStream): Unit = { val reader = new BufferedReader(new InputStreamReader(stdErr)) try { var line = reader.readLine while (line != null) { errorBuffer.append(" DOT <error console>: " + line + "\\n") error = true line = reader.readLine } stdErr.close() } catch { case exc: Throwable => error = true stdErr.close() errorBuffer.append(" Error 
thread in " + templateName + ": Exception: " + exc + "\\n") } } }
VladUreche/reflectdoc
components/core/src/reflectdoc/tools/nsc/doc/html/page/diagram/DotRunner.scala
Scala
bsd-3-clause
7,937
//====================================================================================================================== // Facsimile: A Discrete-Event Simulation Library // Copyright © 2004-2020, Michael J Allen. // // This file is part of Facsimile. // // Facsimile is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later // version. // // Facsimile is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied // warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more // details. // // You should have received a copy of the GNU Lesser General Public License along with Facsimile. If not, see: // // http://www.gnu.org/licenses/lgpl. // // The developers welcome all comments, suggestions and offers of assistance. For further information, please visit the // project home page at: // // http://facsim.org/ // // Thank you for your interest in the Facsimile project! // // IMPORTANT NOTE: All patches (modifications to existing files and/or the addition of new files) submitted for // inclusion as part of the official Facsimile code base, must comply with the published Facsimile Coding Standards. If // your code fails to comply with the standard, then your patches will be rejected. For further information, please // visit the coding standards at: // // http://facsim.org/Documentation/CodingStandards/ //====================================================================================================================== //====================================================================================================================== // Scala source file belonging to the org.facsim.util.test package. 
//====================================================================================================================== package org.facsim.util.test import org.facsim.util.types.UniChar import org.scalacheck.{Gen, Shrink} /** Custom ''ScalaCheck'' generators. * * @since 0.0 */ object Generator { /** Negative integers. * * Produce integers across the entire supported range of negative integer values. * * The `suchThat` condition is required because the generator can take values from outside the specified range, * which then causes test failures. Refer to [[https://github.com/rickynils/scalacheck/issues/189 ScalaCheck issue * #189]] for further information. */ val negInt = Gen.choose(Integer.MIN_VALUE, -1).suchThat(_ < 0) //scalastyle:ignore magic.number /** Non-negative integers. * * Produce integers across the entire supported range of non-negative integer values, including zero. */ val nonNegInt = Gen.choose(0, Integer.MAX_VALUE) /** Non-positive integers. * * Produce integers across the entire supported range of non-positive integer values, including zero. */ val nonPosInt = Gen.choose(Integer.MIN_VALUE, 0) /** Generate any integer value. */ // A range of [Integer.MinValue, Integer.MaxValue] exceeds the capacity of a signed 32-bit integer value (31 bits for // the value, 1 for the sign), so we get around this by having two generators and selecting one or the other at // random). val int = Gen.oneOf(negInt, nonNegInt) /** ISO-8859-1 characters. */ private val iso8859_1Char = Gen.choose(0.toChar, 255.toChar) //scalastyle:ignore field.name /** Strings that can be encoded using ISO-8859-1. */ val iso8859_1String = Gen.listOf(iso8859_1Char).map(_.mkString) //scalastyle:ignore field.name /** Set of valid ''[[http://unicode.org/ Unicode]]'' characters. * * Java will throw exceptions if attempts are map to create characters from invalid Unicode codepoints. 
*/ private lazy val unicodeVector = (Character.MIN_CODE_POINT to Character.MAX_CODE_POINT).filter(UniChar.isValid) /** ''[[http://unicode.org/ Unicode]]'' characters. */ private lazy val unicodeChar = Gen.oneOf(unicodeVector) /** Strings that can be encoded in ''[[http://unicode.org/ Unicode]]''. */ lazy val unicodeString = Gen.listOf(unicodeChar).map(_.flatMap(i => Character.toChars(i)).mkString) /** List of ''[[http://unicode.org/ Unicode]]'' strings, which may be empty. */ lazy val unicodeStringList = Gen.listOf(unicodeString) /** List of ''[[http://unicode.org/ Unicode]]'' strings, non-empty.*/ lazy val unicodeStringListNonEmpty = Gen.nonEmptyListOf(unicodeString) /** Utility to prevent _ScalaCheck_ property shrinkage. * * @note Shrinkage, whereby _ScalaCheck_ attempts to find the simplest case of value that causes a problem, is a * brilliant concept. Unfortunately, there's a [[https://github.com/rickynils/scalacheck/issues/129 bug]] in which * shrinked values violate defined constraints, so that the shrinked value fails for a completely different reason to * the original failing value. To work around this, we use this function that effectively disable shrinking of * failed values; the idea for this can be found [[https://github.com/scalatest/scalatest/issues/584 here]]. * * @tparam A Type of value for which shrinking is being disabled. * * @return No shrink value, which should typically be an implicit value, for the specified type, `A`. */ def noShrink[A]: Shrink[A] = Shrink.shrinkAny[A] }
MichaelJAllen/facsimile
facsimile-util/src/test/scala/org/facsim/util/test/Generator.scala
Scala
lgpl-3.0
5,472
package net.chwthewke.scala.protobuf.plugin.templates import net.chwthewke.scala.protobuf.plugin._ import net.chwthewke.scala.protobuf.plugin.interface._ import net.chwthewke.scala.protobuf.plugin.interface.field._ import net.chwthewke.scala.protobuf.plugin.symbols._ import net.chwthewke.scala.protobuf.plugin.syntax._ import scalaz.std.vector._ import scalaz.syntax.traverse._ trait TemplatesProcess { def symbolTable: ProtoSymbolTable def filesToGenerate: Vector[FileDescriptor] def apply: Process[Vector[ProtoDef]] = (filesToGenerate map (protoDef)).sequence def protoDef(fileDescriptor: FileDescriptor): Process[ProtoDef] = for { enums <- (fileDescriptor.enumTypes map enumDef).sequence messages <- (fileDescriptor.messageTypes map messageDef).sequence file: FileSymbol = symbolTable.file(fileDescriptor).get } yield ProtoDef(file.pkg.toString, file.obj.toString, enums ++ messages) def enumDef(desc: EnumDescriptor): Process[EnumDef] = process { val enum: EnumSymbol = symbolTable.enum(desc).get EnumDef(enum.cls, (enum.values map (enumValueDef _).tupled).toSeq) } def enumValueDef(desc: EnumValueDescriptor, name: String): EnumValueDef = EnumValueDef(name, desc.number) def messageDef(desc: Descriptor): Process[MessageDef] = for { enums <- (desc.enumTypes map enumDef).sequence nested <- (desc.nestedTypes map messageDef).sequence fields <- fieldDefs(desc) message: MessageSymbol = symbolTable.message(desc).get } yield MessageDef(message.cls, fields, enums ++ nested) def fieldDefs(desc: Descriptor): Process[FieldDefs] = for { fields <- (desc.fields map fieldDef).sequence } yield FieldDefs(fields) def fieldDef(desc: FieldDescriptor): Process[FieldDef] = process { val field: FieldSymbol = symbolTable.field(desc).get val (ctor, compMult) = desc.label match { case LABEL_OPTIONAL => ("Optional", Some("Option")) case LABEL_REQUIRED => ("Required", None) case LABEL_REPEATED if desc.packed => ("Packed", Some("Vector")) case _ => ("Repeated", Some("Vector")) } val typeRef: ProtoRef = 
symbolTable.typeRef(field) val (compType, fieldType) = typeRef match { case PrimitiveRef(p) => (p, primFieldType(desc.typ)) case EnumRef(e) => (e, s"net.chwthewke.scala.protobuf.FieldType.Enum($e)") case MessageRef(m) => (m, s"net.chwthewke.scala.protobuf.FieldType.MessageField($m)") } def literalDefault: Option[String] = desc.defaultValue match { case None => None case Some(d) => typeRef match { case BoolRef => Some(d) case EnumRef(e) => Some(s"$e.$d") case _ => None } } val fieldDefault: FieldDefaultDef = desc.label match { case LABEL_REPEATED => EmptyVector case LABEL_OPTIONAL => literalDefault map SomeLiteral getOrElse NoneOption case LABEL_REQUIRED => literalDefault map Literal getOrElse NoDefault } FieldDef(field.defn, desc.number, ctor, compType, compMult, fieldDefault, fieldType) } def primFieldType(typ: Type): String = "net.chwthewke.scala.protobuf.FieldType." + (typ match { case TYPE_BOOL => "Bool" case TYPE_BYTES => "Bytes" case TYPE_DOUBLE => "Double" case TYPE_FIXED32 => "Fixed32" case TYPE_FIXED64 => "Fixed64" case TYPE_FLOAT => "Float" case TYPE_INT32 => "Int32" case TYPE_INT64 => "Int64" case TYPE_SFIXED32 => "SFixed32" case TYPE_SFIXED64 => "SFixed64" case TYPE_SINT32 => "SInt32" case TYPE_SINT64 => "SInt64" case TYPE_STRING => "String" case TYPE_UINT32 => "UInt32" case TYPE_UINT64 => "UInt64" case _ => throw new IllegalArgumentException }) } object TemplatesProcess { def apply(symTable: ProtoSymbolTable, files: Vector[FileDescriptor]): TemplatesProcess = new TemplatesProcess { override def symbolTable = symTable override def filesToGenerate = files } }
chwthewke/scala-protobuf
scala-protobuf-plugin-core/src/main/scala/net/chwthewke/scala/protobuf/plugin/templates/TemplatesProcess.scala
Scala
apache-2.0
4,049
package buck import scala.reflect.macros.blackbox import scala.language.experimental.macros import scala.annotation.StaticAnnotation object helloMacro { def impl(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = { import c.universe._ import Flag._ val result = { annottees.map(_.tree).toList match { case q"object $name extends ..$parents { ..$body }" :: Nil => q""" object $name extends ..$parents { def hello: ${typeOf[String]} = "Hello World" ..$body } """ } } c.Expr[Any](result) } } class hello extends StaticAnnotation { def macroTransform(annottees: Any*) = macro helloMacro.impl }
raviagarwal7/buck
test/com/facebook/buck/jvm/scala/testdata/scala_macros/Macros.scala
Scala
apache-2.0
721
package at.logic.gapt.prooftool import scala.swing._ import at.logic.gapt.proofs.lk.base._ import java.awt.Color import at.logic.gapt.proofs.lk.{ ExistsRightRule, ForallLeftRule } /** * * Created by marty on 3/26/14. */ class DrawSingleSequentInference( var orientation: Orientation.Value ) extends ScrollPane { private var _p: Option[LKProof] = None def p(): Option[LKProof] = _p def p_=( np: Option[LKProof] ) = { this._p = np init() revalidate() repaint() } val auxiliaries = new BoxPanel( Orientation.Vertical ) { border = Swing.TitledBorder( Swing.LineBorder( new Color( 0, 0, 0 ), 1 ), " Auxiliary: " ) background = new Color( 255, 255, 255 ) minimumSize = new Dimension( 50, 20 ) xLayoutAlignment = 0 } val primaries = new BoxPanel( Orientation.Vertical ) { border = Swing.TitledBorder( Swing.LineBorder( new Color( 0, 0, 0 ), 1 ), " Primary: " ) background = new Color( 255, 255, 255 ) minimumSize = new Dimension( 50, 20 ) xLayoutAlignment = 0 } val rule = new BoxPanel( Orientation.Vertical ) { border = Swing.TitledBorder( Swing.LineBorder( new Color( 0, 0, 0 ), 1 ), " Inference: " ) background = new Color( 255, 255, 255 ) minimumSize = new Dimension( 50, 20 ) xLayoutAlignment = 0 } val substitution = new BoxPanel( Orientation.Vertical ) { border = Swing.TitledBorder( Swing.LineBorder( new Color( 0, 0, 0 ), 1 ), " Substitution: " ) background = new Color( 255, 255, 255 ) minimumSize = new Dimension( 50, 20 ) xLayoutAlignment = 0 } def setContents() { contents = new BoxPanel( orientation ) { contents += rule contents += auxiliaries contents += primaries contents += substitution } } setContents() def init() { rule.contents.clear() if ( p() != None ) rule.contents += LatexLabel( font, p().get.name ) rule.contents += Swing.Glue auxiliaries.contents.clear() val aux = p() match { case Some( a: UnaryLKProof with AuxiliaryFormulas ) => val r = a.uProof.root List( Sequent( r.antecedent.filter( a.aux( 0 ).contains ), r.succedent.filter( a.aux( 0 ).contains ) ) ) case Some( a: 
BinaryLKProof with AuxiliaryFormulas ) => val r1 = a.uProof1.root val r2 = a.uProof2.root List( Sequent( r1.antecedent.filter( a.aux( 0 ).contains ), r1.succedent.filter( a.aux( 0 ).contains ) ), Sequent( r2.antecedent.filter( a.aux( 1 ).contains ), r2.succedent.filter( a.aux( 1 ).contains ) ) ) case _ => List() } aux.foreach( x => { auxiliaries.contents += DrawSequent( x, font, "" ) } ) auxiliaries.contents += Swing.Glue primaries.contents.clear() val primary = p() match { case Some( pf: PrincipalFormulas ) => val r = p().get.root Some( Sequent( r.antecedent.filter( pf.prin.contains ), r.succedent.filter( pf.prin.contains ) ) ) case Some( p: NullaryLKProof ) => Some( p.root ) case _ => None } if ( primary != None ) primaries.contents += DrawSequent( primary.get, font, "" ) primaries.contents += Swing.Glue substitution.contents.clear() p() match { case Some( ForallLeftRule( _, _, _, _, term ) ) => substitution.contents += LatexLabel( font, DrawSequent.formulaToLatexString( term ) ) case Some( ExistsRightRule( _, _, _, _, term ) ) => substitution.contents += LatexLabel( font, DrawSequent.formulaToLatexString( term ) ) case _ => } substitution.contents += Swing.Glue } def adjustOrientation( o: Orientation.Value ) { val new_orientation = if ( o == Orientation.Vertical || auxiliaries.size.width > bounds.width ) Orientation.Vertical else Orientation.Horizontal if ( orientation != new_orientation ) { orientation = new_orientation setContents() revalidate() repaint() } } }
gisellemnr/gapt
src/main/scala/at/logic/gapt/prooftool/DrawSingleSequentInference.scala
Scala
gpl-3.0
3,923
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Changes for SnappyData data platform. * * Portions Copyright (c) 2017-2019 TIBCO Software Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. 
*/ package org.apache.spark import java.io._ import java.lang.reflect.Constructor import java.net.{URI} import java.util.{Arrays, Locale, Properties, ServiceLoader, UUID} import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap} import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference} import scala.collection.JavaConverters._ import scala.collection.Map import scala.collection.generic.Growable import scala.collection.mutable.HashMap import scala.language.implicitConversions import scala.reflect.{classTag, ClassTag} import scala.util.control.NonFatal import com.google.common.collect.MapMaker import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileSystem, Path} import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable} import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat} import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob} import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat} import org.apache.spark.annotation.DeveloperApi import org.apache.spark.broadcast.Broadcast import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil} import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat} import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ import org.apache.spark.io.CompressionCodec import org.apache.spark.partial.{ApproximateEvaluator, PartialResult} import org.apache.spark.rdd._ import org.apache.spark.rpc.RpcEndpointRef import org.apache.spark.scheduler._ import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, StandaloneSchedulerBackend} import org.apache.spark.scheduler.local.LocalSchedulerBackend import org.apache.spark.serializer.JavaSerializer import 
org.apache.spark.storage._ import org.apache.spark.storage.BlockManagerMessages.TriggerThreadDump import org.apache.spark.ui.{ConsoleProgressBar, SparkUI} import org.apache.spark.ui.jobs.JobProgressListener import org.apache.spark.util._ /** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * Only one SparkContext may be active per JVM. You must `stop()` the active SparkContext before * creating a new one. This limitation may eventually be removed; see SPARK-2243 for more details. * * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */ class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. private val creationSite: CallSite = Utils.getCallSite() // If true, log warnings instead of throwing exceptions when multiple SparkContexts are active private val allowMultipleContexts: Boolean = config.getBoolean("spark.driver.allowMultipleContexts", false) // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this, allowMultipleContexts) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. 
|This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // NOTE: The below constructors could be consolidated using default arguments. Due to // Scala bug SI-8479, however, this causes the compile step to fail when generating docs. // Until we have a good workaround for that bug the constructors remain broken out. /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. 
mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. */ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") warnDeprecatedVersions() /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. 
 | * ------------------------------------------------------------------------------------- */

// Cloned copy of the user-supplied SparkConf; assigned in the init block below.
private var _conf: SparkConf = _
// Event-log destination/codec; only populated when spark.eventLog.enabled is true.
private var _eventLogDir: Option[URI] = None
private var _eventLogCodec: Option[String] = None
// Driver-side SparkEnv (cache, map output tracker, RPC env, ...).
private var _env: SparkEnv = _
// Listener feeding job/stage state to the status tracker and the UI.
private var _jobProgressListener: JobProgressListener = _
private var _statusTracker: SparkStatusTracker = _
// Console progress bar; only created when enabled and INFO logging is off.
private var _progressBar: Option[ConsoleProgressBar] = None
// Live web UI; None when spark.ui.enabled is false (e.g. in tests).
private var _ui: Option[SparkUI] = None
private var _hadoopConfiguration: Configuration = _
// Per-executor memory in MB, resolved from conf/env vars during init.
private var _executorMemory: Int = _
private var _schedulerBackend: SchedulerBackend = _
private var _taskScheduler: TaskScheduler = _
private var _heartbeatReceiver: RpcEndpointRef = _
// Volatile: read by other threads (e.g. job submission) while init may still be running.
@volatile private var _dagScheduler: DAGScheduler = _
private var _applicationId: String = _
private var _applicationAttemptId: Option[String] = None
private var _eventLogger: Option[EventLoggingListener] = None
// Present only when dynamic allocation is enabled.
private var _executorAllocationManager: Option[ExecutorAllocationManager] = None
private var _cleaner: Option[ContextCleaner] = None
private var _listenerBusStarted: Boolean = false
private var _jars: Seq[String] = _
private var _files: Seq[String] = _
// Handle returned by ShutdownHookManager so the hook can be removed on stop().
private var _shutdownHookRef: AnyRef = _
// True when the closure serializer is the default JavaSerializer.
private var _isDefaultClosureSerializer: Boolean = true

/* ------------------------------------------------------------------------------------- *
 | Accessors and public fields. These provide access to the internal state of the        |
 | context.                                                                              |
 * ------------------------------------------------------------------------------------- */

private[spark] def conf: SparkConf = _conf

/**
 * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be
 * changed at runtime.
 */
def getConf: SparkConf = conf.clone()

def jars: Seq[String] = _jars
def files: Seq[String] = _files
def master: String = _conf.get("spark.master")
// Defaults to "client" when spark.submit.deployMode is unset.
def deployMode: String = _conf.getOption("spark.submit.deployMode").getOrElse("client")
def appName: String = _conf.get("spark.app.name")

private[spark] def isEventLogEnabled: Boolean = _conf.getBoolean("spark.eventLog.enabled", false)
private[spark] def eventLogDir: Option[URI] = _eventLogDir
private[spark] def eventLogCodec: Option[String] = _eventLogCodec

def isLocal: Boolean = Utils.isLocalMaster(_conf)

/**
 * @return true if context is stopped or in the midst of stopping.
 */
def isStopped: Boolean = stopped.get()

// An asynchronous listener bus for Spark events
private[spark] val listenerBus = new LiveListenerBus(this)

// This function allows components created by SparkEnv to be mocked in unit tests:
private[spark] def createSparkEnv(
    conf: SparkConf,
    isLocal: Boolean,
    listenerBus: LiveListenerBus): SparkEnv = {
  SparkEnv.createDriverEnv(conf, isLocal, listenerBus, SparkContext.numDriverCores(master))
}

private[spark] def env: SparkEnv = _env

// Used to store a URL for each static file/jar together with the file's local timestamp.
// Concurrent maps because they are mutated from task-submission paths.
private[spark] val addedFiles = new ConcurrentHashMap[String, Long]().asScala
private[spark] val addedJars = new ConcurrentHashMap[String, Long]().asScala

// Remove a previously added jar from the distribution list (does not delete the file).
def removeAddedJar(name : String) {
  logInfo(s"Removing jar $name from SparkContext list")
  addedJars.remove(name)
}

// Keeps track of all persisted RDDs. Weak values so unreferenced RDDs can be collected.
private[spark] val persistentRdds = {
  val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]()
  map.asScala
}

private[spark] def jobProgressListener: JobProgressListener = _jobProgressListener

def statusTracker: SparkStatusTracker = _statusTracker

private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar

private[spark] def ui: Option[SparkUI] = _ui

// URL of the live web UI, when the UI is enabled and bound.
def uiWebUrl: Option[String] = _ui.map(_.webUrl)

/**
 * A default Hadoop Configuration for the
 * Hadoop code (e.g. file systems) that we reuse.
 *
 * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you
 * plan to set some global configurations for all Hadoop RDDs.
 */
def hadoopConfiguration: Configuration = _hadoopConfiguration

private[spark] def executorMemory: Int = _executorMemory

// Environment variables to pass to our executors.
private[spark] val executorEnvs = HashMap[String, String]()

// Set SPARK_USER for user who is running SparkContext.
val sparkUser = Utils.getCurrentUserName()

private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend

private[spark] def taskScheduler: TaskScheduler = _taskScheduler
private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = {
  _taskScheduler = ts
}

private[spark] def dagScheduler: DAGScheduler = _dagScheduler
private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = {
  _dagScheduler = ds
}

/**
 * A unique identifier for the Spark application.
 * Its format depends on the scheduler implementation.
 * (i.e.
 *  in case of local spark app something like 'local-1433865536131'
 *  in case of YARN something like 'application_1433865536131_34483'
 * )
 */
def applicationId: String = _applicationId
def applicationAttemptId: Option[String] = _applicationAttemptId

private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger

private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] =
  _executorAllocationManager

private[spark] def cleaner: Option[ContextCleaner] = _cleaner

private[spark] var checkpointDir: Option[String] = None

// Thread Local variable that can be used by users to pass information down the stack
protected[spark] val localProperties = new InheritableThreadLocal[Properties] {
  override protected def childValue(parent: Properties): Properties = {
    // Note: make a clone such that changes in the parent properties aren't reflected in
    // those of the children threads, which has confusing semantics (SPARK-10563).
    Utils.cloneProperties(parent)
  }
  override protected def initialValue(): Properties = new Properties()
}

/* ------------------------------------------------------------------------------------- *
 | Initialization. This code initializes the context in a manner that is exception-safe. |
 | All internal fields holding state are initialized here, and any error prompts the     |
 | stop() method to be called.                                                           |
 * ------------------------------------------------------------------------------------- */

// Warn, then pass the value through unchanged, when memory comes from legacy SPARK_MEM.
private def warnSparkMem(value: String): String = {
  logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " +
    "deprecated, please use spark.executor.memory instead.")
  value
}

// Emit deprecation warnings when running on Java 7 or Scala 2.10.
private def warnDeprecatedVersions(): Unit = {
  // Split e.g. "1.7.0_80" into at most 3 components on '+', '.', '\' or '-'.
  val javaVersion = System.getProperty("java.version").split("[+.\\\\-]+", 3)
  if (javaVersion.length >= 2 && javaVersion(1).toInt == 7) {
    logWarning("Support for Java 7 is deprecated as of Spark 2.0.0")
  }
  if (scala.util.Properties.releaseVersion.exists(_.startsWith("2.10"))) {
    logWarning("Support for Scala 2.10 is deprecated as of Spark 2.1.0")
  }
}

/** Control our logLevel. This overrides any user-defined log settings.
 * @param logLevel The desired log level as a string.
 * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
 */
def setLogLevel(logLevel: String) {
  // let's allow lowercase or mixed case too
  val upperCased = logLevel.toUpperCase(Locale.ENGLISH)
  require(SparkContext.VALID_LOG_LEVELS.contains(upperCased),
    s"Supplied level $logLevel did not match one of:" +
      s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}")
  Utils.setLogLevel(org.apache.log4j.Level.toLevel(upperCased))
}

// Main constructor body. The ordering of these statements matters: each step may depend on
// state set by earlier ones, and any failure triggers stop() so partial state is torn down.
try {
  _conf = config.clone()
  _conf.validateSettings()

  if (!_conf.contains("spark.master")) {
    throw new SparkException("A master URL must be set in your configuration")
  }
  if (!_conf.contains("spark.app.name")) {
    throw new SparkException("An application name must be set in your configuration")
  }

  // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster
  if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) {
    throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " +
      "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.")
  }

  if (_conf.getBoolean("spark.logConf", false)) {
    logInfo("Spark configuration:\\n" + _conf.toDebugString)
  }

  // Set Spark driver host and port system properties. This explicitly sets the configuration
  // instead of relying on the default value of the config constant.
  _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS))
  _conf.setIfMissing("spark.driver.port", "0")

  _conf.set("spark.executor.id", SparkContext.DRIVER_IDENTIFIER)

  _jars = Utils.getUserJars(_conf)
  _files = _conf.getOption("spark.files").map(_.split(",")).map(_.filter(_.nonEmpty))
    .toSeq.flatten

  _eventLogDir =
    if (isEventLogEnabled) {
      val unresolvedDir = conf.get("spark.eventLog.dir", EventLoggingListener.DEFAULT_LOG_DIR)
        .stripSuffix("/")
      Some(Utils.resolveURI(unresolvedDir))
    } else {
      None
    }

  _eventLogCodec = {
    val compress = _conf.getBoolean("spark.eventLog.compress", false)
    if (compress && isEventLogEnabled) {
      Some(CompressionCodec.getCodecName(_conf)).map(CompressionCodec.getShortName)
    } else {
      None
    }
  }

  if (master == "yarn" && deployMode == "client") System.setProperty("SPARK_YARN_MODE", "true")

  // "_jobProgressListener" should be set up before creating SparkEnv because when creating
  // "SparkEnv", some messages will be posted to "listenerBus" and we should not miss them.
  _jobProgressListener = new JobProgressListener(_conf)
  listenerBus.addListener(jobProgressListener)

  // Create the Spark execution environment (cache, map output tracker, etc)
  _env = createSparkEnv(_conf, isLocal, listenerBus)
  SparkEnv.set(_env)

  _isDefaultClosureSerializer = _env.closureSerializer.isInstanceOf[JavaSerializer]

  // If running the REPL, register the repl's output dir with the file server.
  _conf.getOption("spark.repl.class.outputDir").foreach { path =>
    val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path))
    _conf.set("spark.repl.class.uri", replUri)
  }

  _statusTracker = new SparkStatusTracker(this)

  // Console progress bar only when enabled and INFO logging is off (it would interleave).
  _progressBar =
    if (_conf.getBoolean("spark.ui.showConsoleProgress", true) && !log.isInfoEnabled) {
      Some(new ConsoleProgressBar(this))
    } else {
      None
    }

  _ui =
    if (conf.getBoolean("spark.ui.enabled", true)) {
      Some(SparkUI.createLiveUI(this, _conf, listenerBus, _jobProgressListener,
        _env.securityManager, appName, startTime = startTime))
    } else {
      // For tests, do not enable the UI
      None
    }
  // Bind the UI before starting the task scheduler to communicate
  // the bound port to the cluster manager properly
  _ui.foreach(_.bind())

  _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf)

  // Add each JAR given through the constructor
  if (jars != null) {
    jars.foreach(addJar)
  }

  if (files != null) {
    files.foreach(addFile)
  }

  // Executor memory resolution order: spark.executor.memory, SPARK_EXECUTOR_MEMORY,
  // legacy SPARK_MEM (with a deprecation warning), then a 1024 MB default.
  _executorMemory = _conf.getOption("spark.executor.memory")
    .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY")))
    .orElse(Option(System.getenv("SPARK_MEM"))
    .map(warnSparkMem))
    .map(Utils.memoryStringToMb)
    .getOrElse(1024)

  // Convert java options to env vars as a work around
  // since we can't set env vars directly in sbt.
  for {
    (envKey, propKey) <- Seq(("SPARK_TESTING", "spark.testing"))
    value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} {
    executorEnvs(envKey) = value
  }
  Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v =>
    executorEnvs("SPARK_PREPEND_CLASSES") = v
  }
  // The Mesos scheduler backend relies on this environment variable to set executor memory.
  // TODO: Set this only in the Mesos scheduler.
  executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m"
  executorEnvs ++= _conf.getExecutorEnv
  executorEnvs("SPARK_USER") = sparkUser

  // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will
  // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640)
  _heartbeatReceiver = env.rpcEnv.setupEndpoint(
    HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this))

  // Create and start the scheduler
  val (sched, ts) = SparkContext.createTaskScheduler(this, master, deployMode)
  _schedulerBackend = sched
  _taskScheduler = ts
  _dagScheduler = new DAGScheduler(this)
  _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet)

  // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's
  // constructor
  _taskScheduler.start()

  _applicationId = _taskScheduler.applicationId()
  _applicationAttemptId = taskScheduler.applicationAttemptId()
  _conf.set("spark.app.id", _applicationId)
  if (_conf.getBoolean("spark.ui.reverseProxy", false)) {
    System.setProperty("spark.ui.proxyBase", "/proxy/" + _applicationId)
  }
  _ui.foreach(_.setAppId(_applicationId))
  _env.blockManager.initialize(_applicationId)

  // The metrics system for Driver need to be set spark.app.id to app ID.
  // So it should start after we get app ID from the task scheduler and set spark.app.id.
  _env.metricsSystem.start()
  // Attach the driver metrics servlet handler to the web ui after the metrics system is started.
  _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler)))

  _eventLogger =
    if (isEventLogEnabled) {
      val logger =
        new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get,
          _conf, _hadoopConfiguration)
      logger.start()
      listenerBus.addListener(logger)
      Some(logger)
    } else {
      None
    }

  // Optionally scale number of executors dynamically based on workload. Exposed for testing.
  val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf)
  _executorAllocationManager =
    if (dynamicAllocationEnabled) {
      schedulerBackend match {
        case b: ExecutorAllocationClient =>
          Some(new ExecutorAllocationManager(
            schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf))
        case _ =>
          None
      }
    } else {
      None
    }
  _executorAllocationManager.foreach(_.start())

  _cleaner =
    if (_conf.getBoolean("spark.cleaner.referenceTracking", true)) {
      Some(new ContextCleaner(this))
    } else {
      None
    }
  _cleaner.foreach(_.start())

  setupAndStartListenerBus()
  postEnvironmentUpdate()
  postApplicationStart()

  // Post init
  _taskScheduler.postStartHook()
  _env.metricsSystem.registerSource(_dagScheduler.metricsSource)
  _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager))
  _executorAllocationManager.foreach { e =>
    _env.metricsSystem.registerSource(e.executorAllocationManagerSource)
  }

  // Make sure the context is stopped if the user forgets about it. This avoids leaving
  // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM
  // is killed, though.
  logDebug("Adding shutdown hook") // force eager creation of logger
  _shutdownHookRef = ShutdownHookManager.addShutdownHook(
    ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () =>
    logInfo("Invoking stop() from shutdown hook")
    stop()
  }
} catch {
  case NonFatal(e) =>
    logError("Error initializing SparkContext.", e)
    try {
      stop()
    } catch {
      case NonFatal(inner) =>
        logError("Error stopping SparkContext after init error.", inner)
    } finally {
      throw e
    }
}

/**
 * Called by the web UI to obtain executor thread dumps. This method may be expensive.
 * Logs an error and returns None if we failed to obtain a thread dump, which could occur due
 * to an executor being dead or unresponsive or due to network issues while sending the thread
 * dump message back to the driver.
 */
private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = {
  try {
    if (executorId == SparkContext.DRIVER_IDENTIFIER) {
      // The "executor" is the driver itself: dump this JVM's threads locally.
      Some(Utils.getThreadDump())
    } else {
      // Ask the remote executor's endpoint for its thread dump over RPC.
      val endpointRef = env.blockManager.master.getExecutorEndpointRef(executorId).get
      Some(endpointRef.askWithRetry[Array[ThreadStackTrace]](TriggerThreadDump))
    }
  } catch {
    case e: Exception =>
      logError(s"Exception getting thread dump from executor $executorId", e)
      None
  }
}

private[spark] def getLocalProperties: Properties = localProperties.get()

private[spark] def setLocalProperties(props: Properties) {
  localProperties.set(props)
}

/**
 * Set a local property that affects jobs submitted from this thread, such as the Spark fair
 * scheduler pool. User-defined properties may also be set here. These properties are propagated
 * through to worker tasks and can be accessed there via
 * [[org.apache.spark.TaskContext#getLocalProperty]].
 *
 * These properties are inherited by child threads spawned from this thread. This
 * may have unexpected consequences when working with thread pools. The standard java
 * implementation of thread pools have worker threads spawn other worker threads.
 * As a result, local properties may propagate unpredictably.
 */
def setLocalProperty(key: String, value: String) {
  if (value == null) {
    // null value means "remove the property" rather than storing a null.
    localProperties.get.remove(key)
  } else {
    localProperties.get.setProperty(key, value)
  }
}

/**
 * Get a local property set in this thread, or null if it is missing. See
 * `org.apache.spark.SparkContext.setLocalProperty`.
 */
def getLocalProperty(key: String): String =
  Option(localProperties.get).map(_.getProperty(key)).orNull

/** Set a human readable description of the current job. */
def setJobDescription(value: String) {
  setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value)
}

/**
 * Assigns a group ID to all the jobs started by this thread until the group ID is set to a
 * different value or cleared.
 *
 * Often, a unit of execution in an application consists of multiple Spark actions or jobs.
 * Application programmers can use this method to group all those jobs together and give a
 * group description. Once set, the Spark web UI will associate such jobs with this group.
 *
 * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all
 * running jobs in this group. For example,
 * {{{
 * // In the main thread:
 * sc.setJobGroup("some_job_to_cancel", "some job description")
 * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count()
 *
 * // In a separate thread:
 * sc.cancelJobGroup("some_job_to_cancel")
 * }}}
 *
 * If interruptOnCancel is set to true for the job group, then job cancellation will result
 * in Thread.interrupt() being called on the job's executor threads. This is useful to help ensure
 * that the tasks are actually stopped in a timely manner, but is off by default due to HDFS-1208,
 * where HDFS may respond to Thread.interrupt() by marking nodes as dead.
 */
def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false) {
  // The group is carried as thread-local properties, so it applies to jobs from this thread.
  setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description)
  setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId)
  // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids
  // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup
  // APIs to also take advantage of this property (e.g., internal job failures or canceling from
  // JobProgressTab UI) on a per-job basis.
  setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString)
}

/** Clear the current thread's job group ID and its description.
 */
def clearJobGroup() {
  // Setting to null removes each thread-local property (see setLocalProperty).
  setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null)
  setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null)
  setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null)
}

/**
 * Execute a block of code in a scope such that all new RDDs created in this body will
 * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}.
 *
 * @note Return statements are NOT allowed in the given body.
 */
private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body)

// Methods for creating RDDs

/** Distribute a local Scala collection to form an RDD.
 *
 * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call
 * to parallelize and before the first action on the RDD, the resultant RDD will reflect the
 * modified collection. Pass a copy of the argument to avoid this.
 * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an
 * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions.
 */
def parallelize[T: ClassTag](
    seq: Seq[T],
    numSlices: Int = defaultParallelism): RDD[T] = withScope {
  assertNotStopped()
  // No location preferences for a locally supplied collection.
  new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]())
}

/**
 * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by
 * `step` every element.
 *
 * @note if we need to cache this RDD, we should make sure each partition does not exceed limit.
 *
 * @param start the start value.
 * @param end the end value.
 * @param step the incremental step
 * @param numSlices the partition number of the new RDD.
 * @return an RDD of Long values covering the requested range
 */
def range(
    start: Long,
    end: Long,
    step: Long = 1,
    numSlices: Int = defaultParallelism): RDD[Long] = withScope {
  assertNotStopped()
  // when step is 0, range will run infinitely
  require(step != 0, "step cannot be 0")
  // Compute the element count in BigInt to avoid Long overflow on (end - start).
  val numElements: BigInt = {
    val safeStart = BigInt(start)
    val safeEnd = BigInt(end)
    if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) {
      (safeEnd - safeStart) / step
    } else {
      // the remainder has the same sign with range, could add 1 more
      (safeEnd - safeStart) / step + 1
    }
  }
  parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) =>
    // Per-partition boundaries, still in BigInt space.
    val partitionStart = (i * numElements) / numSlices * step + start
    val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start
    // Clamp a BigInt boundary into the Long range.
    def getSafeMargin(bi: BigInt): Long =
      if (bi.isValidLong) {
        bi.toLong
      } else if (bi > 0) {
        Long.MaxValue
      } else {
        Long.MinValue
      }
    val safePartitionStart = getSafeMargin(partitionStart)
    val safePartitionEnd = getSafeMargin(partitionEnd)

    new Iterator[Long] {
      private[this] var number: Long = safePartitionStart
      private[this] var overflow: Boolean = false

      override def hasNext =
        if (!overflow) {
          if (step > 0) {
            number < safePartitionEnd
          } else {
            number > safePartitionEnd
          }
        } else false

      override def next() = {
        val ret = number
        number += step
        if (number < ret ^ step < 0) {
          // we have Long.MaxValue + Long.MaxValue < Long.MaxValue
          // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step
          // back, we are pretty sure that we have an overflow.
          overflow = true
        }
        ret
      }
    }
  }
}

/** Distribute a local Scala collection to form an RDD.
 *
 * This method is identical to `parallelize`.
 */
def makeRDD[T: ClassTag](
    seq: Seq[T],
    numSlices: Int = defaultParallelism): RDD[T] = withScope {
  parallelize(seq, numSlices)
}

/**
 * Distribute a local Scala collection to form an RDD, with one or more
 * location preferences (hostnames of Spark nodes) for each object.
 * Create a new partition for each collection item.
*/ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. 
 */
def wholeTextFiles(
    path: String,
    minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope {
  assertNotStopped()
  val job = NewHadoopJob.getInstance(hadoopConfiguration)
  // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking
  // comma separated files as input. (see SPARK-7155)
  NewFileInputFormat.setInputPaths(job, path)
  val updateConf = job.getConfiguration
  new WholeTextFileRDD(
    this,
    classOf[WholeTextFileInputFormat],
    classOf[Text],
    classOf[Text],
    updateConf,
    minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path)
}

/**
 * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file
 * (useful for binary data)
 *
 * Each file becomes one (path, PortableDataStream) record.
 *
 * @note Small files are preferred; very large files may cause bad performance.
 * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files
 * in a directory rather than `.../path/` or `.../path`
 *
 * @param path Directory to the input data files, the path can be comma separated paths as the
 *             list of inputs.
 * @param minPartitions A suggestion value of the minimal splitting number for input data.
 */
def binaryFiles(
    path: String,
    minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope {
  assertNotStopped()
  val job = NewHadoopJob.getInstance(hadoopConfiguration)
  // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking
  // comma separated files as input. (see SPARK-7155)
  NewFileInputFormat.setInputPaths(job, path)
  val updateConf = job.getConfiguration
  new BinaryFileRDD(
    this,
    classOf[StreamInputFormat],
    classOf[String],
    classOf[PortableDataStream],
    updateConf,
    minPartitions).setName(path)
}

/**
 * Load data from a flat binary file, assuming the length of each record is constant.
 *
 * @note We ensure that the byte array for each record in the resulting RDD
 * has the provided record length.
 *
 * @param path Directory to the input data files, the path can be comma separated paths as the
 *             list of inputs.
 * @param recordLength The length at which to split the records
 * @param conf Configuration for setting up the dataset.
 *
 * @return An RDD of data with values, represented as byte arrays
 */
def binaryRecords(
    path: String,
    recordLength: Int,
    conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope {
  assertNotStopped()
  // Tell the fixed-length input format how long each record is.
  conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
  val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path,
    classOf[FixedLengthBinaryInputFormat],
    classOf[LongWritable],
    classOf[BytesWritable],
    conf = conf)
  br.map { case (k, v) =>
    // Copy out of the reused Writable buffer and sanity-check the record size.
    val bytes = v.copyBytes()
    assert(bytes.length == recordLength, "Byte array does not have correct length")
    bytes
  }
}

/**
 * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other
 * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable),
 * using the older MapReduce API (`org.apache.hadoop.mapred`).
 *
 * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast.
 *             Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
 *             sure you won't modify the conf. A safe approach is always creating a new conf for
 *             a new RDD.
 * @param inputFormatClass Class of the InputFormat
 * @param keyClass Class of the keys
 * @param valueClass Class of the values
 * @param minPartitions Minimum number of Hadoop Splits to generate.
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def hadoopRDD[K, V](
    conf: JobConf,
    inputFormatClass: Class[_ <: InputFormat[K, V]],
    keyClass: Class[K],
    valueClass: Class[V],
    minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
  assertNotStopped()

  // This is a hack to enforce loading hdfs-site.xml.
  // See SPARK-11227 for details.
  FileSystem.getLocal(conf)

  // Add necessary security credentials to the JobConf before broadcasting it.
  SparkHadoopUtil.get.addCredentials(conf)
  new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions)
}

/** Get an RDD for a Hadoop file with an arbitrary InputFormat
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def hadoopFile[K, V](
    path: String,
    inputFormatClass: Class[_ <: InputFormat[K, V]],
    keyClass: Class[K],
    valueClass: Class[V],
    minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
  assertNotStopped()

  // This is a hack to enforce loading hdfs-site.xml.
  // See SPARK-11227 for details.
  FileSystem.getLocal(hadoopConfiguration)

  // A Hadoop configuration can be about 10 KB, which is pretty big, so broadcast it.
  val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration))
  val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path)
  new HadoopRDD(
    this,
    confBroadcast,
    Some(setInputPathsFunc),
    inputFormatClass,
    keyClass,
    valueClass,
    minPartitions).setName(path)
}

/**
 * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
 * values and the InputFormat so that users don't need to pass them directly. Instead, callers
 * can just write, for example,
 * {{{
 * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions)
 * }}}
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def hadoopFile[K, V, F <: InputFormat[K, V]]
    (path: String, minPartitions: Int)
    (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
  // Recover the runtime Class objects from the implicit ClassTags and delegate.
  hadoopFile(path,
    fm.runtimeClass.asInstanceOf[Class[F]],
    km.runtimeClass.asInstanceOf[Class[K]],
    vm.runtimeClass.asInstanceOf[Class[V]],
    minPartitions)
}

/**
 * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
 * values and the InputFormat so that users don't need to pass them directly. Instead, callers
 * can just write, for example,
 * {{{
 * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path)
 * }}}
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def hadoopFile[K, V, F <: InputFormat[K, V]](path: String)
    (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
  hadoopFile[K, V, F](path, defaultMinPartitions)
}

/** Get an RDD for a Hadoop file with an arbitrary new API InputFormat. */
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]
    (path: String)
    (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
  // Recover the runtime Class objects from the implicit ClassTags and delegate.
  newAPIHadoopFile(
    path,
    fm.runtimeClass.asInstanceOf[Class[F]],
    km.runtimeClass.asInstanceOf[Class[K]],
    vm.runtimeClass.asInstanceOf[Class[V]])
}

/**
 * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
 * and extra configuration options to pass to the input format.
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]](
    path: String,
    fClass: Class[F],
    kClass: Class[K],
    vClass: Class[V],
    conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope {
  assertNotStopped()

  // This is a hack to enforce loading hdfs-site.xml.
  // See SPARK-11227 for details.
  FileSystem.getLocal(hadoopConfiguration)

  // The call to NewHadoopJob automatically adds security credentials to conf,
  // so we don't need to explicitly add them ourselves
  val job = NewHadoopJob.getInstance(conf)
  // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking
  // comma separated files as input. (see SPARK-7155)
  NewFileInputFormat.setInputPaths(job, path)
  val updatedConf = job.getConfiguration
  new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path)
}

/**
 * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
 * and extra configuration options to pass to the input format.
 *
 * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
 *             Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
 *             sure you won't modify the conf. A safe approach is always creating a new conf for
 *             a new RDD.
 * @param fClass Class of the InputFormat
 * @param kClass Class of the keys
 * @param vClass Class of the values
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
    conf: Configuration = hadoopConfiguration,
    fClass: Class[F],
    kClass: Class[K],
    vClass: Class[V]): RDD[(K, V)] = withScope {
  assertNotStopped()

  // This is a hack to enforce loading hdfs-site.xml.
  // See SPARK-11227 for details.
  FileSystem.getLocal(conf)

  // Add necessary security credentials to the JobConf. Required to access secure HDFS.
  val jconf = new JobConf(conf)
  SparkHadoopUtil.get.addCredentials(jconf)
  new NewHadoopRDD(this, fClass, kClass, vClass, jconf)
}

/**
 * Get an RDD for a Hadoop SequenceFile with given key and value types.
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. 
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def sequenceFile[K, V]
    (path: String, minPartitions: Int = defaultMinPartitions)
    (implicit km: ClassTag[K], vm: ClassTag[V],
      kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = {
  withScope {
    assertNotStopped()
    // Clean the converter factories before invoking them so the closures they produce are
    // serializable when captured by the map below.
    val kc = clean(kcf)()
    val vc = clean(vcf)()
    val format = classOf[SequenceFileInputFormat[Writable, Writable]]
    // Read raw Writables, then convert each pair to the user-facing K/V types.
    val writables = hadoopFile(path, format,
      kc.writableClass(km).asInstanceOf[Class[Writable]],
      vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions)
    writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) }
  }
}

/**
 * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and
 * BytesWritable values that contain a serialized partition. This is still an experimental
 * storage format and may not be supported exactly as is in future Spark releases. It will also
 * be pretty slow if you use the default serializer (Java serialization),
 * though the nice thing about it is that there's very little effort required to save arbitrary
 * objects.
 */
def objectFile[T: ClassTag](
    path: String,
    minPartitions: Int = defaultMinPartitions): RDD[T] = withScope {
  assertNotStopped()
  // Each BytesWritable value holds a serialized Array[T] (one per saved partition);
  // flatMap deserializes and flattens them back into individual elements.
  sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions)
    .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader))
}

// Reconstructs an RDD from files written by a reliable checkpoint.
protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope {
  new ReliableCheckpointRDD[T](this, path)
}

/** Build the union of a list of RDDs
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val partitioners = rdds.flatMap(_.partitioner).toSet if (rdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, rdds) } else { new UnionRDD(this, rdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Create an [[org.apache.spark.Accumulator]] variable of a given type, which tasks can "add" * values to using the `+=` method. Only the driver can access the accumulator's `value`. */ @deprecated("use AccumulatorV2", "2.0.0") def accumulator[T](initialValue: T)(implicit param: AccumulatorParam[T]): Accumulator[T] = { val acc = new Accumulator(initialValue, param) cleaner.foreach(_.registerAccumulatorForCleanup(acc.newAcc)) acc } /** * Create an [[org.apache.spark.Accumulator]] variable of a given type, with a name for display * in the Spark UI. Tasks can "add" values to the accumulator using the `+=` method. Only the * driver can access the accumulator's `value`. */ @deprecated("use AccumulatorV2", "2.0.0") def accumulator[T](initialValue: T, name: String)(implicit param: AccumulatorParam[T]) : Accumulator[T] = { val acc = new Accumulator(initialValue, param, Option(name)) cleaner.foreach(_.registerAccumulatorForCleanup(acc.newAcc)) acc } /** * Create an [[org.apache.spark.Accumulable]] shared variable, to which tasks can add values * with `+=`. Only the driver can access the accumulable's `value`. 
 * @tparam R accumulator result type
 * @tparam T type that can be added to the accumulator
 */
@deprecated("use AccumulatorV2", "2.0.0")
def accumulable[R, T](initialValue: R)(implicit param: AccumulableParam[R, T])
  : Accumulable[R, T] = {
  val acc = new Accumulable(initialValue, param)
  // Register the backing AccumulatorV2 so it is cleaned up when garbage collected.
  cleaner.foreach(_.registerAccumulatorForCleanup(acc.newAcc))
  acc
}

/**
 * Create an [[org.apache.spark.Accumulable]] shared variable, with a name for display in the
 * Spark UI. Tasks can add values to the accumulable using the `+=` operator. Only the driver can
 * access the accumulable's `value`.
 * @tparam R accumulator result type
 * @tparam T type that can be added to the accumulator
 */
@deprecated("use AccumulatorV2", "2.0.0")
def accumulable[R, T](initialValue: R, name: String)(implicit param: AccumulableParam[R, T])
  : Accumulable[R, T] = {
  val acc = new Accumulable(initialValue, param, Option(name))
  cleaner.foreach(_.registerAccumulatorForCleanup(acc.newAcc))
  acc
}

/**
 * Create an accumulator from a "mutable collection" type.
 *
 * Growable and TraversableOnce are the standard APIs that guarantee += and ++=, implemented by
 * standard mutable collections. So you can use this with mutable Map, Set, etc.
 */
// NOTE: the `<%` view bound below is deprecated in newer Scala versions, as is this whole
// accumulator API; both are retained for source compatibility.
@deprecated("use AccumulatorV2", "2.0.0")
def accumulableCollection[R <% Growable[T] with TraversableOnce[T] with Serializable: ClassTag, T]
    (initialValue: R): Accumulable[R, T] = {
  val param = new GrowableAccumulableParam[R, T]
  val acc = new Accumulable(initialValue, param)
  cleaner.foreach(_.registerAccumulatorForCleanup(acc.newAcc))
  acc
}

/**
 * Register the given accumulator.
 *
 * @note Accumulators must be registered before use, or it will throw exception.
 */
def register(acc: AccumulatorV2[_, _]): Unit = {
  acc.register(this)
}

/**
 * Register the given accumulator with given name.
 *
 * @note Accumulators must be registered before use, or it will throw exception.
*/ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each cluster only once. 
 */
def broadcast[T: ClassTag](value: T): Broadcast[T] = {
  assertNotStopped()
  // Broadcasting an RDD is almost always a mistake (RDDs are driver-side handles, not data);
  // fail fast with a pointer to the correct pattern.
  require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass),
    "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.")
  val bc = env.broadcastManager.newBroadcast[T](value, isLocal)
  val callSite = getCallSite
  // NOTE(review): other methods in this file log via logInfo directly (e.g. runApproximateJob);
  // confirm env.taskLogger is the intended logging path here.
  env.taskLogger.logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm)
  // Allow the cleaner to remove the broadcast's blocks once it is no longer referenced.
  cleaner.foreach(_.registerBroadcastForCleanup(bc))
  bc
}

/**
 * Add a file to be downloaded with this Spark job on every node.
 * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
 * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
 * use `SparkFiles.get(fileName)` to find its download location.
 */
def addFile(path: String): Unit = {
  addFile(path, false)
}

/**
 * Returns a list of file paths that are added to resources.
 */
def listFiles(): Seq[String] = addedFiles.keySet.toSeq

/**
 * Add a file to be downloaded with this Spark job on every node.
 * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
 * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
 * use `SparkFiles.get(fileName)` to find its download location.
 *
 * A directory can be given if the recursive option is set to true. Currently directories are only
 * supported for Hadoop-supported filesystems.
 */
def addFile(path: String, recursive: Boolean): Unit = {
  val uri = new Path(path).toUri
  // Scheme-less and "local" paths refer to files on the driver; resolve them to a
  // canonical file: URI so downstream handling is uniform.
  val schemeCorrectedPath = uri.getScheme match {
    case null | "local" => new File(path).getCanonicalFile.toURI.toString
    case _ => path
  }

  val hadoopPath = new Path(schemeCorrectedPath)
  val scheme = new URI(schemeCorrectedPath).getScheme
  if (!Array("http", "https", "ftp").contains(scheme)) {
    // For filesystem paths, validate directory handling up front.
    val fs = hadoopPath.getFileSystem(hadoopConfiguration)
    val isDir = fs.getFileStatus(hadoopPath).isDirectory
    if (!isLocal && scheme == "file" && isDir) {
      throw new SparkException(s"addFile does not support local directories when not running " +
        "local mode.")
    }
    if (!recursive && isDir) {
      throw new SparkException(s"Added file $hadoopPath is a directory and recursive is not " +
        "turned on.")
    }
  } else {
    // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
    Utils.validateURL(uri)
  }

  // For non-local runs of driver-local files, serve the file from the driver's file server;
  // otherwise executors fetch the path directly.
  val key = if (!isLocal && scheme == "file") {
    env.rpcEnv.fileServer.addFile(new File(uri.getPath))
  } else {
    schemeCorrectedPath
  }
  val timestamp = System.currentTimeMillis
  // putIfAbsent: only the first registration of a given key triggers the fetch and event.
  if (addedFiles.putIfAbsent(key, timestamp).isEmpty) {
    logInfo(s"Added file $path at $key with timestamp $timestamp")
    // Fetch the file locally so that closures which are run on the driver can still use the
    // SparkFiles API to access files.
    Utils.fetchFile(uri.toString, new File(SparkFiles.getRootDirectory()), conf,
      env.securityManager, hadoopConfiguration, timestamp, useCache = false)
    postEnvironmentUpdate()
  }
}

// Removes a previously added file from the driver's file server.
// NOTE(review): this does not remove the entry from `addedFiles`; confirm that is intended.
def removeFile(path: String): Unit = {
  env.rpcEnv.fileServer.removeFile(path)
}

/**
 * :: DeveloperApi ::
 * Register a listener to receive up-calls from events that happen during execution.
 */
@DeveloperApi
def addSparkListener(listener: SparkListenerInterface) {
  listenerBus.addListener(listener)
}

// Returns the IDs of currently live executors, or Nil if the scheduler backend does not
// support executor introspection (only coarse-grained backends do).
private[spark] def getExecutorIds(): Seq[String] = {
  schedulerBackend match {
    case b: CoarseGrainedSchedulerBackend =>
      b.getExecutorIds()
    case _ =>
      logWarning("Requesting executors is only supported in coarse-grained mode")
      Nil
  }
}

/**
 * Update the cluster manager on our scheduling needs. Three bits of information are included
 * to help it make decisions.
 * @param numExecutors The total number of executors we'd like to have. The cluster manager
 *                     shouldn't kill any running executor to reach this number, but,
 *                     if all existing executors were to die, this is the number of executors
 *                     we'd want to be allocated.
 * @param localityAwareTasks The number of tasks in all active stages that have a locality
 *                           preferences. This includes running, pending, and completed tasks.
 * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
 *                             that would like to like to run on that host.
 *                             This includes running, pending, and completed tasks.
 * @return whether the request is acknowledged by the cluster manager.
 */
@DeveloperApi
def requestTotalExecutors(
    numExecutors: Int,
    localityAwareTasks: Int,
    hostToLocalTaskCount: scala.collection.immutable.Map[String, Int]
  ): Boolean = {
  schedulerBackend match {
    case b: CoarseGrainedSchedulerBackend =>
      b.requestTotalExecutors(numExecutors, localityAwareTasks, hostToLocalTaskCount)
    case _ =>
      // Non-coarse-grained backends cannot resize; report failure rather than throwing.
      logWarning("Requesting executors is only supported in coarse-grained mode")
      false
  }
}

/**
 * :: DeveloperApi ::
 * Request an additional number of executors from the cluster manager.
 * @return whether the request is received.
 */
@DeveloperApi
def requestExecutors(numAdditionalExecutors: Int): Boolean = {
  schedulerBackend match {
    case b: CoarseGrainedSchedulerBackend =>
      b.requestExecutors(numAdditionalExecutors)
    case _ =>
      // Only coarse-grained backends support dynamic executor requests.
      logWarning("Requesting executors is only supported in coarse-grained mode")
      false
  }
}

/**
 * :: DeveloperApi ::
 * Request that the cluster manager kill the specified executors.
 *
 * @note This is an indication to the cluster manager that the application wishes to adjust
 * its resource usage downwards. If the application wishes to replace the executors it kills
 * through this method with new ones, it should follow up explicitly with a call to
 * {{SparkContext#requestExecutors}}.
 *
 * @return whether the request is received.
 */
@DeveloperApi
def killExecutors(executorIds: Seq[String]): Boolean = {
  schedulerBackend match {
    case b: CoarseGrainedSchedulerBackend =>
      // replace = false: this shrinks the application's resource footprint.
      // force = true: kill even if the executors are running tasks.
      b.killExecutors(executorIds, replace = false, force = true).nonEmpty
    case _ =>
      logWarning("Killing executors is only supported in coarse-grained mode")
      false
  }
}

/**
 * :: DeveloperApi ::
 * Request that the cluster manager kill the specified executor.
 *
 * @note This is an indication to the cluster manager that the application wishes to adjust
 * its resource usage downwards. If the application wishes to replace the executor it kills
 * through this method with a new one, it should follow up explicitly with a call to
 * {{SparkContext#requestExecutors}}.
 *
 * @return whether the request is received.
 */
@DeveloperApi
def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId))

/**
 * Request that the cluster manager kill the specified executor without adjusting the
 * application resource requirements.
 *
 * The effect is that a new executor will be launched in place of the one killed by
 * this request. This assumes the cluster manager will automatically and eventually
 * fulfill all missing application resource requests.
* * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: CoarseGrainedSchedulerBackend => b.killExecutors(Seq(executorId), replace = true, force = true).nonEmpty case _ => logWarning("Killing executors is only supported in coarse-grained mode") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the slave to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray StorageUtils.updateRddInfo(rddInfos, getExecutorStorageStatus) rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return information about blocks stored in all of the slaves */ @DeveloperApi def getExecutorStorageStatus: Array[StorageStatus] = { assertNotStopped() env.blockManager.master.getStorageStatus } /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() if (taskScheduler eq null) return Seq.empty // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]) { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean = true) { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this SparkContext in the future. 
 * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
 * filesystems), an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node.
 */
def addJar(path: String) {
  if (path == null) {
    logWarning("null specified as parameter to addJar")
  } else {
    // `key` is the canonical identifier under which the jar is registered; it stays null/""
    // on failure, which suppresses registration below.
    var key = ""
    if (path.contains("\\")) {
      // For local paths with backslashes on Windows, URI throws an exception
      key = env.rpcEnv.fileServer.addJar(new File(path))
    } else {
      val uri = new URI(path)
      // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
      Utils.validateURL(uri)
      key = uri.getScheme match {
        // A JAR file which exists only on the driver node
        case null | "file" =>
          try {
            val file = new File(uri.getPath)
            if (!file.exists()) {
              throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found")
            }
            if (file.isDirectory) {
              throw new IllegalArgumentException(
                s"Directory ${file.getAbsoluteFile} is not allowed for addJar")
            }
            // Serve the driver-local jar from the driver's file server.
            env.rpcEnv.fileServer.addJar(new File(uri.getPath))
          } catch {
            // Best-effort: log and skip registration rather than failing the caller.
            case NonFatal(e) =>
              logError(s"Failed to add $path to Spark environment", e)
              null
          }
        // A JAR file which exists locally on every worker node
        case "local" =>
          "file:" + uri.getPath
        case _ =>
          path
      }
    }
    if (key != null) {
      val timestamp = System.currentTimeMillis
      // putIfAbsent: only the first registration of a key logs and posts the env update.
      if (addedJars.putIfAbsent(key, timestamp).isEmpty) {
        logInfo(s"Added JAR $path at $key with timestamp $timestamp")
        postEnvironmentUpdate()
      }
    }
  }
}

/**
 * Returns a list of jar files that are added to resources.
 */
def listJars(): Seq[String] = addedJars.keySet.toSeq

/**
 * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark
 * may wait for some internal threads to finish. It's better to use this method to stop
 * SparkContext instead.
 */
private[spark] def stopInNewThread(): Unit = {
  // Stop from a dedicated daemon thread so internal threads waiting on the caller
  // cannot deadlock with the shutdown sequence.
  new Thread("stop-spark-context") {
    setDaemon(true)

    override def run(): Unit = {
      try {
        SparkContext.this.stop()
      } catch {
        case e: Throwable =>
          logError(e.getMessage, e)
          throw e
      }
    }
  }.start()
}

/**
 * Shut down the SparkContext.
 */
// NOTE: teardown order below is deliberate (UI before env, DAG scheduler before RPC
// endpoints, etc.); each step is wrapped in tryLogNonFatalError so one failure does not
// abort the rest of the shutdown.
def stop(): Unit = {
  if (LiveListenerBus.withinListenerThread.value) {
    // Stopping from inside a listener would deadlock on the listener bus shutdown.
    throw new SparkException(
      s"Cannot stop SparkContext within listener thread of ${LiveListenerBus.name}")
  }
  // Use the stopping variable to ensure no contention for the stop scenario.
  // Still track the stopped variable for use elsewhere in the code.
  if (!stopped.compareAndSet(false, true)) {
    logInfo("SparkContext already stopped.")
    return
  }
  if (_shutdownHookRef != null) {
    ShutdownHookManager.removeShutdownHook(_shutdownHookRef)
  }
  Utils.tryLogNonFatalError {
    postApplicationEnd()
  }
  Utils.tryLogNonFatalError {
    _ui.foreach(_.stop())
  }
  if (env != null) {
    Utils.tryLogNonFatalError {
      env.metricsSystem.report()
    }
  }
  Utils.tryLogNonFatalError {
    _cleaner.foreach(_.stop())
  }
  Utils.tryLogNonFatalError {
    _executorAllocationManager.foreach(_.stop())
  }
  if (_listenerBusStarted) {
    Utils.tryLogNonFatalError {
      listenerBus.stop()
      _listenerBusStarted = false
    }
  }
  Utils.tryLogNonFatalError {
    _eventLogger.foreach(_.stop())
  }
  if (_dagScheduler != null) {
    Utils.tryLogNonFatalError {
      _dagScheduler.stop()
    }
    _dagScheduler = null
  }
  if (env != null && _heartbeatReceiver != null) {
    Utils.tryLogNonFatalError {
      env.rpcEnv.stop(_heartbeatReceiver)
    }
  }
  Utils.tryLogNonFatalError {
    _progressBar.foreach(_.stop())
  }
  _taskScheduler = null
  // TODO: Cache.stop()?
  if (_env != null) {
    Utils.tryLogNonFatalError {
      _env.stop()
    }
    SparkEnv.set(null)
  }
  // Unset YARN mode system env variable, to allow switching between cluster types.
  System.clearProperty("SPARK_YARN_MODE")
  SparkContext.clearActiveContext()
  logInfo("Successfully stopped SparkContext")
}

/**
 * Get Spark's home location from either a value set through the constructor,
 * or the spark.home Java property, or the SPARK_HOME environment variable
 * (in that order of preference). If neither of these is set, return None.
 */
private[spark] def getSparkHome(): Option[String] = {
  conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME")))
}

/**
 * Set the thread-local property for overriding the call sites
 * of actions and RDDs.
 */
def setCallSite(shortCallSite: String) {
  setLocalProperty(CallSite.SHORT_FORM, shortCallSite)
}

/**
 * Set the thread-local property for overriding the call sites
 * of actions and RDDs.
 */
private[spark] def setCallSite(callSite: CallSite) {
  setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm)
  setLocalProperty(CallSite.LONG_FORM, callSite.longForm)
}

/**
 * Clear the thread-local property for overriding the call sites
 * of actions and RDDs.
 */
def clearCallSite() {
  setLocalProperty(CallSite.SHORT_FORM, null)
  setLocalProperty(CallSite.LONG_FORM, null)
}

/**
 * Capture the current user callsite and return a formatted version for printing. If the user
 * has overridden the call site using `setCallSite()`, this will return the user's version.
 */
private[spark] def getCallSite(): CallSite = {
  // lazy: only walk the stack if neither thread-local override is set.
  lazy val callSite = Utils.getCallSite()
  CallSite(
    Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm),
    Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm)
  )
}

/**
 * Run a function on a given set of partitions in an RDD and pass the results to the given
 * handler function. This is the main entry point for all actions in Spark.
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    func: (TaskContext, Iterator[T]) => U,
    partitions: Seq[Int],
    resultHandler: (Int, U) => Unit): Unit = {
  if (stopped.get()) {
    throw new IllegalStateException("SparkContext has been shutdown")
  }
  val callSite = getCallSite
  // Clean the closure before shipping it so it is serializable.
  val cleanedFunc = clean(func)
  // NOTE(review): most methods in this file log via logInfo directly; confirm
  // env.taskLogger is the intended logging path here.
  env.taskLogger.logInfo("Starting job: " + callSite.shortForm)
  if (conf.getBoolean("spark.logLineage", false)) {
    logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString)
  }
  dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get)
  progressBar.foreach(_.finishAll())
  // Give the RDD a chance to materialize any pending checkpoint now that a job has run.
  rdd.doCheckpoint()
}

/**
 * Run a function on a given set of partitions in an RDD and return the results as an array.
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    func: (TaskContext, Iterator[T]) => U,
    partitions: Seq[Int]): Array[U] = {
  // Collect each partition's result into its slot by partition index.
  val results = new Array[U](partitions.size)
  runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res)
  results
}

/**
 * Run a job on a given set of partitions of an RDD, but take a function of type
 * `Iterator[T] => U` instead of `(TaskContext, Iterator[T]) => U`.
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    func: Iterator[T] => U,
    partitions: Seq[Int]): Array[U] = {
  val cleanedFunc = clean(func)
  runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions)
}

/**
 * Run a job on all partitions in an RDD and return the results in an array.
 */
def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = {
  runJob(rdd, func, 0 until rdd.partitions.length)
}

/**
 * Run a job on all partitions in an RDD and return the results in an array.
 */
def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = {
  runJob(rdd, func, 0 until rdd.partitions.length)
}

/**
 * Run a job on all partitions in an RDD and pass the results to a handler function.
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    processPartition: (TaskContext, Iterator[T]) => U,
    resultHandler: (Int, U) => Unit) {
  runJob[T, U](rdd, processPartition, 0 until rdd.partitions.length, resultHandler)
}

/**
 * Run a job on all partitions in an RDD and pass the results to a handler function.
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    processPartition: Iterator[T] => U,
    resultHandler: (Int, U) => Unit) {
  // Adapt the iterator-only function to the (TaskContext, Iterator) shape the core API wants.
  val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter)
  runJob[T, U](rdd, processFunc, 0 until rdd.partitions.length, resultHandler)
}

/**
 * :: DeveloperApi ::
 * Run a job that can return approximate results.
 */
@DeveloperApi
def runApproximateJob[T, U, R](
    rdd: RDD[T],
    func: (TaskContext, Iterator[T]) => U,
    evaluator: ApproximateEvaluator[U, R],
    timeout: Long): PartialResult[R] = {
  assertNotStopped()
  val callSite = getCallSite
  logInfo("Starting job: " + callSite.shortForm)
  val start = System.nanoTime
  val cleanedFunc = clean(func)
  val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout,
    localProperties.get)
  logInfo(
    "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s")
  result
}

/**
 * Submit a job for execution and return a FutureJob holding the result.
 */
def submitJob[T, U, R](
    rdd: RDD[T],
    processPartition: Iterator[T] => U,
    partitions: Seq[Int],
    resultHandler: (Int, U) => Unit,
    resultFunc: => R): SimpleFutureAction[R] = {
  assertNotStopped()
  val cleanF = clean(processPartition)
  val callSite = getCallSite
  val waiter = dagScheduler.submitJob(
    rdd,
    (context: TaskContext, iter: Iterator[T]) => cleanF(iter),
    partitions,
    callSite,
    resultHandler,
    localProperties.get)
  // resultFunc is by-name: it is evaluated only when the future completes.
  new SimpleFutureAction(waiter, resultFunc)
}

/**
 * Submit a map stage for execution. This is currently an internal API only, but might be
 * promoted to DeveloperApi in the future.
 */
private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C])
    : SimpleFutureAction[MapOutputStatistics] = {
  assertNotStopped()
  val callSite = getCallSite()
  // `result` is filled in by the completion callback below; SimpleFutureAction's second
  // argument is by-name, so it reads the updated value only when the future completes.
  var result: MapOutputStatistics = null
  val waiter = dagScheduler.submitMapStage(
    dependency,
    (r: MapOutputStatistics) => { result = r },
    callSite,
    localProperties.get)
  new SimpleFutureAction[MapOutputStatistics](waiter, result)
}

/**
 * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup`
 * for more information.
 */
def cancelJobGroup(groupId: String) {
  assertNotStopped()
  dagScheduler.cancelJobGroup(groupId)
}

/** Cancel all jobs that have been scheduled or are running. */
def cancelAllJobs() {
  assertNotStopped()
  dagScheduler.cancelAllJobs()
}

/**
 * Cancel a given job if it's scheduled or running.
 *
 * @param jobId the job ID to cancel
 * @note Throws `InterruptedException` if the cancel message cannot be sent
 */
def cancelJob(jobId: Int) {
  dagScheduler.cancelJob(jobId)
}

/**
 * Cancel a given stage and all jobs associated with it.
 *
 * @param stageId the stage ID to cancel
 * @note Throws `InterruptedException` if the cancel message cannot be sent
 */
def cancelStage(stageId: Int) {
  dagScheduler.cancelStage(stageId)
}

/**
 * Clean a closure to make it ready to serialized and send to tasks
 * (removes unreferenced variables in $outer's, updates REPL variables)
 * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively
 * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt>
 * if not.
 *
 * @param f the closure to clean
 * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability
 * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not
 *   serializable
 */
private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = {
  // The eager serializability check is skipped for non-default closure serializers,
  // which may accept closures the default serializer cannot.
  ClosureCleaner.clean(f, checkSerializable && _isDefaultClosureSerializer)
  f
}

/**
 * Set the directory under which RDDs are going to be checkpointed. The directory must
 * be a HDFS path if running on a cluster.
 */
def setCheckpointDir(directory: String) {

  // If we are running on a cluster, log a warning if the directory is local.
  // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from
  // its own local file system, which is incorrect because the checkpoint files
  // are actually on the executor machines.
  if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) {
    logWarning("Spark is not running in local mode, therefore the checkpoint directory " +
      s"must not be on the local filesystem. Directory '$directory' " +
      "appears to be on the local filesystem.")
  }

  checkpointDir = Option(directory).map { dir =>
    // Create a unique subdirectory per context so concurrent applications don't collide.
    val path = new Path(dir, UUID.randomUUID().toString)
    val fs = path.getFileSystem(hadoopConfiguration)
    fs.mkdirs(path)
    fs.getFileStatus(path).getPath.toString
  }
}

def getCheckpointDir: Option[String] = checkpointDir

/** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */
def defaultParallelism: Int = {
  assertNotStopped()
  taskScheduler.defaultParallelism
}

/**
 * Default min number of partitions for Hadoop RDDs when not given by user
 * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2.
* The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). */ private def setupAndStartListenerBus(): Unit = { // Use reflection to instantiate listeners specified via `spark.extraListeners` try { val listenerClassNames: Seq[String] = conf.get("spark.extraListeners", "").split(',').map(_.trim).filter(_ != "") for (className <- listenerClassNames) { // Use reflection to find the right constructor val constructors = { val listenerClass = Utils.classForName(className) listenerClass .getConstructors .asInstanceOf[Array[Constructor[_ <: SparkListenerInterface]]] } val constructorTakingSparkConf = constructors.find { c => c.getParameterTypes.sameElements(Array(classOf[SparkConf])) } lazy val zeroArgumentConstructor = constructors.find { c => c.getParameterTypes.isEmpty } val listener: SparkListenerInterface = { if (constructorTakingSparkConf.isDefined) { constructorTakingSparkConf.get.newInstance(conf) } else if (zeroArgumentConstructor.isDefined) { zeroArgumentConstructor.get.newInstance() } else { throw new SparkException( s"$className did not have a zero-argument constructor or a" + " single-argument constructor that accepts SparkConf. 
Note: if the class is" + " defined inside of another Scala class, then its constructors may accept an" + " implicit parameter that references the enclosing class; in this case, you must" + " define the listener as a top-level class in order to prevent this extra" + " parameter from breaking Spark's ability to find a valid constructor.") } } listenerBus.addListener(listener) logInfo(s"Registered listener $className") } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start() _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart() { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls)) } /** Post the application end event */ private def postApplicationEnd() { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate() { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = addedJars.keys.toSeq val addedFilePaths = addedFiles.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, schedulingMode, addedJarPaths, addedFilePaths) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. 
SparkContext.setActiveContext(this, allowMultipleContexts) } /** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */ object SparkContext extends Logging { private val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by SPARK_CONTEXT_CONSTRUCTOR_LOCK. */ private[spark] val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if some thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by SPARK_CONTEXT_CONSTRUCTOR_LOCK */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning( sc: SparkContext, allowMultipleContexts: Boolean): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext may be running in this JVM (see SPARK-2243)." + " To ignore this error, set spark.driver.allowMultipleContexts = true. 
" + s"The currently running SparkContext was created at:\\n${ctx.creationSite.longForm}" val exception = new SparkException(errMsg) if (allowMultipleContexts) { logWarning("Multiple running SparkContexts detected in the same JVM!", exception) } else { throw exception } } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext may be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @note This function cannot be used to create multiple SparkContext instances * even if multiple contexts are allowed. */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config), allowMultipleContexts = false) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. 
* * This method allows not passing a SparkConf (useful if just retrieving). * * @note This function cannot be used to create multiple SparkContext instances * even if multiple contexts are allowed. */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(), allowMultipleContexts = false) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed( sc: SparkContext, allowMultipleContexts: Boolean): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc, allowMultipleContexts) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext( sc: SparkContext, allowMultipleContexts: Boolean): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc, allowMultipleContexts) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. 
*/ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** * Legacy version of DRIVER_IDENTIFIER, retained for backwards-compatibility. */ private[spark] val LEGACY_DRIVER_IDENTIFIER = "<driver>" private implicit def arrayToArrayWritable[T <% Writable: ClassTag](arr: Traversable[T]) : ArrayWritable = { def anyToWritable[U <% Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. 
*/ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of driver cores to use for execution in local mode, 0 otherwise. */ private[spark] def numDriverCores(master: String): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case _ => 0 // driver is not used for execution } } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String, deployMode: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. 
val MAX_LOCAL_TASK_FAILURES = 1 master match { case "local" => val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numSlaves, coresPerSlave, memoryPerSlave) => // Check to make sure memory requested <= memoryPerSlave. Otherwise Spark will just hang. 
val memoryPerSlaveInt = memoryPerSlave.toInt if (sc.executorMemory > memoryPerSlaveInt) { throw new SparkException( "Asked to launch cluster with %d MB RAM / worker but requested %d MB/worker".format( memoryPerSlaveInt, sc.executorMemory)) } val scheduler = new TaskSchedulerImpl(sc) val localCluster = new LocalSparkCluster( numSlaves.toInt, coresPerSlave.toInt, memoryPerSlaveInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } } /** * A collection of regexes for extracting information from the master string. 
*/ private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\\[([0-9]+|\\*)\\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\\[([0-9]+|\\*)\\s*,\\s*([0-9]+)\\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\\[\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r } /** * A class encapsulating how to convert some type T to Writable. It stores both the Writable class * corresponding to T (e.g. IntWritable for Int) and a function for doing the conversion. * The getter for the writable class takes a ClassTag[T] in case this is a generic object * that doesn't know the type of T when it is created. This sounds strange but is necessary to * support converting subclasses of Writable to themselves (writableWritableConverter). */ private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializable object WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. 
implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer then data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) } /** * A class encapsulating how to convert some type T to Writable. It stores both the Writable class * corresponding to T (e.g. IntWritable for Int) and a function for doing the conversion. * The Writable class will be used in `SequenceFileRDDFunctions`. 
*/ private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializable object WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w) }
SnappyDataInc/spark
core/src/main/scala/org/apache/spark/SparkContext.scala
Scala
apache-2.0
107,961
/*
 * Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
 */
package play.api.mvc

import java.security.cert.X509Certificate

import play.api.http.{ HeaderNames, MediaRange, MediaType }
import play.api.i18n.Lang
import play.api.libs.typedmap.{ TypedKey, TypedMap }
import play.api.mvc.request._

import scala.annotation.implicitNotFound

/**
 * The HTTP request header. Note that it doesn’t contain the request body yet.
 */
@implicitNotFound("Cannot find any HTTP Request Header here")
trait RequestHeader {
  top =>

  /**
   * The remote connection that made the request.
   */
  def connection: RemoteConnection

  /**
   * Return a new copy of the request with its connection changed.
   */
  def withConnection(newConnection: RemoteConnection): RequestHeader =
    new RequestHeaderImpl(newConnection, method, target, version, headers, attrs)

  /**
   * The request id. The request id is stored as an attribute indexed by [[play.api.mvc.request.RequestAttrKey.Id]].
   */
  final def id: Long = attrs(RequestAttrKey.Id)

  /**
   * The request Tags. The request's tags are stored in an optional attribute indexed by
   * [[play.api.mvc.request.RequestAttrKey.Tags]]. If the attribute is not present then the tags are assumed to be empty.
   */
  @deprecated("Use typed attributes instead, see `attrs`", "2.6.0")
  final def tags: Map[String, String] = attrs.get(RequestAttrKey.Tags).getOrElse(Map.empty)

  /**
   * The HTTP method.
   */
  def method: String

  /**
   * Return a new copy of the request with its method changed.
   */
  def withMethod(newMethod: String): RequestHeader =
    new RequestHeaderImpl(connection, newMethod, target, version, headers, attrs)

  /**
   * The target of the HTTP request, i.e. the URI or path that was
   * given on the first line of the request.
   */
  def target: RequestTarget

  /**
   * Return a new copy of the request with its target changed.
   */
  def withTarget(newTarget: RequestTarget): RequestHeader =
    new RequestHeaderImpl(connection, method, newTarget, version, headers, attrs)

  /**
   * The complete request URI, containing both path and query string.
   * The URI is what was on the status line after the request method.
   * E.g. in "GET /foo/bar?q=s HTTP/1.1" the URI should be /foo/bar?q=s.
   * It could be absolute, some clients send absolute URLs, especially proxies,
   * e.g. http://www.example.org/foo/bar?q=s.
   *
   * This method delegates to `target.uriString`.
   */
  final def uri: String = target.uriString

  /**
   * The URI path. This method delegates to `target.path`.
   */
  final def path: String = target.path

  /**
   * The HTTP version.
   */
  def version: String

  /**
   * Return a new copy of the request with its HTTP version changed.
   */
  def withVersion(newVersion: String): RequestHeader =
    new RequestHeaderImpl(connection, method, target, newVersion, headers, attrs)

  /**
   * The parsed query string. This method delegates to `target.queryMap`.
   */
  final def queryString: Map[String, Seq[String]] = target.queryMap

  /**
   * The HTTP headers.
   */
  def headers: Headers

  /**
   * Return a new copy of the request with its headers changed.
   */
  def withHeaders(newHeaders: Headers): RequestHeader =
    new RequestHeaderImpl(connection, method, target, version, newHeaders, attrs)

  /**
   * The client IP address.
   *
   * retrieves the last untrusted proxy
   * from the Forwarded-Headers or the X-Forwarded-*-Headers.
   *
   * This method delegates to `connection.remoteAddressString`.
   */
  final def remoteAddress: String = connection.remoteAddressString

  /**
   * Is the client using SSL? This method delegates to `connection.secure`.
   */
  final def secure: Boolean = connection.secure

  /**
   * The X509 certificate chain presented by a client during SSL requests. This method is
   * equivalent to `connection.clientCertificateChain`.
   */
  final def clientCertificateChain: Option[Seq[X509Certificate]] = connection.clientCertificateChain

  /**
   * A map of typed attributes associated with the request.
   */
  def attrs: TypedMap

  /**
   * Create a new version of this object with the given attributes attached to it.
   * This replaces any existing attributes.
   *
   * @param newAttrs The new attributes to add.
   * @return The new version of this object with the attributes attached.
   */
  def withAttrs(newAttrs: TypedMap): RequestHeader =
    new RequestHeaderImpl(connection, method, target, version, headers, newAttrs)

  /**
   * Create a new version of this object with the given attribute attached to it.
   *
   * @param key The new attribute key.
   * @param value The attribute value.
   * @tparam A The type of value.
   * @return The new version of this object with the new attribute.
   */
  def addAttr[A](key: TypedKey[A], value: A): RequestHeader =
    withAttrs(attrs.updated(key, value))

  // -- Computed

  /**
   * Helper method to access a queryString parameter. This method delegates to `target.getQueryParameter(key)`.
   *
   * @return The query parameter's value if the parameter is present
   * and there is only one value. If the parameter is absent
   * or there is more than one value for that parameter then
   * `None` is returned.
   */
  def getQueryString(key: String): Option[String] = target.getQueryParameter(key)

  /**
   * True if this request has a body, so we know if we should trigger body parsing. The base implementation simply
   * checks for the Content-Length or Transfer-Encoding headers, but subclasses (such as fake requests) may return
   * true in other cases so the headers need not be updated to reflect the body.
   */
  def hasBody: Boolean = headers.hasBody

  /**
   * The HTTP host (domain, optionally port). This value is derived from the request target, if a hostname is present.
   * If the target doesn't have a host then the `Host` header is used, if present. If that's not present then an
   * empty string is returned.
   */
  lazy val host: String = {
    import RequestHeader.AbsoluteUri
    uri match {
      case AbsoluteUri(proto, hostPort, rest) => hostPort
      case _ => headers.get(HeaderNames.HOST).getOrElse("")
    }
  }

  /**
   * The HTTP domain. The domain part of the request's [[host]].
   */
  lazy val domain: String = host.split(':').head

  /**
   * The Request Langs extracted from the Accept-Language header and sorted by preference (preferred first).
   */
  lazy val acceptLanguages: Seq[play.api.i18n.Lang] = {
    val langs = RequestHeader.acceptHeader(headers, HeaderNames.ACCEPT_LANGUAGE).map(item => (item._1, Lang.get(item._2)))
    langs.sortWith((a, b) => a._1 > b._1).flatMap(_._2)
  }

  /**
   * @return The media types list of the request’s Accept header, sorted by preference (preferred first).
   */
  lazy val acceptedTypes: Seq[play.api.http.MediaRange] = {
    headers.get(HeaderNames.ACCEPT).toSeq.flatMap(MediaRange.parse.apply)
  }

  /**
   * Check if this request accepts a given media type.
   *
   * @return true if `mimeType` matches the Accept header, otherwise false
   */
  def accepts(mimeType: String): Boolean = {
    // An absent/empty Accept header means the client accepts anything.
    acceptedTypes.isEmpty || acceptedTypes.exists(_.accepts(mimeType))
  }

  /**
   * The HTTP cookies. The request's cookies are stored in an attribute indexed by
   * [[play.api.mvc.request.RequestAttrKey.Cookies]]. The attribute uses a Cell to store the cookies,
   * to allow them to be evaluated on-demand.
   */
  def cookies: Cookies = attrs(RequestAttrKey.Cookies).value

  /**
   * Parses the `Session` cookie and returns the `Session` data. The request's session cookie is stored in an attribute indexed by
   * [[play.api.mvc.request.RequestAttrKey.Session]]. The attribute uses a Cell to store the session cookie, to allow it to be evaluated on-demand.
   */
  def session: Session = attrs(RequestAttrKey.Session).value

  /**
   * Parses the `Flash` cookie and returns the `Flash` data. The request's flash cookie is stored in an attribute indexed by
   * [[play.api.mvc.request.RequestAttrKey.Flash]]. The attribute uses a [[play.api.mvc.request.Cell]] to store the session, to allow it to be evaluated on-demand.
   */
  def flash: Flash = attrs(RequestAttrKey.Flash).value

  /**
   * Returns the raw query string. This method delegates to `target.queryString`.
   */
  def rawQueryString: String = target.queryString

  /**
   * The media type of this request. Same as contentType, except returns a fully parsed media type with parameters.
   */
  lazy val mediaType: Option[MediaType] = headers.get(HeaderNames.CONTENT_TYPE).flatMap(MediaType.parse.apply)

  /**
   * Returns the value of the Content-Type header (without the parameters (eg charset))
   */
  lazy val contentType: Option[String] = mediaType.map(mt => mt.mediaType + "/" + mt.mediaSubType)

  /**
   * Returns the charset of the request for text-based body
   */
  lazy val charset: Option[String] = for {
    mt <- mediaType
    param <- mt.parameters.find(_._1.equalsIgnoreCase("charset"))
    charset <- param._2
  } yield charset

  /**
   * Convenience method for adding a single tag to this request
   *
   * @return the tagged request
   */
  def withTag(tagName: String, tagValue: String): RequestHeader = {
    copy(tags = tags + (tagName -> tagValue))
  }

  /**
   * Attach a body to this header.
   *
   * @param body The body to attach.
   * @tparam A The type of the body.
   * @return A new request with the body attached to the header.
   */
  def withBody[A](body: A): Request[A] =
    new RequestImpl[A](connection, method, target, version, headers, attrs, body)

  /**
   * Copy the request.
   */
  @deprecated("Use the with* methods instead", "2.6.0")
  def copy(
    id: java.lang.Long = null,
    tags: Map[String, String] = null,
    uri: String = null,
    path: String = null,
    method: String = this.method,
    version: String = this.version,
    queryString: Map[String, Seq[String]] = null,
    headers: Headers = null,
    remoteAddress: String = null,
    secure: java.lang.Boolean = null,
    clientCertificateChain: Option[Seq[X509Certificate]] = null): RequestHeader = {
    var newHeader: RequestHeader = this
    // We only need to modify the request when an argument is non-null.
    if (id != null) {
      newHeader = newHeader.addAttr(RequestAttrKey.Id, (id: Long))
    }
    if (tags != null) {
      newHeader = newHeader.addAttr(RequestAttrKey.Tags, tags)
    }
    if (uri != null) {
      newHeader = newHeader.withTarget(newHeader.target.withUriString(uri))
    }
    if (path != null) {
      newHeader = newHeader.withTarget(newHeader.target.withPath(path))
    }
    if (method != null) {
      newHeader = newHeader.withMethod(method)
    }
    if (queryString != null) {
      newHeader = newHeader.withTarget(newHeader.target.withQueryString(queryString))
    }
    if (version != null) {
      newHeader = newHeader.withVersion(version)
    }
    if (headers != null) {
      newHeader = newHeader.withHeaders(headers)
    }
    if (remoteAddress != null) {
      newHeader = newHeader.withConnection(RemoteConnection(remoteAddress, newHeader.secure, newHeader.clientCertificateChain))
    }
    if (secure != null) {
      newHeader = newHeader.withConnection(RemoteConnection(newHeader.remoteAddress, secure, newHeader.clientCertificateChain))
    }
    if (clientCertificateChain != null) {
      newHeader = newHeader.withConnection(RemoteConnection(newHeader.remoteAddress, newHeader.secure, clientCertificateChain))
    }
    newHeader
  }

  override def toString: String = {
    method + " " + uri
  }

  /** Java-API view of this request header. */
  def asJava: play.mvc.Http.RequestHeader = new play.core.j.RequestHeaderImpl(this)
}

object RequestHeader {
  // Matches absolute URIs of the form scheme://host-and-port/rest, capturing the three parts.
  private val AbsoluteUri = """(?is)^(https?)://([^/]+)(/.*|$)""".r

  // “The first "q" parameter (if any) separates the media-range parameter(s) from the accept-params.”
  val qPattern = ";\\s*q=([0-9.]+)".r

  /**
   * @return The items of an Accept* header, with their q-value.
   */
  private[play] def acceptHeader(headers: Headers, headerName: String): Seq[(Double, String)] = {
    for {
      header <- headers.get(headerName).toList
      value0 <- header.split(',')
      value = value0.trim
    } yield {
      RequestHeader.qPattern.findFirstMatchIn(value) match {
        case Some(m) => (m.group(1).toDouble, m.before.toString)
        case None => (1.0, value) // “The default value is q=1.”
      }
    }
  }
}

/**
 * A standard implementation of a RequestHeader.
 */
private[play] class RequestHeaderImpl(
    override val connection: RemoteConnection,
    override val method: String,
    override val target: RequestTarget,
    override val version: String,
    override val headers: Headers,
    override val attrs: TypedMap) extends RequestHeader
Shruti9520/playframework
framework/src/play/src/main/scala/play/api/mvc/RequestHeader.scala
Scala
apache-2.0
12,708
/*
 * Copyright (C) 11/06/13 Romain Reuillon
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package fr.geocites.simpoplocal

import fr.geocites.simpuzzle.state.StepByStep

/**
 * Top-level assembly of the SimpopLocal model.
 *
 * This trait contains no logic of its own: it only composes the generic
 * step-by-step simulation driver ([[fr.geocites.simpuzzle.state.StepByStep]])
 * with the model-specific step logic (`SimpopLocalStep`) and the
 * ending-condition mixin (`SimpopLocalTimeInnovationEndingCondition`).
 * The actual behaviour is provided entirely by the mixed-in traits
 * (defined elsewhere in this project).
 */
trait SimpopLocal extends StepByStep with SimpopLocalStep with SimpopLocalTimeInnovationEndingCondition
ISCPIF/PSEExperiments
simpuzzle-src/models/simpoplocal/src/main/scala/fr/geocites/simpoplocal/SimpopLocal.scala
Scala
agpl-3.0
897
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive import java.io.IOException import java.lang.reflect.InvocationTargetException import java.util import java.util.Locale import scala.collection.mutable import scala.util.control.NonFatal import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileSystem, Path} import org.apache.hadoop.hive.ql.metadata.HiveException import org.apache.thrift.TException import org.apache.spark.{SparkConf, SparkException} import org.apache.spark.internal.Logging import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.logical.ColumnStat import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap import org.apache.spark.sql.execution.command.DDLUtils import org.apache.spark.sql.execution.datasources.PartitioningUtils import org.apache.spark.sql.hive.client.HiveClient import org.apache.spark.sql.internal.HiveSerDe import 
org.apache.spark.sql.internal.StaticSQLConf._ import org.apache.spark.sql.types.{DataType, StructType} /** * A persistent implementation of the system catalog using Hive. * All public methods must be synchronized for thread-safety. */ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configuration) extends ExternalCatalog with Logging { import CatalogTypes.TablePartitionSpec import HiveExternalCatalog._ import CatalogTableType._ /** * A Hive client used to interact with the metastore. */ lazy val client: HiveClient = { HiveUtils.newClientForMetadata(conf, hadoopConf) } // Exceptions thrown by the hive client that we would like to wrap private val clientExceptions = Set( classOf[HiveException].getCanonicalName, classOf[TException].getCanonicalName, classOf[InvocationTargetException].getCanonicalName) /** * Whether this is an exception thrown by the hive client that should be wrapped. * * Due to classloader isolation issues, pattern matching won't work here so we need * to compare the canonical names of the exceptions, which we assume to be stable. */ private def isClientException(e: Throwable): Boolean = { var temp: Class[_] = e.getClass var found = false while (temp != null && !found) { found = clientExceptions.contains(temp.getCanonicalName) temp = temp.getSuperclass } found } /** * Run some code involving `client` in a [[synchronized]] block and wrap certain * exceptions thrown in the process in [[AnalysisException]]. */ private def withClient[T](body: => T): T = synchronized { try { body } catch { case NonFatal(exception) if isClientException(exception) => val e = exception match { // Since we are using shim, the exceptions thrown by the underlying method of // Method.invoke() are wrapped by InvocationTargetException case i: InvocationTargetException => i.getCause case o => o } throw new AnalysisException( e.getClass.getCanonicalName + ": " + e.getMessage, cause = Some(e)) } } /** * Get the raw table metadata from hive metastore directly. 
The raw table metadata may contains * special data source properties and should not be exposed outside of `HiveExternalCatalog`. We * should interpret these special data source properties and restore the original table metadata * before returning it. */ private def getRawTable(db: String, table: String): CatalogTable = withClient { client.getTable(db, table) } /** * If the given table properties contains datasource properties, throw an exception. We will do * this check when create or alter a table, i.e. when we try to write table metadata to Hive * metastore. */ private def verifyTableProperties(table: CatalogTable): Unit = { val invalidKeys = table.properties.keys.filter(_.startsWith(SPARK_SQL_PREFIX)) if (invalidKeys.nonEmpty) { throw new AnalysisException(s"Cannot persistent ${table.qualifiedName} into hive metastore " + s"as table property keys may not start with '$SPARK_SQL_PREFIX': " + invalidKeys.mkString("[", ", ", "]")) } // External users are not allowed to set/switch the table type. In Hive metastore, the table // type can be switched by changing the value of a case-sensitive table property `EXTERNAL`. if (table.properties.contains("EXTERNAL")) { throw new AnalysisException("Cannot set or change the preserved property key: 'EXTERNAL'") } } /** * Checks the validity of column names. Hive metastore disallows the table to use comma in * data column names. Partition columns do not have such a restriction. Views do not have such * a restriction. */ private def verifyColumnNames(table: CatalogTable): Unit = { if (table.tableType != VIEW) { table.dataSchema.map(_.name).foreach { colName => if (colName.contains(",")) { throw new AnalysisException("Cannot create a table having a column whose name contains " + s"commas in Hive metastore. 
Table: ${table.identifier}; Column: $colName") } } } } // -------------------------------------------------------------------------- // Databases // -------------------------------------------------------------------------- override protected def doCreateDatabase( dbDefinition: CatalogDatabase, ignoreIfExists: Boolean): Unit = withClient { client.createDatabase(dbDefinition, ignoreIfExists) } override protected def doDropDatabase( db: String, ignoreIfNotExists: Boolean, cascade: Boolean): Unit = withClient { client.dropDatabase(db, ignoreIfNotExists, cascade) } /** * Alter a database whose name matches the one specified in `dbDefinition`, * assuming the database exists. * * Note: As of now, this only supports altering database properties! */ override def alterDatabase(dbDefinition: CatalogDatabase): Unit = withClient { val existingDb = getDatabase(dbDefinition.name) if (existingDb.properties == dbDefinition.properties) { logWarning(s"Request to alter database ${dbDefinition.name} is a no-op because " + s"the provided database properties are the same as the old ones. 
Hive does not " + s"currently support altering other database fields.") } client.alterDatabase(dbDefinition) } override def getDatabase(db: String): CatalogDatabase = withClient { client.getDatabase(db) } override def databaseExists(db: String): Boolean = withClient { client.databaseExists(db) } override def listDatabases(): Seq[String] = withClient { client.listDatabases("*") } override def listDatabases(pattern: String): Seq[String] = withClient { client.listDatabases(pattern) } override def setCurrentDatabase(db: String): Unit = withClient { client.setCurrentDatabase(db) } // -------------------------------------------------------------------------- // Tables // -------------------------------------------------------------------------- override protected def doCreateTable( tableDefinition: CatalogTable, ignoreIfExists: Boolean): Unit = withClient { assert(tableDefinition.identifier.database.isDefined) val db = tableDefinition.identifier.database.get val table = tableDefinition.identifier.table requireDbExists(db) verifyTableProperties(tableDefinition) verifyColumnNames(tableDefinition) if (tableExists(db, table) && !ignoreIfExists) { throw new TableAlreadyExistsException(db = db, table = table) } // Ideally we should not create a managed table with location, but Hive serde table can // specify location for managed table. And in [[CreateDataSourceTableAsSelectCommand]] we have // to create the table directory and write out data before we create this table, to avoid // exposing a partial written table. 
val needDefaultTableLocation = tableDefinition.tableType == MANAGED && tableDefinition.storage.locationUri.isEmpty val tableLocation = if (needDefaultTableLocation) { Some(CatalogUtils.stringToURI(defaultTablePath(tableDefinition.identifier))) } else { tableDefinition.storage.locationUri } if (DDLUtils.isDatasourceTable(tableDefinition)) { createDataSourceTable( tableDefinition.withNewStorage(locationUri = tableLocation), ignoreIfExists) } else { val tableWithDataSourceProps = tableDefinition.copy( // We can't leave `locationUri` empty and count on Hive metastore to set a default table // location, because Hive metastore uses hive.metastore.warehouse.dir to generate default // table location for tables in default database, while we expect to use the location of // default database. storage = tableDefinition.storage.copy(locationUri = tableLocation), // Here we follow data source tables and put table metadata like table schema, partition // columns etc. in table properties, so that we can work around the Hive metastore issue // about not case preserving and make Hive serde table and view support mixed-case column // names. properties = tableDefinition.properties ++ tableMetaToTableProps(tableDefinition)) client.createTable(tableWithDataSourceProps, ignoreIfExists) } } private def createDataSourceTable(table: CatalogTable, ignoreIfExists: Boolean): Unit = { // data source table always have a provider, it's guaranteed by `DDLUtils.isDatasourceTable`. val provider = table.provider.get // To work around some hive metastore issues, e.g. not case-preserving, bad decimal type // support, no column nullability, etc., we should do some extra works before saving table // metadata into Hive metastore: // 1. Put table metadata like table schema, partition columns, etc. in table properties. // 2. Check if this table is hive compatible. // 2.1 If it's not hive compatible, set location URI, schema, partition columns and bucket // spec to empty and save table metadata to Hive. 
// 2.2 If it's hive compatible, set serde information in table metadata and try to save // it to Hive. If it fails, treat it as not hive compatible and go back to 2.1 val tableProperties = tableMetaToTableProps(table) // put table provider and partition provider in table properties. tableProperties.put(DATASOURCE_PROVIDER, provider) if (table.tracksPartitionsInCatalog) { tableProperties.put(TABLE_PARTITION_PROVIDER, TABLE_PARTITION_PROVIDER_CATALOG) } // Ideally we should also put `locationUri` in table properties like provider, schema, etc. // However, in older version of Spark we already store table location in storage properties // with key "path". Here we keep this behaviour for backward compatibility. val storagePropsWithLocation = table.storage.properties ++ table.storage.locationUri.map("path" -> CatalogUtils.URIToString(_)) // converts the table metadata to Spark SQL specific format, i.e. set data schema, names and // bucket specification to empty. Note that partition columns are retained, so that we can // call partition-related Hive API later. def newSparkSQLSpecificMetastoreTable(): CatalogTable = { table.copy( // Hive only allows directory paths as location URIs while Spark SQL data source tables // also allow file paths. For non-hive-compatible format, we should not set location URI // to avoid hive metastore to throw exception. storage = table.storage.copy( locationUri = None, properties = storagePropsWithLocation), schema = table.partitionSchema, bucketSpec = None, properties = table.properties ++ tableProperties) } // converts the table metadata to Hive compatible format, i.e. set the serde information. def newHiveCompatibleMetastoreTable(serde: HiveSerDe): CatalogTable = { val location = if (table.tableType == EXTERNAL) { // When we hit this branch, we are saving an external data source table with hive // compatible format, which means the data source is file-based and must have a `path`. 
require(table.storage.locationUri.isDefined, "External file-based data source table must have a `path` entry in storage properties.") Some(table.location) } else { None } table.copy( storage = table.storage.copy( locationUri = location, inputFormat = serde.inputFormat, outputFormat = serde.outputFormat, serde = serde.serde, properties = storagePropsWithLocation ), properties = table.properties ++ tableProperties) } val qualifiedTableName = table.identifier.quotedString val maybeSerde = HiveSerDe.sourceToSerDe(provider) val skipHiveMetadata = table.storage.properties .getOrElse("skipHiveMetadata", "false").toBoolean val (hiveCompatibleTable, logMessage) = maybeSerde match { case _ if skipHiveMetadata => val message = s"Persisting data source table $qualifiedTableName into Hive metastore in" + "Spark SQL specific format, which is NOT compatible with Hive." (None, message) // our bucketing is un-compatible with hive(different hash function) case _ if table.bucketSpec.nonEmpty => val message = s"Persisting bucketed data source table $qualifiedTableName into " + "Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. " (None, message) case Some(serde) => val message = s"Persisting file based data source table $qualifiedTableName into " + s"Hive metastore in Hive compatible format." (Some(newHiveCompatibleMetastoreTable(serde)), message) case _ => val message = s"Couldn't find corresponding Hive SerDe for data source provider $provider. " + s"Persisting data source table $qualifiedTableName into Hive metastore in " + s"Spark SQL specific format, which is NOT compatible with Hive." (None, message) } (hiveCompatibleTable, logMessage) match { case (Some(table), message) => // We first try to save the metadata of the table in a Hive compatible way. // If Hive throws an error, we fall back to save its metadata in the Spark SQL // specific way. 
try { logInfo(message) saveTableIntoHive(table, ignoreIfExists) } catch { case NonFatal(e) => val warningMessage = s"Could not persist ${table.identifier.quotedString} in a Hive " + "compatible way. Persisting it into Hive metastore in Spark SQL specific format." logWarning(warningMessage, e) saveTableIntoHive(newSparkSQLSpecificMetastoreTable(), ignoreIfExists) } case (None, message) => logWarning(message) saveTableIntoHive(newSparkSQLSpecificMetastoreTable(), ignoreIfExists) } } /** * Data source tables may be non Hive compatible and we need to store table metadata in table * properties to workaround some Hive metastore limitations. * This method puts table schema, partition column names, bucket specification into a map, which * can be used as table properties later. */ private def tableMetaToTableProps(table: CatalogTable): mutable.Map[String, String] = { val partitionColumns = table.partitionColumnNames val bucketSpec = table.bucketSpec val properties = new mutable.HashMap[String, String] // Serialized JSON schema string may be too long to be stored into a single metastore table // property. In this case, we split the JSON string and store each part as a separate table // property. val threshold = conf.get(SCHEMA_STRING_LENGTH_THRESHOLD) val schemaJsonString = table.schema.json // Split the JSON string. 
val parts = schemaJsonString.grouped(threshold).toSeq properties.put(DATASOURCE_SCHEMA_NUMPARTS, parts.size.toString) parts.zipWithIndex.foreach { case (part, index) => properties.put(s"$DATASOURCE_SCHEMA_PART_PREFIX$index", part) } if (partitionColumns.nonEmpty) { properties.put(DATASOURCE_SCHEMA_NUMPARTCOLS, partitionColumns.length.toString) partitionColumns.zipWithIndex.foreach { case (partCol, index) => properties.put(s"$DATASOURCE_SCHEMA_PARTCOL_PREFIX$index", partCol) } } if (bucketSpec.isDefined) { val BucketSpec(numBuckets, bucketColumnNames, sortColumnNames) = bucketSpec.get properties.put(DATASOURCE_SCHEMA_NUMBUCKETS, numBuckets.toString) properties.put(DATASOURCE_SCHEMA_NUMBUCKETCOLS, bucketColumnNames.length.toString) bucketColumnNames.zipWithIndex.foreach { case (bucketCol, index) => properties.put(s"$DATASOURCE_SCHEMA_BUCKETCOL_PREFIX$index", bucketCol) } if (sortColumnNames.nonEmpty) { properties.put(DATASOURCE_SCHEMA_NUMSORTCOLS, sortColumnNames.length.toString) sortColumnNames.zipWithIndex.foreach { case (sortCol, index) => properties.put(s"$DATASOURCE_SCHEMA_SORTCOL_PREFIX$index", sortCol) } } } properties } private def defaultTablePath(tableIdent: TableIdentifier): String = { val dbLocation = getDatabase(tableIdent.database.get).locationUri new Path(new Path(dbLocation), tableIdent.table).toString } private def saveTableIntoHive(tableDefinition: CatalogTable, ignoreIfExists: Boolean): Unit = { assert(DDLUtils.isDatasourceTable(tableDefinition), "saveTableIntoHive only takes data source table.") // If this is an external data source table... if (tableDefinition.tableType == EXTERNAL && // ... that is not persisted as Hive compatible format (external tables in Hive compatible // format always set `locationUri` to the actual data location and should NOT be hacked as // following.) tableDefinition.storage.locationUri.isEmpty) { // !! HACK ALERT !! 
// // Due to a restriction of Hive metastore, here we have to set `locationUri` to a temporary // directory that doesn't exist yet but can definitely be successfully created, and then // delete it right after creating the external data source table. This location will be // persisted to Hive metastore as standard Hive table location URI, but Spark SQL doesn't // really use it. Also, since we only do this workaround for external tables, deleting the // directory after the fact doesn't do any harm. // // Please refer to https://issues.apache.org/jira/browse/SPARK-15269 for more details. val tempPath = { val dbLocation = new Path(getDatabase(tableDefinition.database).locationUri) new Path(dbLocation, tableDefinition.identifier.table + "-__PLACEHOLDER__") } try { client.createTable( tableDefinition.withNewStorage(locationUri = Some(tempPath.toUri)), ignoreIfExists) } finally { FileSystem.get(tempPath.toUri, hadoopConf).delete(tempPath, true) } } else { client.createTable(tableDefinition, ignoreIfExists) } } override protected def doDropTable( db: String, table: String, ignoreIfNotExists: Boolean, purge: Boolean): Unit = withClient { requireDbExists(db) client.dropTable(db, table, ignoreIfNotExists, purge) } override protected def doRenameTable( db: String, oldName: String, newName: String): Unit = withClient { val rawTable = getRawTable(db, oldName) // Note that Hive serde tables don't use path option in storage properties to store the value // of table location, but use `locationUri` field to store it directly. And `locationUri` field // will be updated automatically in Hive metastore by the `alterTable` call at the end of this // method. Here we only update the path option if the path option already exists in storage // properties, to avoid adding a unnecessary path option for Hive serde tables. 
val hasPathOption = CaseInsensitiveMap(rawTable.storage.properties).contains("path") val storageWithNewPath = if (rawTable.tableType == MANAGED && hasPathOption) { // If it's a managed table with path option and we are renaming it, then the path option // becomes inaccurate and we need to update it according to the new table name. val newTablePath = defaultTablePath(TableIdentifier(newName, Some(db))) updateLocationInStorageProps(rawTable, Some(newTablePath)) } else { rawTable.storage } val newTable = rawTable.copy( identifier = TableIdentifier(newName, Some(db)), storage = storageWithNewPath) client.alterTable(oldName, newTable) } private def getLocationFromStorageProps(table: CatalogTable): Option[String] = { CaseInsensitiveMap(table.storage.properties).get("path") } private def updateLocationInStorageProps( table: CatalogTable, newPath: Option[String]): CatalogStorageFormat = { // We can't use `filterKeys` here, as the map returned by `filterKeys` is not serializable, // while `CatalogTable` should be serializable. val propsWithoutPath = table.storage.properties.filter { case (k, v) => k.toLowerCase(Locale.ROOT) != "path" } table.storage.copy(properties = propsWithoutPath ++ newPath.map("path" -> _)) } /** * Alter a table whose name that matches the one specified in `tableDefinition`, * assuming the table exists. This method does not change the properties for data source and * statistics. * * Note: As of now, this doesn't support altering table schema, partition column names and bucket * specification. We will ignore them even if users do specify different values for these fields. 
*/ override def alterTable(tableDefinition: CatalogTable): Unit = withClient { assert(tableDefinition.identifier.database.isDefined) val db = tableDefinition.identifier.database.get requireTableExists(db, tableDefinition.identifier.table) verifyTableProperties(tableDefinition) if (tableDefinition.tableType == VIEW) { client.alterTable(tableDefinition) } else { val oldTableDef = getRawTable(db, tableDefinition.identifier.table) val newStorage = if (DDLUtils.isHiveTable(tableDefinition)) { tableDefinition.storage } else { // We can't alter the table storage of data source table directly for 2 reasons: // 1. internally we use path option in storage properties to store the value of table // location, but the given `tableDefinition` is from outside and doesn't have the path // option, we need to add it manually. // 2. this data source table may be created on a file, not a directory, then we can't set // the `locationUri` field and save it to Hive metastore, because Hive only allows // directory as table location. // // For example, an external data source table is created with a single file '/path/to/file'. // Internally, we will add a path option with value '/path/to/file' to storage properties, // and set the `locationUri` to a special value due to SPARK-15269(please see // `saveTableIntoHive` for more details). When users try to get the table metadata back, we // will restore the `locationUri` field from the path option and remove the path option from // storage properties. When users try to alter the table storage, the given // `tableDefinition` will have `locationUri` field with value `/path/to/file` and the path // option is not set. // // Here we need 2 extra steps: // 1. add path option to storage properties, to match the internal format, i.e. using path // option to store the value of table location. // 2. set the `locationUri` field back to the old one from the existing table metadata, // if users don't want to alter the table location. 
This step is necessary as the // `locationUri` is not always same with the path option, e.g. in the above example // `locationUri` is a special value and we should respect it. Note that, if users // want to alter the table location to a file path, we will fail. This should be fixed // in the future. val newLocation = tableDefinition.storage.locationUri.map(CatalogUtils.URIToString(_)) val storageWithPathOption = tableDefinition.storage.copy( properties = tableDefinition.storage.properties ++ newLocation.map("path" -> _)) val oldLocation = getLocationFromStorageProps(oldTableDef) if (oldLocation == newLocation) { storageWithPathOption.copy(locationUri = oldTableDef.storage.locationUri) } else { storageWithPathOption } } val partitionProviderProp = if (tableDefinition.tracksPartitionsInCatalog) { TABLE_PARTITION_PROVIDER -> TABLE_PARTITION_PROVIDER_CATALOG } else { TABLE_PARTITION_PROVIDER -> TABLE_PARTITION_PROVIDER_FILESYSTEM } // Add old data source properties to table properties, to retain the data source table format. // Add old stats properties to table properties, to retain spark's stats. // Set the `schema`, `partitionColumnNames` and `bucketSpec` from the old table definition, // to retain the spark specific format if it is. 
val propsFromOldTable = oldTableDef.properties.filter { case (k, v) => k.startsWith(DATASOURCE_PREFIX) || k.startsWith(STATISTICS_PREFIX) } val newTableProps = propsFromOldTable ++ tableDefinition.properties + partitionProviderProp val newDef = tableDefinition.copy( storage = newStorage, schema = oldTableDef.schema, partitionColumnNames = oldTableDef.partitionColumnNames, bucketSpec = oldTableDef.bucketSpec, properties = newTableProps) client.alterTable(newDef) } } override def alterTableSchema(db: String, table: String, schema: StructType): Unit = withClient { requireTableExists(db, table) val rawTable = getRawTable(db, table) val withNewSchema = rawTable.copy(schema = schema) verifyColumnNames(withNewSchema) // Add table metadata such as table schema, partition columns, etc. to table properties. val updatedTable = withNewSchema.copy( properties = withNewSchema.properties ++ tableMetaToTableProps(withNewSchema)) try { client.alterTable(updatedTable) } catch { case NonFatal(e) => val warningMessage = s"Could not alter schema of table ${rawTable.identifier.quotedString} in a Hive " + "compatible way. Updating Hive metastore in Spark SQL specific format." logWarning(warningMessage, e) client.alterTable(updatedTable.copy(schema = updatedTable.partitionSchema)) } } override def alterTableStats( db: String, table: String, stats: Option[CatalogStatistics]): Unit = withClient { requireTableExists(db, table) val rawTable = getRawTable(db, table) // convert table statistics to properties so that we can persist them through hive client val statsProperties = new mutable.HashMap[String, String]() if (stats.isDefined) { statsProperties += STATISTICS_TOTAL_SIZE -> stats.get.sizeInBytes.toString() if (stats.get.rowCount.isDefined) { statsProperties += STATISTICS_NUM_ROWS -> stats.get.rowCount.get.toString() } // For datasource tables and hive serde tables created by spark 2.1 or higher, // the data schema is stored in the table properties. 
val schema = restoreTableMetadata(rawTable).schema val colNameTypeMap: Map[String, DataType] = schema.fields.map(f => (f.name, f.dataType)).toMap stats.get.colStats.foreach { case (colName, colStat) => colStat.toMap(colName, colNameTypeMap(colName)).foreach { case (k, v) => statsProperties += (columnStatKeyPropName(colName, k) -> v) } } } val oldTableNonStatsProps = rawTable.properties.filterNot(_._1.startsWith(STATISTICS_PREFIX)) val updatedTable = rawTable.copy(properties = oldTableNonStatsProps ++ statsProperties) client.alterTable(updatedTable) } override def getTable(db: String, table: String): CatalogTable = withClient { restoreTableMetadata(getRawTable(db, table)) } override def getTableOption(db: String, table: String): Option[CatalogTable] = withClient { client.getTableOption(db, table).map(restoreTableMetadata) } /** * Restores table metadata from the table properties. This method is kind of a opposite version * of [[createTable]]. * * It reads table schema, provider, partition column names and bucket specification from table * properties, and filter out these special entries from table properties. */ private def restoreTableMetadata(inputTable: CatalogTable): CatalogTable = { if (conf.get(DEBUG_MODE)) { return inputTable } var table = inputTable table.properties.get(DATASOURCE_PROVIDER) match { case None if table.tableType == VIEW => // If this is a view created by Spark 2.2 or higher versions, we should restore its schema // from table properties. if (table.properties.contains(DATASOURCE_SCHEMA_NUMPARTS)) { table = table.copy(schema = getSchemaFromTableProperties(table)) } // No provider in table properties, which means this is a Hive serde table. case None => table = restoreHiveSerdeTable(table) // This is a regular data source table. case Some(provider) => table = restoreDataSourceTable(table, provider) } // Restore Spark's statistics from information in Metastore. 
val statsProps = table.properties.filterKeys(_.startsWith(STATISTICS_PREFIX)) // Currently we have two sources of statistics: one from Hive and the other from Spark. // In our design, if Spark's statistics is available, we respect it over Hive's statistics. if (statsProps.nonEmpty) { val colStats = new mutable.HashMap[String, ColumnStat] // For each column, recover its column stats. Note that this is currently a O(n^2) operation, // but given the number of columns it usually not enormous, this is probably OK as a start. // If we want to map this a linear operation, we'd need a stronger contract between the // naming convention used for serialization. table.schema.foreach { field => if (statsProps.contains(columnStatKeyPropName(field.name, ColumnStat.KEY_VERSION))) { // If "version" field is defined, then the column stat is defined. val keyPrefix = columnStatKeyPropName(field.name, "") val colStatMap = statsProps.filterKeys(_.startsWith(keyPrefix)).map { case (k, v) => (k.drop(keyPrefix.length), v) } ColumnStat.fromMap(table.identifier.table, field, colStatMap).foreach { colStat => colStats += field.name -> colStat } } } table = table.copy( stats = Some(CatalogStatistics( sizeInBytes = BigInt(table.properties(STATISTICS_TOTAL_SIZE)), rowCount = table.properties.get(STATISTICS_NUM_ROWS).map(BigInt(_)), colStats = colStats.toMap))) } // Get the original table properties as defined by the user. table.copy( properties = table.properties.filterNot { case (key, _) => key.startsWith(SPARK_SQL_PREFIX) }) } // Reorder table schema to put partition columns at the end. Before Spark 2.2, the partition // columns are not put at the end of schema. We need to reorder it when reading the schema // from the table properties. private def reorderSchema(schema: StructType, partColumnNames: Seq[String]): StructType = { val partitionFields = partColumnNames.map { partCol => schema.find(_.name == partCol).getOrElse { throw new AnalysisException("The metadata is corrupted. 
Unable to find the " + s"partition column names from the schema. schema: ${schema.catalogString}. " + s"Partition columns: ${partColumnNames.mkString("[", ", ", "]")}") } } StructType(schema.filterNot(partitionFields.contains) ++ partitionFields) } private def restoreHiveSerdeTable(table: CatalogTable): CatalogTable = { val hiveTable = table.copy( provider = Some(DDLUtils.HIVE_PROVIDER), tracksPartitionsInCatalog = true) // If this is a Hive serde table created by Spark 2.1 or higher versions, we should restore its // schema from table properties. if (table.properties.contains(DATASOURCE_SCHEMA_NUMPARTS)) { val schemaFromTableProps = getSchemaFromTableProperties(table) val partColumnNames = getPartitionColumnsFromTableProperties(table) val reorderedSchema = reorderSchema(schema = schemaFromTableProps, partColumnNames) if (DataType.equalsIgnoreCaseAndNullability(reorderedSchema, table.schema)) { hiveTable.copy( schema = reorderedSchema, partitionColumnNames = partColumnNames, bucketSpec = getBucketSpecFromTableProperties(table)) } else { // Hive metastore may change the table schema, e.g. schema inference. If the table // schema we read back is different(ignore case and nullability) from the one in table // properties which was written when creating table, we should respect the table schema // from hive. logWarning(s"The table schema given by Hive metastore(${table.schema.simpleString}) is " + "different from the schema when this table was created by Spark SQL" + s"(${schemaFromTableProps.simpleString}). We have to fall back to the table schema " + "from Hive metastore which is not case preserving.") hiveTable.copy(schemaPreservesCase = false) } } else { hiveTable.copy(schemaPreservesCase = false) } } private def restoreDataSourceTable(table: CatalogTable, provider: String): CatalogTable = { // Internally we store the table location in storage properties with key "path" for data // source tables. 
Here we set the table location to `locationUri` field and filter out the // path option in storage properties, to avoid exposing this concept externally. val storageWithLocation = { val tableLocation = getLocationFromStorageProps(table) // We pass None as `newPath` here, to remove the path option in storage properties. updateLocationInStorageProps(table, newPath = None).copy( locationUri = tableLocation.map(CatalogUtils.stringToURI(_))) } val partitionProvider = table.properties.get(TABLE_PARTITION_PROVIDER) val schemaFromTableProps = getSchemaFromTableProperties(table) val partColumnNames = getPartitionColumnsFromTableProperties(table) val reorderedSchema = reorderSchema(schema = schemaFromTableProps, partColumnNames) table.copy( provider = Some(provider), storage = storageWithLocation, schema = reorderedSchema, partitionColumnNames = partColumnNames, bucketSpec = getBucketSpecFromTableProperties(table), tracksPartitionsInCatalog = partitionProvider == Some(TABLE_PARTITION_PROVIDER_CATALOG)) } override def tableExists(db: String, table: String): Boolean = withClient { client.tableExists(db, table) } override def listTables(db: String): Seq[String] = withClient { requireDbExists(db) client.listTables(db) } override def listTables(db: String, pattern: String): Seq[String] = withClient { requireDbExists(db) client.listTables(db, pattern) } override def loadTable( db: String, table: String, loadPath: String, isOverwrite: Boolean, isSrcLocal: Boolean): Unit = withClient { requireTableExists(db, table) client.loadTable( loadPath, s"$db.$table", isOverwrite, isSrcLocal) } override def loadPartition( db: String, table: String, loadPath: String, partition: TablePartitionSpec, isOverwrite: Boolean, inheritTableSpecs: Boolean, isSrcLocal: Boolean): Unit = withClient { requireTableExists(db, table) val orderedPartitionSpec = new util.LinkedHashMap[String, String]() getTable(db, table).partitionColumnNames.foreach { colName => // Hive metastore is not case preserving and keeps 
partition columns with lower cased names, // and Hive will validate the column names in partition spec to make sure they are partition // columns. Here we Lowercase the column names before passing the partition spec to Hive // client, to satisfy Hive. orderedPartitionSpec.put(colName.toLowerCase, partition(colName)) } client.loadPartition( loadPath, db, table, orderedPartitionSpec, isOverwrite, inheritTableSpecs, isSrcLocal) } override def loadDynamicPartitions( db: String, table: String, loadPath: String, partition: TablePartitionSpec, replace: Boolean, numDP: Int): Unit = withClient { requireTableExists(db, table) val orderedPartitionSpec = new util.LinkedHashMap[String, String]() getTable(db, table).partitionColumnNames.foreach { colName => // Hive metastore is not case preserving and keeps partition columns with lower cased names, // and Hive will validate the column names in partition spec to make sure they are partition // columns. Here we Lowercase the column names before passing the partition spec to Hive // client, to satisfy Hive. orderedPartitionSpec.put(colName.toLowerCase, partition(colName)) } client.loadDynamicPartitions( loadPath, db, table, orderedPartitionSpec, replace, numDP) } // -------------------------------------------------------------------------- // Partitions // -------------------------------------------------------------------------- // Hive metastore is not case preserving and the partition columns are always lower cased. We need // to lower case the column names in partition specification before calling partition related Hive // APIs, to match this behaviour. 
private def lowerCasePartitionSpec(spec: TablePartitionSpec): TablePartitionSpec = { spec.map { case (k, v) => k.toLowerCase -> v } } // Build a map from lower-cased partition column names to exact column names for a given table private def buildLowerCasePartColNameMap(table: CatalogTable): Map[String, String] = { val actualPartColNames = table.partitionColumnNames actualPartColNames.map(colName => (colName.toLowerCase, colName)).toMap } // Hive metastore is not case preserving and the column names of the partition specification we // get from the metastore are always lower cased. We should restore them w.r.t. the actual table // partition columns. private def restorePartitionSpec( spec: TablePartitionSpec, partColMap: Map[String, String]): TablePartitionSpec = { spec.map { case (k, v) => partColMap(k.toLowerCase) -> v } } private def restorePartitionSpec( spec: TablePartitionSpec, partCols: Seq[String]): TablePartitionSpec = { spec.map { case (k, v) => partCols.find(_.equalsIgnoreCase(k)).get -> v } } override def createPartitions( db: String, table: String, parts: Seq[CatalogTablePartition], ignoreIfExists: Boolean): Unit = withClient { requireTableExists(db, table) val tableMeta = getTable(db, table) val partitionColumnNames = tableMeta.partitionColumnNames val tablePath = new Path(tableMeta.location) val partsWithLocation = parts.map { p => // Ideally we can leave the partition location empty and let Hive metastore to set it. // However, Hive metastore is not case preserving and will generate wrong partition location // with lower cased partition column names. Here we set the default partition location // manually to avoid this problem. 
val partitionPath = p.storage.locationUri.map(uri => new Path(uri)).getOrElse { ExternalCatalogUtils.generatePartitionPath(p.spec, partitionColumnNames, tablePath) } p.copy(storage = p.storage.copy(locationUri = Some(partitionPath.toUri))) } val lowerCasedParts = partsWithLocation.map(p => p.copy(spec = lowerCasePartitionSpec(p.spec))) client.createPartitions(db, table, lowerCasedParts, ignoreIfExists) } override def dropPartitions( db: String, table: String, parts: Seq[TablePartitionSpec], ignoreIfNotExists: Boolean, purge: Boolean, retainData: Boolean): Unit = withClient { requireTableExists(db, table) client.dropPartitions( db, table, parts.map(lowerCasePartitionSpec), ignoreIfNotExists, purge, retainData) } override def renamePartitions( db: String, table: String, specs: Seq[TablePartitionSpec], newSpecs: Seq[TablePartitionSpec]): Unit = withClient { client.renamePartitions( db, table, specs.map(lowerCasePartitionSpec), newSpecs.map(lowerCasePartitionSpec)) val tableMeta = getTable(db, table) val partitionColumnNames = tableMeta.partitionColumnNames // Hive metastore is not case preserving and keeps partition columns with lower cased names. // When Hive rename partition for managed tables, it will create the partition location with // a default path generate by the new spec with lower cased partition column names. This is // unexpected and we need to rename them manually and alter the partition location. 
val hasUpperCasePartitionColumn = partitionColumnNames.exists(col => col.toLowerCase != col) if (tableMeta.tableType == MANAGED && hasUpperCasePartitionColumn) { val tablePath = new Path(tableMeta.location) val fs = tablePath.getFileSystem(hadoopConf) val newParts = newSpecs.map { spec => val rightPath = renamePartitionDirectory(fs, tablePath, partitionColumnNames, spec) val partition = client.getPartition(db, table, lowerCasePartitionSpec(spec)) partition.copy(storage = partition.storage.copy(locationUri = Some(rightPath.toUri))) } alterPartitions(db, table, newParts) } } /** * Rename the partition directory w.r.t. the actual partition columns. * * It will recursively rename the partition directory from the first partition column, to be most * compatible with different file systems. e.g. in some file systems, renaming `a=1/b=2` to * `A=1/B=2` will result to `a=1/B=2`, while in some other file systems, the renaming works, but * will leave an empty directory `a=1`. */ private def renamePartitionDirectory( fs: FileSystem, tablePath: Path, partCols: Seq[String], newSpec: TablePartitionSpec): Path = { import ExternalCatalogUtils.getPartitionPathString var currentFullPath = tablePath partCols.foreach { col => val partValue = newSpec(col) val expectedPartitionString = getPartitionPathString(col, partValue) val expectedPartitionPath = new Path(currentFullPath, expectedPartitionString) if (fs.exists(expectedPartitionPath)) { // It is possible that some parental partition directories already exist or doesn't need to // be renamed. e.g. the partition columns are `a` and `B`, then we don't need to rename // `/table_path/a=1`. Or we already have a partition directory `A=1/B=2`, and we rename // another partition to `A=1/B=3`, then we will have `A=1/B=2` and `a=1/b=3`, and we should // just move `a=1/b=3` into `A=1` with new name `B=3`. 
} else { val actualPartitionString = getPartitionPathString(col.toLowerCase, partValue) val actualPartitionPath = new Path(currentFullPath, actualPartitionString) try { fs.rename(actualPartitionPath, expectedPartitionPath) } catch { case e: IOException => throw new SparkException("Unable to rename partition path from " + s"$actualPartitionPath to $expectedPartitionPath", e) } } currentFullPath = expectedPartitionPath } currentFullPath } override def alterPartitions( db: String, table: String, newParts: Seq[CatalogTablePartition]): Unit = withClient { val lowerCasedParts = newParts.map(p => p.copy(spec = lowerCasePartitionSpec(p.spec))) // Note: Before altering table partitions in Hive, you *must* set the current database // to the one that contains the table of interest. Otherwise you will end up with the // most helpful error message ever: "Unable to alter partition. alter is not possible." // See HIVE-2742 for more detail. client.setCurrentDatabase(db) client.alterPartitions(db, table, lowerCasedParts) } override def getPartition( db: String, table: String, spec: TablePartitionSpec): CatalogTablePartition = withClient { val part = client.getPartition(db, table, lowerCasePartitionSpec(spec)) part.copy(spec = restorePartitionSpec(part.spec, getTable(db, table).partitionColumnNames)) } /** * Returns the specified partition or None if it does not exist. */ override def getPartitionOption( db: String, table: String, spec: TablePartitionSpec): Option[CatalogTablePartition] = withClient { client.getPartitionOption(db, table, lowerCasePartitionSpec(spec)).map { part => part.copy(spec = restorePartitionSpec(part.spec, getTable(db, table).partitionColumnNames)) } } /** * Returns the partition names from hive metastore for a given table in a database. 
*/ override def listPartitionNames( db: String, table: String, partialSpec: Option[TablePartitionSpec] = None): Seq[String] = withClient { val catalogTable = getTable(db, table) val partColNameMap = buildLowerCasePartColNameMap(catalogTable).mapValues(escapePathName) val clientPartitionNames = client.getPartitionNames(catalogTable, partialSpec.map(lowerCasePartitionSpec)) clientPartitionNames.map { partitionPath => val partSpec = PartitioningUtils.parsePathFragmentAsSeq(partitionPath) partSpec.map { case (partName, partValue) => partColNameMap(partName.toLowerCase) + "=" + escapePathName(partValue) }.mkString("/") } } /** * Returns the partitions from hive metastore for a given table in a database. */ override def listPartitions( db: String, table: String, partialSpec: Option[TablePartitionSpec] = None): Seq[CatalogTablePartition] = withClient { val partColNameMap = buildLowerCasePartColNameMap(getTable(db, table)) val res = client.getPartitions(db, table, partialSpec.map(lowerCasePartitionSpec)).map { part => part.copy(spec = restorePartitionSpec(part.spec, partColNameMap)) } partialSpec match { // This might be a bug of Hive: When the partition value inside the partial partition spec // contains dot, and we ask Hive to list partitions w.r.t. the partial partition spec, Hive // treats dot as matching any single character and may return more partitions than we // expected. Here we do an extra filter to drop unexpected partitions. 
case Some(spec) if spec.exists(_._2.contains(".")) => res.filter(p => isPartialPartitionSpec(spec, p.spec)) case _ => res } } override def listPartitionsByFilter( db: String, table: String, predicates: Seq[Expression], defaultTimeZoneId: String): Seq[CatalogTablePartition] = withClient { val rawTable = getRawTable(db, table) val catalogTable = restoreTableMetadata(rawTable) val partColNameMap = buildLowerCasePartColNameMap(catalogTable) val clientPrunedPartitions = client.getPartitionsByFilter(rawTable, predicates).map { part => part.copy(spec = restorePartitionSpec(part.spec, partColNameMap)) } prunePartitionsByFilter(catalogTable, clientPrunedPartitions, predicates, defaultTimeZoneId) } // -------------------------------------------------------------------------- // Functions // -------------------------------------------------------------------------- override protected def doCreateFunction( db: String, funcDefinition: CatalogFunction): Unit = withClient { requireDbExists(db) // Hive's metastore is case insensitive. However, Hive's createFunction does // not normalize the function name (unlike the getFunction part). So, // we are normalizing the function name. 
val functionName = funcDefinition.identifier.funcName.toLowerCase(Locale.ROOT) requireFunctionNotExists(db, functionName) val functionIdentifier = funcDefinition.identifier.copy(funcName = functionName) client.createFunction(db, funcDefinition.copy(identifier = functionIdentifier)) } override protected def doDropFunction(db: String, name: String): Unit = withClient { requireFunctionExists(db, name) client.dropFunction(db, name) } override protected def doAlterFunction( db: String, funcDefinition: CatalogFunction): Unit = withClient { requireDbExists(db) val functionName = funcDefinition.identifier.funcName.toLowerCase(Locale.ROOT) requireFunctionExists(db, functionName) val functionIdentifier = funcDefinition.identifier.copy(funcName = functionName) client.alterFunction(db, funcDefinition.copy(identifier = functionIdentifier)) } override protected def doRenameFunction( db: String, oldName: String, newName: String): Unit = withClient { requireFunctionExists(db, oldName) requireFunctionNotExists(db, newName) client.renameFunction(db, oldName, newName) } override def getFunction(db: String, funcName: String): CatalogFunction = withClient { requireFunctionExists(db, funcName) client.getFunction(db, funcName) } override def functionExists(db: String, funcName: String): Boolean = withClient { requireDbExists(db) client.functionExists(db, funcName) } override def listFunctions(db: String, pattern: String): Seq[String] = withClient { requireDbExists(db) client.listFunctions(db, pattern) } } object HiveExternalCatalog { val SPARK_SQL_PREFIX = "spark.sql." val DATASOURCE_PREFIX = SPARK_SQL_PREFIX + "sources." val DATASOURCE_PROVIDER = DATASOURCE_PREFIX + "provider" val DATASOURCE_SCHEMA = DATASOURCE_PREFIX + "schema" val DATASOURCE_SCHEMA_PREFIX = DATASOURCE_SCHEMA + "." 
  // Property keys describing how the data source schema is split across table
  // properties (the schema JSON may exceed the metastore's value-size limit and
  // is therefore stored in numbered parts).
  val DATASOURCE_SCHEMA_NUMPARTS = DATASOURCE_SCHEMA_PREFIX + "numParts"
  val DATASOURCE_SCHEMA_NUMPARTCOLS = DATASOURCE_SCHEMA_PREFIX + "numPartCols"
  val DATASOURCE_SCHEMA_NUMSORTCOLS = DATASOURCE_SCHEMA_PREFIX + "numSortCols"
  val DATASOURCE_SCHEMA_NUMBUCKETS = DATASOURCE_SCHEMA_PREFIX + "numBuckets"
  val DATASOURCE_SCHEMA_NUMBUCKETCOLS = DATASOURCE_SCHEMA_PREFIX + "numBucketCols"
  val DATASOURCE_SCHEMA_PART_PREFIX = DATASOURCE_SCHEMA_PREFIX + "part."
  val DATASOURCE_SCHEMA_PARTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "partCol."
  val DATASOURCE_SCHEMA_BUCKETCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "bucketCol."
  val DATASOURCE_SCHEMA_SORTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "sortCol."

  // Property keys under which Spark persists its own table/column statistics.
  val STATISTICS_PREFIX = SPARK_SQL_PREFIX + "statistics."
  val STATISTICS_TOTAL_SIZE = STATISTICS_PREFIX + "totalSize"
  val STATISTICS_NUM_ROWS = STATISTICS_PREFIX + "numRows"
  val STATISTICS_COL_STATS_PREFIX = STATISTICS_PREFIX + "colStats."

  // Property key recording whether partitions are tracked by the metastore
  // ("catalog") or discovered from the file system ("filesystem").
  val TABLE_PARTITION_PROVIDER = SPARK_SQL_PREFIX + "partitionProvider"
  val TABLE_PARTITION_PROVIDER_CATALOG = "catalog"
  val TABLE_PARTITION_PROVIDER_FILESYSTEM = "filesystem"

  /**
   * Returns the fully qualified name used in table properties for a particular column stat.
   * For example, for column "mycol", and "min" stat, this should return
   * "spark.sql.statistics.colStats.mycol.min".
   */
  private def columnStatKeyPropName(columnName: String, statKey: String): String = {
    STATISTICS_COL_STATS_PREFIX + columnName + "." + statKey
  }

  // A persisted data source table always store its schema in the catalog.
  // Reassembles the schema from table properties: either the legacy single-key
  // JSON form, or the numbered multi-part form; an empty StructType is returned
  // when no schema-related key exists at all (pre-2.1 tables).
  private def getSchemaFromTableProperties(metadata: CatalogTable): StructType = {
    val errorMessage = "Could not read schema from the hive metastore because it is corrupted."
    val props = metadata.properties
    val schema = props.get(DATASOURCE_SCHEMA)
    if (schema.isDefined) {
      // Originally, we used `spark.sql.sources.schema` to store the schema of a data source table.
      // After SPARK-6024, we removed this flag.
      // Although we are not using `spark.sql.sources.schema` any more, we need to still support.
      DataType.fromJson(schema.get).asInstanceOf[StructType]
    } else if (props.filterKeys(_.startsWith(DATASOURCE_SCHEMA_PREFIX)).isEmpty) {
      // If there is no schema information in table properties, it means the schema of this table
      // was empty when saving into metastore, which is possible in older version(prior to 2.1) of
      // Spark. We should respect it.
      new StructType()
    } else {
      val numSchemaParts = props.get(DATASOURCE_SCHEMA_NUMPARTS)
      if (numSchemaParts.isDefined) {
        val parts = (0 until numSchemaParts.get.toInt).map { index =>
          val part = metadata.properties.get(s"$DATASOURCE_SCHEMA_PART_PREFIX$index").orNull
          if (part == null) {
            throw new AnalysisException(errorMessage +
              s" (missing part $index of the schema, ${numSchemaParts.get} parts are expected).")
          }
          part
        }
        // Stick all parts back to a single schema string.
        DataType.fromJson(parts.mkString).asInstanceOf[StructType]
      } else {
        throw new AnalysisException(errorMessage)
      }
    }
  }

  // Reads the numbered column-name properties of the given kind ("part",
  // "bucket" or "sort") back into an ordered sequence; fails if a numbered
  // entry announced by the corresponding num*Cols property is missing.
  private def getColumnNamesByType(
      props: Map[String, String],
      colType: String,
      typeName: String): Seq[String] = {
    for {
      numCols <- props.get(s"spark.sql.sources.schema.num${colType.capitalize}Cols").toSeq
      index <- 0 until numCols.toInt
    } yield props.getOrElse(
      s"$DATASOURCE_SCHEMA_PREFIX${colType}Col.$index",
      throw new AnalysisException(
        s"Corrupted $typeName in catalog: $numCols parts expected, but part $index is missing."
      )
    )
  }

  // Partition column names as persisted in table properties (empty when the
  // table is unpartitioned).
  private def getPartitionColumnsFromTableProperties(metadata: CatalogTable): Seq[String] = {
    getColumnNamesByType(metadata.properties, "part", "partitioning columns")
  }

  // Bucketing spec as persisted in table properties; None when the numBuckets
  // property is absent (unbucketed table).
  private def getBucketSpecFromTableProperties(metadata: CatalogTable): Option[BucketSpec] = {
    metadata.properties.get(DATASOURCE_SCHEMA_NUMBUCKETS).map { numBuckets =>
      BucketSpec(
        numBuckets.toInt,
        getColumnNamesByType(metadata.properties, "bucket", "bucketing columns"),
        getColumnNamesByType(metadata.properties, "sort", "sorting columns"))
    }
  }
}
ajaysaini725/spark
sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
Scala
apache-2.0
56,359
package com.mz.training.common.services.pagination

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import com.mz.training.common.repositories.{SelectCount, SelectPaging}
import com.mz.training.common.services.{GetAllPagination, GetAllPaginationResult}
import com.mz.training.domains.EntityId

import scala.concurrent.duration._
import scala.util.Failure

/** Internal message signalling that the pagination request timed out. */
case object TimeOutMsg

/**
 * Per-request actor that answers a single [[GetAllPagination]] message.
 * It fires [[SelectCount]] and [[SelectPaging]] at the repository and replies
 * with a [[GetAllPaginationResult]] once BOTH answers have arrived, or with an
 * empty result when the timeout elapses first.
 *
 * Created by zemi on 28/12/2016.
 */
class GetAllPaginationActor[E <: EntityId](repository: ActorRef) extends Actor with ActorLogging {

  protected implicit val executorService = context.dispatcher

  // How long to wait for both repository answers before replying with an empty page.
  val timeout = 1.9 seconds

  var senderAct: Option[ActorRef] = None
  var msg: Option[GetAllPagination[E]] = None
  var size: Option[Long] = None
  // None = paging answer not received yet; Some(Nil) is a legitimate empty page.
  // (Previously a bare List was used, so an empty page was indistinguishable from
  // "no answer yet" and the caller always had to wait for the timeout.)
  var result: Option[List[E]] = None

  override def receive: Receive = {
    // NOTE: the type parameters below are erased at runtime; these patterns only
    // check the container class. Safe here because this actor serves exactly one
    // request and only talks to a single repository.
    case request: GetAllPagination[E] => startProcess(request)
    case rows: List[E] => processSelectResult(rows)
    case count: Option[Long] => processCount(count)
    case TimeOutMsg =>
      log.warning(s"${getClass.getCanonicalName} -> Timeout reached!")
      size = Some(0)
      result = Some(Nil)
      msg.foreach(m => senderAct.foreach { actor =>
        log.debug(s"${getClass.getCanonicalName} -> processResult going to return result")
        actor ! GetAllPaginationResult[E](m.page, m.sizePerPage, 0, Nil)
      })
      context.become(processed)
    case failure: akka.actor.Status.Failure =>
      log.error(s"${getClass.getCanonicalName} operation failed: ${failure.cause}")
      senderAct.foreach(actorRef => actorRef ! failure)
      context.become(processed)
  }

  /**
   * Terminal state: the reply (or failure) has already been sent.
   *
   * @return receive handler that ignores the late timeout tick and flags
   *         anything else as unexpected
   */
  def processed: Receive = {
    case TimeOutMsg => // scheduled tick arriving after completion is expected; ignore it
    case _: Any => log.error("Unsupported operation!!!")
  }

  /**
   * Handle the result of the paging select.
   *
   * @param rows entities of the requested page (possibly empty)
   */
  private def processSelectResult(rows: List[E]): Unit = {
    log.info(s"${getClass.getCanonicalName}:processSelectResult(msg) ->")
    result = Some(rows)
    processResult(result, size)
  }

  /**
   * Handle the count result.
   *
   * @param count total number of rows; None is treated as 0
   */
  private def processCount(count: Option[Long]): Unit = {
    log.info(s"${getClass.getCanonicalName}:processCount() -> msg: ${count.toString}")
    size = Some(count.getOrElse(0L))
    processResult(result, size)
  }

  /**
   * Start the selecting process: remember the requester, query the repository
   * and schedule the timeout guard.
   *
   * @param request pagination request to serve
   */
  private def startProcess(request: GetAllPagination[E]): Unit = {
    senderAct = Some(sender)
    this.msg = Some(request)
    val itemsPerPage = getSizeParPage(request.sizePerPage)
    // Pages are 1-based; page <= 0 falls back to offset 0.
    val offset = if (request.page > 0) (request.page - 1) * itemsPerPage else 0
    repository ! SelectCount()
    repository ! SelectPaging(offset, itemsPerPage)
    context.system.scheduler.scheduleOnce(timeout, self, TimeOutMsg)
  }

  /**
   * Send the result back to the requester once both the page content and the
   * total size have been collected, then switch to the terminal state.
   */
  private def processResult(result: Option[List[E]], size: Option[Long]): Unit = {
    for {
      rows <- result
      sz <- size
      m <- msg
      actor <- senderAct
    } {
      log.debug(s"${getClass.getCanonicalName} -> processResult going to return result")
      actor ! GetAllPaginationResult[E](m.page, m.sizePerPage, sz, rows)
      context.become(processed)
    }
  }

  /**
   * Resolve sizePerPage. If size is negative or zero, 1 is returned.
   *
   * @param size requested page size
   * @return a strictly positive page size
   */
  private def getSizeParPage(size: Int): Int = if (size <= 0) 1 else size
}

object GetAllPaginationActor {
  /** Props factory for [[GetAllPaginationActor]]. */
  def props[E](repository: ActorRef): Props = Props(classOf[GetAllPaginationActor[E]], repository)
}
michalzeman/angular2-training
akka-http-server/src/main/scala/com/mz/training/common/services/pagination/GetAllPaginationActor.scala
Scala
mit
3,740
package net.sansa_stack.inference.utils.graph import java.util.Comparator import com.google.common.collect.ComparisonChain import org.apache.jena.graph.Node /** * Definition of edge equivalence used for graph isomorphism detection. * * @author Lorenz Buehmann */ class EdgeEquivalenceComparator extends Comparator[LabeledEdge[Node, String]] { // def equivalenceHashcode(edge: LabeledEdge[Node], // context: org.jgrapht.Graph[Node, LabeledEdge[Node]]): Int = edge.hashCode() // // def equivalenceCompare(edge1: LabeledEdge[Node], edge2: LabeledEdge[Node], // context1: org.jgrapht.Graph[Node, LabeledEdge[Node]], // context2: org.jgrapht.Graph[Node, LabeledEdge[Node]]): Boolean = // (edge1.label.startsWith("?") && edge2.label.startsWith("?")) || // both predicates are variables // edge1.label.equals(edge2.label) // both URIs match override def compare(e1: LabeledEdge[Node, String], e2: LabeledEdge[Node, String]): Int = if ((e1.label.startsWith("?") && e2.label.startsWith("?")) || e1.label.equals(e2.label)) { 0 } else { ComparisonChain.start().compare(e1.label, e2.label).result() } }
SANSA-Stack/SANSA-RDF
sansa-inference/sansa-inference-common/src/main/scala/net/sansa_stack/inference/utils/graph/EdgeEquivalenceComparator.scala
Scala
apache-2.0
1,180
/*
 * Copyright 2015 Renaud Bruneliere
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.github.bruneli.scalaopt.core.linear

import org.scalatest.{FlatSpec, Matchers}
import com.github.bruneli.scalaopt.core._
import PrimalTableau.given
import com.github.bruneli.scalaopt.core.variable.PositiveVariables

/**
 * Unit tests for building a simplex tableau via the
 * given/minimize/maximize/subjectTo DSL of [[PrimalTableau]].
 *
 * @author bruneli
 */
class PrimalTableauSpec extends FlatSpec with Matchers {

  "minimize" should "build a tableau from a linear function without any constraint" in {
    val tableau = given(PositiveVariables(0.0, 0.0, 0.0))
      .minimize((x: ContinuousVariablesType) => 2.0 * x(0) + 1.0 * x(1) + 3.0 * x(2))
      .create
    // One column per decision variable, no constraint rows yet.
    tableau.columns.size shouldBe 3
    tableau.numberOfConstraints shouldBe 0
    tableau.columns.collect().map(_.phase1Cost) should contain theSameElementsInOrderAs List(0.0, 0.0, 0.0)
    // Minimization stores the objective coefficients negated in the phase-2 cost row.
    tableau.columns.collect().map(_.phase2Cost) should contain theSameElementsInOrderAs List(-2.0, -1.0, -3.0)
    tableau.rhs.phase1Cost shouldBe 0.0
    tableau.rhs.phase2Cost shouldBe 0.0
  }

  "maximize" should "build a tableau from a linear function without any constraint" in {
    val tableau = given(PositiveVariables(0.0, 0.0, 0.0))
      .maximize((x: ContinuousVariablesType) => 2.0 * x(0) + 1.0 * x(1) + 3.0 * x(2))
      .create
    tableau.columns.size shouldBe 3
    tableau.numberOfConstraints shouldBe 0
    tableau.columns.collect().map(_.phase1Cost) should contain theSameElementsInOrderAs List(0.0, 0.0, 0.0)
    // Maximization keeps the objective coefficients with their original sign.
    tableau.columns.collect().map(_.phase2Cost) should contain theSameElementsInOrderAs List(2.0, 1.0, 3.0)
    tableau.rhs.phase1Cost shouldBe 0.0
    tableau.rhs.phase2Cost shouldBe 0.0
  }

  "subjectTo" should "add a set of linear constraints to the tableau" in {
    val tableau = given(PositiveVariables(0.0, 0.0, 0.0))
      .minimize((x: ContinuousVariablesType) => 2.0 * x(0) + 1.0 * x(1) + 3.0 * x(2))
      .subjectTo(
        ((x: ContinuousVariablesType) => x(0).x) ge 1.0,
        ((x: ContinuousVariablesType) => x(0) + x(1)) equ 3.0
      ).create
    // 4 columns because of the 3 decision variables + 1 slack variable introduced by inequality constraint
    tableau.columns.size shouldBe 4
    tableau.numberOfConstraints shouldBe 2
    tableau.columns.collect().map(_.phase1Cost) should contain theSameElementsInOrderAs List(0.0, 0.0, 0.0, 0.0)
    tableau.columns.collect().map(_.phase2Cost) should contain theSameElementsInOrderAs List(-2.0, -1.0, -3.0, 0.0)
    // the "greater equal" inequality constraint introduces an excess variable with a -1 value
    tableau.columns.collect().map(_.constrains(0).x) should contain theSameElementsInOrderAs List(1.0, 0.0, 0.0, -1.0)
    tableau.columns.collect().map(_.constrains(1).x) should contain theSameElementsInOrderAs List(1.0, 1.0, 0.0, 0.0)
    tableau.rhs.phase1Cost shouldBe 0.0
    tableau.rhs.phase2Cost shouldBe 0.0
    // Right-hand side carries the constant terms of the two constraints.
    tableau.rhs.constrains.toVector.map(_.x) should contain theSameElementsInOrderAs Vector(1.0, 3.0)
  }

  it should "throw an exception when a constraint exceeds objective function size" in {
    // x(6) references a 7th variable while only 3 were declared.
    an[IllegalArgumentException] shouldBe thrownBy {
      given(PositiveVariables(0.0, 0.0, 0.0))
        .minimize((x: ContinuousVariablesType) => 2.0 * x(0) + 1.0 * x(1) + 3.0 * x(2))
        .subjectTo(
          ((x: ContinuousVariablesType) => x(0).x) ge 1.0,
          ((x: ContinuousVariablesType) => x(0) + x(1)) equ 3.0,
          ((x: ContinuousVariablesType) => x(2) + x(6)) le 5.0
        )
    }
  }
}
bruneli/scalaopt
core/src/test/scala/com/github/bruneli/scalaopt/core/linear/PrimalTableauSpec.scala
Scala
apache-2.0
3,992
package org.jetbrains.plugins.scala.settings.annotations

import com.intellij.psi.{PsiElement, PsiModifierListOwner}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScUnderscoreSection
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScAnnotationsHolder, ScFunction, ScValue, ScVariable}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScModifierListOwner
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.result.Typeable

import scala.util.matching.Regex

/**
 * Abstract view over a declaration (val / var / def / parameter) used by the
 * "type annotation" settings to decide whether an explicit type annotation is
 * required for a given PSI element.
 *
 * @author Pavel Fatin
 */
trait Declaration {
  def entity: Entity

  def visibility: Visibility

  def isImplicit: Boolean

  def isConstant: Boolean

  def hasUnitType: Boolean

  /** True when the declaration's type matches any of the given patterns (plain names or `prefix*suffix` wildcards). */
  def typeMatches(patterns: Set[String]): Boolean

  /** True when the declaration carries any of the given annotations. */
  def isAnnotatedWith(annotations: Set[String]): Boolean
}

object Declaration {
  // Matches wildcard type patterns of the form "prefix*suffix".
  // NOTE: the literal asterisk must be escaped as "\\*" for the regex engine;
  // the previous "\\\\*" form escaped a backslash instead, so patterns like
  // "scala.*" never matched.
  private val AsteriskPattern = new Regex("(.*)\\*(.*)")

  /** Wraps a PSI element into a [[Declaration]] backed by the element's actual modifiers and type. */
  def apply(element: PsiElement): Declaration = new PhysicalDeclaration(element)

  /** Like [[apply(element*]], but with the visibility overridden (e.g. for "treat as public" previews). */
  def apply(element: PsiElement, newVisibility: Visibility): Declaration =
    new PhysicalDeclaration(element) {
      override def visibility: Visibility = newVisibility
    }

  /** Builds a synthetic declaration from explicit attributes (used when no PSI element exists). */
  def apply(visibility: Visibility = Visibility.Default,
            isImplicit: Boolean = false,
            isConstant: Boolean = false,
            hasUnitType: Boolean = false): Declaration =
    SyntheticDeclaration(visibility, isImplicit, isConstant, hasUnitType)

  // Fixed typo in the original name ("Physycal"); the class is private, so the rename is local.
  private class PhysicalDeclaration(element: PsiElement) extends Declaration {
    override def entity: Entity = element match {
      case _: ScValue => Entity.Value
      case _: ScVariable => Entity.Variable
      case _: ScParameter => Entity.Parameter
      case _: ScUnderscoreSection => Entity.UnderscoreParameter
      case _ => Entity.Method
    }

    override def visibility: Visibility = element match {
      case owner: ScModifierListOwner =>
        if (owner.hasModifierPropertyScala("private")) Visibility.Private
        else if (owner.hasModifierPropertyScala("protected")) Visibility.Protected
        else Visibility.Default
      case owner: PsiModifierListOwner =>
        // Java elements: anything that is neither public nor private is treated as protected
        // (covers both "protected" and package-private for this feature's purposes).
        if (owner.hasModifierProperty("public")) Visibility.Default
        else if (owner.hasModifierProperty("private")) Visibility.Private
        else Visibility.Protected
      case _ => Visibility.Default
    }

    override def isImplicit: Boolean = element match {
      case owner: ScModifierListOwner => owner.hasModifierPropertyScala("implicit")
      case _ => false
    }

    override def isConstant: Boolean = element match {
      case value: ScValue => value.hasModifierPropertyScala("final")
      case _ => false
    }

    override def hasUnitType: Boolean = element match {
      case f: ScFunction => f.hasUnitResultType
      case v: Typeable => v.getType().exists(_.isUnit)
      case _ => false
    }

    override def typeMatches(patterns: Set[String]): Boolean = element match {
      case v: Typeable => v.getType().exists(t => patterns.exists(matches(t, _)))
      case _ => false
    }

    override def isAnnotatedWith(annotations: Set[String]): Boolean = element match {
      case holder: ScAnnotationsHolder => annotations.exists(holder.hasAnnotation)
      case _ => false
    }
  }

  /**
   * Tests whether type `t` (by canonical text, without the `_root_.` prefix) matches
   * `pattern`: either a plain fully-qualified name, or a single-`*` wildcard pattern.
   * The length guard ensures the wildcard consumes at least one character.
   */
  private def matches(t: ScType, pattern: String): Boolean = {
    val s = t.canonicalText.stripPrefix("_root_.")
    pattern match {
      case AsteriskPattern(prefix, suffix) =>
        s.length > prefix.length + suffix.length && s.startsWith(prefix) && s.endsWith(suffix)
      case plainText => s == plainText
    }
  }

  private case class SyntheticDeclaration(visibility: Visibility,
                                          isImplicit: Boolean,
                                          isConstant: Boolean,
                                          hasUnitType: Boolean) extends Declaration {
    override def entity: Entity = Entity.Method

    // Synthetic declarations carry no type or annotation information.
    override def typeMatches(patterns: Set[String]): Boolean = false

    override def isAnnotatedWith(annotations: Set[String]): Boolean = false
  }
}
loskutov/intellij-scala
src/org/jetbrains/plugins/scala/settings/annotations/Declaration.scala
Scala
apache-2.0
4,139
/*
 * Copyright 2014 Cisco Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.cisco.oss.foundation.orchestration.scope.vmware

import java.net.URL

import com.cisco.oss.foundation.orchestration.scope.model.Module
import com.cisco.oss.foundation.orchestration.scope.utils.ScopeUtils
import com.vmware.vim25._
import com.vmware.vim25.mo._
import org.junit.{Ignore, Test}

import scala.Predef._

/**
 * Manual integration tests against a live vCenter instance (creation and
 * cloning of VMs through the vijava API). The whole class is @Ignore'd because
 * it requires network access to a specific lab environment.
 *
 * SECURITY NOTE(review): host address and credentials are hard-coded below;
 * they should be externalized before this test is ever re-enabled.
 *
 * Created with IntelliJ IDEA.
 * User: igreenfi
 * Date: 2/9/14
 * Time: 9:38 AM
 */
@Ignore
class VMWareTest {
  val spec: HostConnectSpec = new HostConnectSpec()
  val compResSpec: ComputeResourceConfigSpec = new ComputeResourceConfigSpec()

  /** Creates a bare VM (SCSI controller + disk + NIC) in the "NDS" datacenter and waits for the task. */
  @Test
  def createMachineTest() {
    val si = new ServiceInstance(new URL("https://10.56.161.100/sdk"), "root", "master1234", true)
    val dcName: String = "NDS"
    val vmName: String = "vimasterVM"
    val memorySizeMB: Long = 500
    val cupCount: Int = 1
    val guestOsId: String = "rhel5Guest"
    val diskSizeKB: Long = 4.194e+7.toLong
    // mode: persistent|independent_persistent, independent_nonpersistent
    val diskMode: String = "persistent"
    val datastoreName: String = "CONDUCTOR2-local1 (1)"
    val netName: String = "VLAN860"
    val nicName: String = "Network Adapter 1"

    val rootFolder: Folder = si.getRootFolder
    val dc: Datacenter = new InventoryNavigator(rootFolder)
      .searchManagedEntity("Datacenter", dcName).asInstanceOf[Datacenter]
    val rp: ResourcePool = new InventoryNavigator(dc)
      .searchManagedEntities("ResourcePool")(0).asInstanceOf[ResourcePool]
    val vmFolder: Folder = dc.getVmFolder

    // create vm config spec
    val vmSpec: VirtualMachineConfigSpec = new VirtualMachineConfigSpec
    vmSpec.setName(vmName)
    vmSpec.setAnnotation("VirtualMachine Annotation")
    vmSpec.setMemoryMB(memorySizeMB)
    vmSpec.setNumCPUs(cupCount)
    vmSpec.setGuestId(guestOsId)

    // create virtual devices
    val cKey: Int = 1000
    val scsiSpec: VirtualDeviceConfigSpec = createScsiSpec(cKey)
    val diskSpec: VirtualDeviceConfigSpec = createDiskSpec(datastoreName, cKey, diskSizeKB, diskMode)
    val nicSpec: VirtualDeviceConfigSpec = createNicSpec(netName, nicName)
    vmSpec.setDeviceChange(Array[VirtualDeviceConfigSpec](scsiSpec, diskSpec, nicSpec))

    // create vm file info for the vmx file
    val vmfi: VirtualMachineFileInfo = new VirtualMachineFileInfo
    vmfi.setVmPathName("[" + datastoreName + "]")
    vmSpec.setFiles(vmfi)

    // call the createVM_Task method on the vm folder
    val task: Task = vmFolder.createVM_Task(vmSpec, rp, null)
    val result: String = task.waitForMe
    // BUGFIX: compare task status by value, not by reference ("eq" only worked
    // when the JVM happened to return the interned Task.SUCCESS constant).
    if (result == Task.SUCCESS) {
      System.out.println("VM Created Successfully")
    }
    else {
      System.out.println("VM could not be created. ")
    }
  }

  /** Builds the device-change spec for a new LSI Logic SCSI controller on bus 0. */
  private def createScsiSpec(cKey: Int): VirtualDeviceConfigSpec = {
    val scsiSpec: VirtualDeviceConfigSpec = new VirtualDeviceConfigSpec
    scsiSpec.setOperation(VirtualDeviceConfigSpecOperation.add)
    val scsiCtrl: VirtualLsiLogicController = new VirtualLsiLogicController
    scsiCtrl.setKey(cKey)
    scsiCtrl.setBusNumber(0)
    scsiCtrl.setSharedBus(VirtualSCSISharing.noSharing)
    scsiSpec.setDevice(scsiCtrl)
    scsiSpec
  }

  /** Builds the device-change spec for a new thin-provisioned flat disk attached to controller `cKey`. */
  private def createDiskSpec(dsName: String, cKey: Int, diskSizeKB: Long, diskMode: String): VirtualDeviceConfigSpec = {
    val diskSpec: VirtualDeviceConfigSpec = new VirtualDeviceConfigSpec
    diskSpec.setOperation(VirtualDeviceConfigSpecOperation.add)
    diskSpec.setFileOperation(VirtualDeviceConfigSpecFileOperation.create)
    val vd: VirtualDisk = new VirtualDisk
    vd.setCapacityInKB(diskSizeKB)
    diskSpec.setDevice(vd)
    vd.setKey(0)
    vd.setUnitNumber(0)
    vd.setControllerKey(cKey)
    val diskfileBacking: VirtualDiskFlatVer2BackingInfo = new VirtualDiskFlatVer2BackingInfo
    val fileName: String = "[" + dsName + "]"
    diskfileBacking.setFileName(fileName)
    diskfileBacking.setDiskMode(diskMode)
    diskfileBacking.setThinProvisioned(true)
    vd.setBacking(diskfileBacking)
    diskSpec
  }

  /** Builds the device-change spec for a PCNet32 NIC attached to network `netName` with a generated MAC. */
  private def createNicSpec(netName: String, nicName: String): VirtualDeviceConfigSpec = {
    val nicSpec: VirtualDeviceConfigSpec = new VirtualDeviceConfigSpec
    nicSpec.setOperation(VirtualDeviceConfigSpecOperation.add)
    val nic: VirtualEthernetCard = new VirtualPCNet32
    val nicBacking: VirtualEthernetCardNetworkBackingInfo = new VirtualEthernetCardNetworkBackingInfo
    nicBacking.setDeviceName(netName)
    val info: Description = new Description
    info.setLabel(nicName)
    info.setSummary(netName)
    nic.setDeviceInfo(info)
    nic.setAddressType("generated")
    nic.setBacking(nicBacking)
    nic.setKey(0)
    nicSpec.setDevice(nic)
    nicSpec
  }

  /** Clones a VM from the "Cisco Centos 6.5" template, reconfiguring CPUs/memory/NICs, then attaches an alarm. */
  @Test
  def generatedCode() {
    //val datacenterName = "CONDUCTOR2-local1 (1)"
    val datacenterName = "NDS"
    val cloneName = "Junit-test"
    val si = new ServiceInstance(new URL("https://10.56.161.100/sdk"), "root", "master1234", true)
    val sc = si.getServerConnection()
    val rootFolder = si.getRootFolder()
    val baseTemplate: VirtualMachine = new InventoryNavigator(rootFolder)
      .searchManagedEntity("VirtualMachine", "Cisco Centos 6.5").asInstanceOf[VirtualMachine]
    val resourcePool: Array[ManagedEntity] = new InventoryNavigator(rootFolder).searchManagedEntities("ResourcePool")

    // Start: CloneVM_Task
    val virtualMachine2 = new VirtualMachine(sc, baseTemplate.getMOR)
    //val dc = si.getSearchIndex().findByInventoryPath(datacenterName).asInstanceOf[Datacenter]
    val dc = new InventoryNavigator(rootFolder)
      .searchManagedEntity("Datacenter", datacenterName).asInstanceOf[Datacenter]
    // dc = si.getSearchIndex().findByInventoryPath(s"[$datacenterName]").asInstanceOf[Datacenter]
    if (baseTemplate == null || dc == null) {
      System.out.println("VirtualMachine or Datacenter path is NOT correct. Pls double check. ")
      return
    }

    //val networkShaper = new VirtualMachineNetworkShaperInfo
    val net0 = createNicSpec("VLAN859", "eth0")
    val net1 = createNicSpec("VLAN860", "eth1")

    val vmFolder = dc.getVmFolder()
    val newConfig = new VirtualMachineConfigSpec()
    newConfig.setNumCPUs(4)
    newConfig.setMemoryMB(4000L)
    newConfig.setDeviceChange(Array(net0, net1))
    //newConfig.setNetworkShaper(networkShaper)

    val cloneSpec = new VirtualMachineCloneSpec()
    val virtualMachineRelocateSpec1: VirtualMachineRelocateSpec = new VirtualMachineRelocateSpec()
    virtualMachineRelocateSpec1.setPool(resourcePool.head.getMOR)
    cloneSpec.setLocation(virtualMachineRelocateSpec1)
    cloneSpec.setPowerOn(true)
    cloneSpec.setTemplate(false)
    cloneSpec.setConfig(newConfig)

    val task = baseTemplate.cloneVM_Task(vmFolder, cloneName, cloneSpec)
    System.out.println("Launching the VM clone task. It might take a while. Please wait for the result ...")
    val status = task.waitForMe()

    val runScript = new RunScriptAction()
    runScript.setScript("cat 'tttt' > test.txt")

    // NOTE(review): the alarm spec below has an empty AlarmAction; createAlarm
    // will most likely be rejected by vCenter — kept as in the original
    // exploratory code since the whole class is @Ignore'd.
    val alarmSpec = new AlarmSpec
    val action = new AlarmAction
    //action.s
    alarmSpec.setAction(action)
    si.getAlarmManager.createAlarm(task.getAssociatedManagedEntity, alarmSpec)

    // (Removed a large block of commented-out, generated CloneVM_Task sample
    //  code — full customization-spec example available in the vijava samples.)

    si.getServerConnection().logout()
  }

  /** Sanity check that a Module JSON document deserializes through the shared ScopeUtils mapper. */
  @Test
  def testJ() {
    // BUGFIX: the JSON literal was double-escaped (\\n, \\"), which both broke
    // compilation and fed invalid JSON to the mapper; restored single escaping.
    val f = ScopeUtils.mapper.readValue("{\n \"name\": \"nds_umsui\",\n \"version\": \"3.46.0-1\",\n \"nodes\": [\"ndsconsoles\"],\n \"file\": {\n \"baseConfigProperties\":[\n \"config.properties\"\n ],\n \"additionalValues\":[\n {\n \"key\" : \"upm::host\",\n \"value\" : \"upm1\"\n }\n ,{\n \"key\" : \"ndsconsole::version\",\n \"value\" : \"3.45.1-SNAPSHOT\"\n }\n ]\n }\n }", classOf[Module])
    println(f)
  }
}
foundation-runtime/orchestration
src/test/scala/com/cisco/oss/foundation/orchestration/scope/vmware/VMWareTest.scala
Scala
apache-2.0
11,470
/*
 * Copyright 2019 Spotify AB.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package com.spotify.scio.extra.bigquery

import com.google.api.services.bigquery.model.TableSchema
import com.spotify.scio.annotations.experimental
import com.spotify.scio.bigquery.TableRow
import org.apache.avro.Schema
import org.apache.avro.generic.IndexedRecord

import scala.jdk.CollectionConverters._

/** Conversions from Avro records and schemas to their BigQuery equivalents. */
object AvroConverters extends ToTableRow with ToTableSchema {

  /**
   * Converts an Avro record into a BigQuery [[TableRow]], copying each
   * non-null field value through `toTableRowField`. Null field values are
   * skipped (the corresponding key is simply absent from the row).
   */
  @experimental
  def toTableRow[T <: IndexedRecord](record: T): TableRow = {
    val row = new TableRow
    for {
      field <- record.getSchema.getFields.asScala
      value <- Option(record.get(field.pos))
    } row.set(field.name, toTableRowField(value, field))
    row
  }

  /**
   * Traverses all fields of the supplied avroSchema and converts it into
   * a TableSchema containing TableFieldSchemas.
   *
   * @param avroSchema
   * @return the equivalent BigQuery schema
   */
  @experimental
  def toTableSchema(avroSchema: Schema): TableSchema =
    new TableSchema().setFields(getFieldSchemas(avroSchema).asJava)

  /** Raised when an Avro value cannot be represented in BigQuery. */
  final case class AvroConversionException(
    private val message: String,
    private val cause: Throwable = null
  ) extends Exception(message, cause)
}
regadas/scio
scio-extra/src/main/scala/com/spotify/scio/extra/bigquery/AvroConverters.scala
Scala
apache-2.0
1,821
package org.zouzias.spark.lucenerdd.aws.dfvslucene

import org.apache.spark.sql.{Row, SQLContext, SaveMode, SparkSession}
import org.apache.spark.SparkConf
import org.zouzias.spark.lucenerdd.aws.utils._
import org.zouzias.spark.lucenerdd.facets.FacetedLuceneRDD
import org.zouzias.spark.lucenerdd._
import org.apache.spark.sql.functions._
import org.zouzias.spark.lucenerdd.logging.Logging

/**
 * Dataframe vs LuceneRDD
 *
 * Benchmark comparing a plain Spark DataFrame group-by/top-k against a
 * FacetedLuceneRDD facet query over the same column. Requires a live Spark
 * cluster with access to the S3 buckets referenced below — not runnable
 * standalone.
 */
object DataFrameVsLuceneRDDExample extends Logging {

  // Top-k size for both the DataFrame aggregation and the Lucene facet query.
  val k = 10
  // Column being aggregated / faceted on.
  val fieldName = "Country"

  def main(args: Array[String]) {
    // initialise spark context
    val conf = new SparkConf().setAppName(DataFrameVsLuceneRDDExample.getClass.getName)
    implicit val sparkSession: SparkSession = SparkSession.builder().config(conf).getOrCreate()

    // NOTE(review): conf.get throws if these executor settings are unset —
    // presumably always provided by the AWS launch scripts; verify.
    val executorMemory = conf.get("spark.executor.memory")
    val executorCores = conf.get("spark.executor.cores")
    val executorInstances = conf.get("spark.executor.instances")

    log.info(s"Executor instances: ${executorInstances}")
    log.info(s"Executor cores: ${executorCores}")
    log.info(s"Executor memory: ${executorMemory}")

    logInfo("Loading Cities")
    val citiesDF = sparkSession.read.parquet("s3://recordlinkage/world-cities-maxmind.parquet")
    // Cache and force materialization so the timed run below excludes load time.
    citiesDF.cache()
    val total = citiesDF.count()
    logInfo(s"${total} Cities loaded successfully")

    // Timed DataFrame variant: group-by + sort + take(k).
    val dfStart = System.currentTimeMillis()
    val dfResults = citiesDF.groupBy(fieldName).count().sort(desc("count")).take(k)
    val dfEnd = System.currentTimeMillis()

    // Build and materialize the Lucene index outside the timed section.
    val luceneRDD = FacetedLuceneRDD(citiesDF.select(fieldName))
    luceneRDD.cache()
    luceneRDD.count()

    // Timed Lucene variant: match-all facet query on the same field.
    val lucStart = System.currentTimeMillis()
    luceneRDD.facetQuery("*:*", fieldName, facetNum = k)
    val lucEnd = System.currentTimeMillis()

    println("=" * 20)
    println(s"DF time: ${ (dfEnd - dfStart) / 1000D } seconds")
    println("=" * 20)
    println(s"Lucene time: ${(lucEnd - lucStart) / 1000D} seconds")
    println("=" * 20)
    // linkedDF.write.mode(SaveMode.Overwrite).parquet(s"s3://spark-lucenerdd/timings/v0.0.20/dataframe-vs-lucenerdd-${today}.parquet")

    // terminate spark context
    sparkSession.stop()
  }

  /**
   * Runs the facet query repeatedly and appends per-iteration timings to S3.
   * NOTE(review): `1L until iters` executes iters-1 iterations — possibly an
   * intentional warm-up skip, but worth confirming against the other
   * benchmarks in this repo.
   */
  def timeLuceneFacetedSearch(luceneRDD: FacetedLuceneRDD[Row], iters: Long, searchInfo: SparkInfo)
                             (implicit sparkSession: SparkSession): Unit = {
    val timings = (1L until iters).map{ case _ =>
      val start = System.currentTimeMillis()
      val luceneResults = luceneRDD.facetQuery("*:*", fieldName, k)
      val end = System.currentTimeMillis()
      Math.max(0L, end - start)
    }

    import sparkSession.sqlContext.implicits._
    val timingsDF = timings.map(Timing(SearchType.TermQuery.toString, _, Utils.dayString(), Utils.Version)).toDF()
    timingsDF.write.mode(SaveMode.Append).parquet(s"s3://spark-lucenerdd/timings/timing-dfvslucene-${searchInfo.toString()}.parquet")
  }
}
zouzias/spark-lucenerdd-aws
src/main/scala/org/zouzias/spark/lucenerdd/aws/dfvslucene/DataFrameVsLuceneRDDExample.scala
Scala
apache-2.0
2,908
// IntelliJ Scala plugin type-inference testdata (ticket SCL-5661): the test
// harness infers the type of the expression between the /*start*/ and /*end*/
// markers and compares it with the type named in the trailing comment line.
// The overloaded `failfunc` forces overload resolution on Map[String, Int],
// which must pick the Int-returning overload.
object SCL5661 {
  val y: String = "text"

  def failfunc(x: Map[String, Int]) = 1

  def failfunc(x: String) = "text"

  /*start*/failfunc(Map(y -> 1))/*end*/
}
//Int
triggerNZ/intellij-scala
testdata/typeInference/bugs5/SCL5661.scala
Scala
apache-2.0
167
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.catalog import org.apache.hadoop.conf.Configuration import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.{FunctionIdentifier, SimpleCatalystConf, TableIdentifier} import org.apache.spark.sql.catalyst.analysis._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.parser.CatalystSqlParser import org.apache.spark.sql.catalyst.plans.PlanTest import org.apache.spark.sql.catalyst.plans.logical.{Range, SubqueryAlias, View} class InMemorySessionCatalogSuite extends SessionCatalogSuite { protected val utils = new CatalogTestUtils { override val tableInputFormat: String = "com.fruit.eyephone.CameraInputFormat" override val tableOutputFormat: String = "com.fruit.eyephone.CameraOutputFormat" override val defaultProvider: String = "parquet" override def newEmptyCatalog(): ExternalCatalog = new InMemoryCatalog } } /** * Tests for [[SessionCatalog]] * * Note: many of the methods here are very similar to the ones in [[ExternalCatalogSuite]]. * This is because [[SessionCatalog]] and [[ExternalCatalog]] share many similar method * signatures but do not extend a common parent. 
This is largely by design but * unfortunately leads to very similar test code in two places. */ abstract class SessionCatalogSuite extends PlanTest { protected val utils: CatalogTestUtils protected val isHiveExternalCatalog = false import utils._ private def withBasicCatalog(f: SessionCatalog => Unit): Unit = { val catalog = new SessionCatalog(newBasicCatalog()) catalog.createDatabase(newDb("default"), ignoreIfExists = true) try { f(catalog) } finally { catalog.reset() } } private def withEmptyCatalog(f: SessionCatalog => Unit): Unit = { val catalog = new SessionCatalog(newEmptyCatalog()) catalog.createDatabase(newDb("default"), ignoreIfExists = true) try { f(catalog) } finally { catalog.reset() } } // -------------------------------------------------------------------------- // Databases // -------------------------------------------------------------------------- test("basic create and list databases") { withEmptyCatalog { catalog => catalog.createDatabase(newDb("default"), ignoreIfExists = true) assert(catalog.databaseExists("default")) assert(!catalog.databaseExists("testing")) assert(!catalog.databaseExists("testing2")) catalog.createDatabase(newDb("testing"), ignoreIfExists = false) assert(catalog.databaseExists("testing")) assert(catalog.listDatabases().toSet == Set("default", "testing")) catalog.createDatabase(newDb("testing2"), ignoreIfExists = false) assert(catalog.listDatabases().toSet == Set("default", "testing", "testing2")) assert(catalog.databaseExists("testing2")) assert(!catalog.databaseExists("does_not_exist")) } } def testInvalidName(func: (String) => Unit) { // scalastyle:off // non ascii characters are not allowed in the source code, so we disable the scalastyle. 
val name = "砖" // scalastyle:on val e = intercept[AnalysisException] { func(name) }.getMessage assert(e.contains(s"`$name` is not a valid name for tables/databases.")) } test("create databases using invalid names") { withEmptyCatalog { catalog => testInvalidName( name => catalog.createDatabase(newDb(name), ignoreIfExists = true)) } } test("get database when a database exists") { withBasicCatalog { catalog => val db1 = catalog.getDatabaseMetadata("db1") assert(db1.name == "db1") assert(db1.description.contains("db1")) } } test("get database should throw exception when the database does not exist") { withBasicCatalog { catalog => intercept[NoSuchDatabaseException] { catalog.getDatabaseMetadata("db_that_does_not_exist") } } } test("list databases without pattern") { withBasicCatalog { catalog => assert(catalog.listDatabases().toSet == Set("default", "db1", "db2", "db3")) } } test("list databases with pattern") { withBasicCatalog { catalog => assert(catalog.listDatabases("db").toSet == Set.empty) assert(catalog.listDatabases("db*").toSet == Set("db1", "db2", "db3")) assert(catalog.listDatabases("*1").toSet == Set("db1")) assert(catalog.listDatabases("db2").toSet == Set("db2")) } } test("drop database") { withBasicCatalog { catalog => catalog.dropDatabase("db1", ignoreIfNotExists = false, cascade = false) assert(catalog.listDatabases().toSet == Set("default", "db2", "db3")) } } test("drop database when the database is not empty") { // Throw exception if there are functions left withBasicCatalog { catalog => catalog.externalCatalog.dropTable("db2", "tbl1", ignoreIfNotExists = false, purge = false) catalog.externalCatalog.dropTable("db2", "tbl2", ignoreIfNotExists = false, purge = false) intercept[AnalysisException] { catalog.dropDatabase("db2", ignoreIfNotExists = false, cascade = false) } } withBasicCatalog { catalog => // Throw exception if there are tables left catalog.externalCatalog.dropFunction("db2", "func1") intercept[AnalysisException] { 
catalog.dropDatabase("db2", ignoreIfNotExists = false, cascade = false) } } withBasicCatalog { catalog => // When cascade is true, it should drop them catalog.externalCatalog.dropDatabase("db2", ignoreIfNotExists = false, cascade = true) assert(catalog.listDatabases().toSet == Set("default", "db1", "db3")) } } test("drop database when the database does not exist") { withBasicCatalog { catalog => // TODO: fix this inconsistent between HiveExternalCatalog and InMemoryCatalog if (isHiveExternalCatalog) { val e = intercept[AnalysisException] { catalog.dropDatabase("db_that_does_not_exist", ignoreIfNotExists = false, cascade = false) }.getMessage assert(e.contains( "org.apache.hadoop.hive.metastore.api.NoSuchObjectException: db_that_does_not_exist")) } else { intercept[NoSuchDatabaseException] { catalog.dropDatabase("db_that_does_not_exist", ignoreIfNotExists = false, cascade = false) } } catalog.dropDatabase("db_that_does_not_exist", ignoreIfNotExists = true, cascade = false) } } test("drop current database and drop default database") { withBasicCatalog { catalog => catalog.setCurrentDatabase("db1") assert(catalog.getCurrentDatabase == "db1") catalog.dropDatabase("db1", ignoreIfNotExists = false, cascade = true) intercept[NoSuchDatabaseException] { catalog.createTable(newTable("tbl1", "db1"), ignoreIfExists = false) } catalog.setCurrentDatabase("default") assert(catalog.getCurrentDatabase == "default") intercept[AnalysisException] { catalog.dropDatabase("default", ignoreIfNotExists = false, cascade = true) } } } test("alter database") { withBasicCatalog { catalog => val db1 = catalog.getDatabaseMetadata("db1") // Note: alter properties here because Hive does not support altering other fields catalog.alterDatabase(db1.copy(properties = Map("k" -> "v3", "good" -> "true"))) val newDb1 = catalog.getDatabaseMetadata("db1") assert(db1.properties.isEmpty) assert(newDb1.properties.size == 2) assert(newDb1.properties.get("k") == Some("v3")) assert(newDb1.properties.get("good") 
== Some("true")) } } test("alter database should throw exception when the database does not exist") { withBasicCatalog { catalog => intercept[NoSuchDatabaseException] { catalog.alterDatabase(newDb("unknown_db")) } } } test("get/set current database") { withBasicCatalog { catalog => assert(catalog.getCurrentDatabase == "default") catalog.setCurrentDatabase("db2") assert(catalog.getCurrentDatabase == "db2") intercept[NoSuchDatabaseException] { catalog.setCurrentDatabase("deebo") } catalog.createDatabase(newDb("deebo"), ignoreIfExists = false) catalog.setCurrentDatabase("deebo") assert(catalog.getCurrentDatabase == "deebo") } } // -------------------------------------------------------------------------- // Tables // -------------------------------------------------------------------------- test("create table") { withBasicCatalog { catalog => assert(catalog.externalCatalog.listTables("db1").isEmpty) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2")) catalog.createTable(newTable("tbl3", "db1"), ignoreIfExists = false) catalog.createTable(newTable("tbl3", "db2"), ignoreIfExists = false) assert(catalog.externalCatalog.listTables("db1").toSet == Set("tbl3")) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2", "tbl3")) // Create table without explicitly specifying database catalog.setCurrentDatabase("db1") catalog.createTable(newTable("tbl4"), ignoreIfExists = false) assert(catalog.externalCatalog.listTables("db1").toSet == Set("tbl3", "tbl4")) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2", "tbl3")) } } test("create tables using invalid names") { withEmptyCatalog { catalog => testInvalidName(name => catalog.createTable(newTable(name, "db1"), ignoreIfExists = false)) } } test("create table when database does not exist") { withBasicCatalog { catalog => // Creating table in non-existent database should always fail intercept[NoSuchDatabaseException] { catalog.createTable(newTable("tbl1", 
"does_not_exist"), ignoreIfExists = false) } intercept[NoSuchDatabaseException] { catalog.createTable(newTable("tbl1", "does_not_exist"), ignoreIfExists = true) } // Table already exists intercept[TableAlreadyExistsException] { catalog.createTable(newTable("tbl1", "db2"), ignoreIfExists = false) } catalog.createTable(newTable("tbl1", "db2"), ignoreIfExists = true) } } test("create temp table") { withBasicCatalog { catalog => val tempTable1 = Range(1, 10, 1, 10) val tempTable2 = Range(1, 20, 2, 10) catalog.createTempView("tbl1", tempTable1, overrideIfExists = false) catalog.createTempView("tbl2", tempTable2, overrideIfExists = false) assert(catalog.getTempView("tbl1") == Option(tempTable1)) assert(catalog.getTempView("tbl2") == Option(tempTable2)) assert(catalog.getTempView("tbl3").isEmpty) // Temporary table already exists intercept[TempTableAlreadyExistsException] { catalog.createTempView("tbl1", tempTable1, overrideIfExists = false) } // Temporary table already exists but we override it catalog.createTempView("tbl1", tempTable2, overrideIfExists = true) assert(catalog.getTempView("tbl1") == Option(tempTable2)) } } test("drop table") { withBasicCatalog { catalog => assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2")) catalog.dropTable(TableIdentifier("tbl1", Some("db2")), ignoreIfNotExists = false, purge = false) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl2")) // Drop table without explicitly specifying database catalog.setCurrentDatabase("db2") catalog.dropTable(TableIdentifier("tbl2"), ignoreIfNotExists = false, purge = false) assert(catalog.externalCatalog.listTables("db2").isEmpty) } } test("drop table when database/table does not exist") { withBasicCatalog { catalog => // Should always throw exception when the database does not exist intercept[NoSuchDatabaseException] { catalog.dropTable(TableIdentifier("tbl1", Some("unknown_db")), ignoreIfNotExists = false, purge = false) } intercept[NoSuchDatabaseException] { 
catalog.dropTable(TableIdentifier("tbl1", Some("unknown_db")), ignoreIfNotExists = true, purge = false) } intercept[NoSuchTableException] { catalog.dropTable(TableIdentifier("unknown_table", Some("db2")), ignoreIfNotExists = false, purge = false) } catalog.dropTable(TableIdentifier("unknown_table", Some("db2")), ignoreIfNotExists = true, purge = false) } } test("drop temp table") { withBasicCatalog { catalog => val tempTable = Range(1, 10, 2, 10) catalog.createTempView("tbl1", tempTable, overrideIfExists = false) catalog.setCurrentDatabase("db2") assert(catalog.getTempView("tbl1") == Some(tempTable)) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2")) // If database is not specified, temp table should be dropped first catalog.dropTable(TableIdentifier("tbl1"), ignoreIfNotExists = false, purge = false) assert(catalog.getTempView("tbl1") == None) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2")) // If temp table does not exist, the table in the current database should be dropped catalog.dropTable(TableIdentifier("tbl1"), ignoreIfNotExists = false, purge = false) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl2")) // If database is specified, temp tables are never dropped catalog.createTempView("tbl1", tempTable, overrideIfExists = false) catalog.createTable(newTable("tbl1", "db2"), ignoreIfExists = false) catalog.dropTable(TableIdentifier("tbl1", Some("db2")), ignoreIfNotExists = false, purge = false) assert(catalog.getTempView("tbl1") == Some(tempTable)) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl2")) } } test("rename table") { withBasicCatalog { catalog => assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2")) catalog.renameTable(TableIdentifier("tbl1", Some("db2")), TableIdentifier("tblone")) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tblone", "tbl2")) catalog.renameTable(TableIdentifier("tbl2", Some("db2")), 
TableIdentifier("tbltwo")) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tblone", "tbltwo")) // Rename table without explicitly specifying database catalog.setCurrentDatabase("db2") catalog.renameTable(TableIdentifier("tbltwo"), TableIdentifier("table_two")) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tblone", "table_two")) // Renaming "db2.tblone" to "db1.tblones" should fail because databases don't match intercept[AnalysisException] { catalog.renameTable( TableIdentifier("tblone", Some("db2")), TableIdentifier("tblones", Some("db1"))) } // The new table already exists intercept[TableAlreadyExistsException] { catalog.renameTable( TableIdentifier("tblone", Some("db2")), TableIdentifier("table_two")) } } } test("rename tables to an invalid name") { withBasicCatalog { catalog => testInvalidName( name => catalog.renameTable(TableIdentifier("tbl1", Some("db2")), TableIdentifier(name))) } } test("rename table when database/table does not exist") { withBasicCatalog { catalog => intercept[NoSuchDatabaseException] { catalog.renameTable(TableIdentifier("tbl1", Some("unknown_db")), TableIdentifier("tbl2")) } intercept[NoSuchTableException] { catalog.renameTable(TableIdentifier("unknown_table", Some("db2")), TableIdentifier("tbl2")) } } } test("rename temp table") { withBasicCatalog { catalog => val tempTable = Range(1, 10, 2, 10) catalog.createTempView("tbl1", tempTable, overrideIfExists = false) catalog.setCurrentDatabase("db2") assert(catalog.getTempView("tbl1") == Option(tempTable)) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2")) // If database is not specified, temp table should be renamed first catalog.renameTable(TableIdentifier("tbl1"), TableIdentifier("tbl3")) assert(catalog.getTempView("tbl1").isEmpty) assert(catalog.getTempView("tbl3") == Option(tempTable)) assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2")) // If database is specified, temp tables are never renamed 
      catalog.renameTable(TableIdentifier("tbl2", Some("db2")), TableIdentifier("tbl4"))
      assert(catalog.getTempView("tbl3") == Option(tempTable))
      assert(catalog.getTempView("tbl4").isEmpty)
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl4"))
    }
  }

  test("alter table") {
    withBasicCatalog { catalog =>
      val tbl1 = catalog.externalCatalog.getTable("db2", "tbl1")
      catalog.alterTable(tbl1.copy(properties = Map("toh" -> "frem")))
      val newTbl1 = catalog.externalCatalog.getTable("db2", "tbl1")
      // The original snapshot is untouched; only the stored table gains the property.
      assert(!tbl1.properties.contains("toh"))
      assert(newTbl1.properties.size == tbl1.properties.size + 1)
      assert(newTbl1.properties.get("toh") == Some("frem"))
      // Alter table without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      catalog.alterTable(tbl1.copy(identifier = TableIdentifier("tbl1")))
      val newestTbl1 = catalog.externalCatalog.getTable("db2", "tbl1")
      // For hive serde table, hive metastore will set transient_lastDdlTime in table's properties,
      // and its value will be modified, here we ignore it when comparing the two tables.
      assert(newestTbl1.copy(properties = Map.empty) == tbl1.copy(properties = Map.empty))
    }
  }

  test("alter table when database/table does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.alterTable(newTable("tbl1", "unknown_db"))
      }
      intercept[NoSuchTableException] {
        catalog.alterTable(newTable("unknown_table", "db2"))
      }
    }
  }

  test("get table") {
    withBasicCatalog { catalog =>
      assert(catalog.getTableMetadata(TableIdentifier("tbl1", Some("db2")))
        == catalog.externalCatalog.getTable("db2", "tbl1"))
      // Get table without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      assert(catalog.getTableMetadata(TableIdentifier("tbl1"))
        == catalog.externalCatalog.getTable("db2", "tbl1"))
    }
  }

  test("get table when database/table does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.getTableMetadata(TableIdentifier("tbl1", Some("unknown_db")))
      }
      intercept[NoSuchTableException] {
        catalog.getTableMetadata(TableIdentifier("unknown_table", Some("db2")))
      }
    }
  }

  // Option-returning variant: None for a missing table, but still an exception
  // for a missing database.
  test("get option of table metadata") {
    withBasicCatalog { catalog =>
      assert(catalog.getTableMetadataOption(TableIdentifier("tbl1", Some("db2")))
        == Option(catalog.externalCatalog.getTable("db2", "tbl1")))
      assert(catalog.getTableMetadataOption(TableIdentifier("unknown_table", Some("db2"))).isEmpty)
      intercept[NoSuchDatabaseException] {
        catalog.getTableMetadataOption(TableIdentifier("tbl1", Some("unknown_db")))
      }
    }
  }

  test("lookup table relation") {
    withBasicCatalog { catalog =>
      val tempTable1 = Range(1, 10, 1, 10)
      val metastoreTable1 = catalog.externalCatalog.getTable("db2", "tbl1")
      catalog.createTempView("tbl1", tempTable1, overrideIfExists = false)
      catalog.setCurrentDatabase("db2")
      // If we explicitly specify the database, we'll look up the relation in that database
      assert(catalog.lookupRelation(TableIdentifier("tbl1", Some("db2"))).children.head
        .asInstanceOf[CatalogRelation].tableMeta == metastoreTable1)
      // Otherwise, we'll first look up a temporary table with the same name
      assert(catalog.lookupRelation(TableIdentifier("tbl1"))
        == SubqueryAlias("tbl1", tempTable1))
      // Then, if that does not exist, look up the relation in the current database
      catalog.dropTable(TableIdentifier("tbl1"), ignoreIfNotExists = false, purge = false)
      assert(catalog.lookupRelation(TableIdentifier("tbl1")).children.head
        .asInstanceOf[CatalogRelation].tableMeta == metastoreTable1)
    }
  }

  test("look up view relation") {
    withBasicCatalog { catalog =>
      val metadata = catalog.externalCatalog.getTable("db3", "view1")
      catalog.setCurrentDatabase("default")
      // Look up a view.
      assert(metadata.viewText.isDefined)
      // The view's logical plan is rebuilt by parsing its stored SQL text.
      val view = View(desc = metadata, output = metadata.schema.toAttributes,
        child = CatalystSqlParser.parsePlan(metadata.viewText.get))
      comparePlans(catalog.lookupRelation(TableIdentifier("view1", Some("db3"))),
        SubqueryAlias("view1", view))
      // Look up a view using current database of the session catalog.
      catalog.setCurrentDatabase("db3")
      comparePlans(catalog.lookupRelation(TableIdentifier("view1")), SubqueryAlias("view1", view))
    }
  }

  test("table exists") {
    withBasicCatalog { catalog =>
      assert(catalog.tableExists(TableIdentifier("tbl1", Some("db2"))))
      assert(catalog.tableExists(TableIdentifier("tbl2", Some("db2"))))
      assert(!catalog.tableExists(TableIdentifier("tbl3", Some("db2"))))
      assert(!catalog.tableExists(TableIdentifier("tbl1", Some("db1"))))
      assert(!catalog.tableExists(TableIdentifier("tbl2", Some("db1"))))
      // If database is explicitly specified, do not check temporary tables
      val tempTable = Range(1, 10, 1, 10)
      assert(!catalog.tableExists(TableIdentifier("tbl3", Some("db2"))))
      // If database is not explicitly specified, check the current database
      catalog.setCurrentDatabase("db2")
      assert(catalog.tableExists(TableIdentifier("tbl1")))
      assert(catalog.tableExists(TableIdentifier("tbl2")))
      catalog.createTempView("tbl3", tempTable, overrideIfExists = false)
      // tableExists should not check temp view.
      assert(!catalog.tableExists(TableIdentifier("tbl3")))
    }
  }

  // getTempViewOrPermanentTableMetadata resolves an unqualified name against
  // temp views, but a database-qualified name against permanent tables only.
  test("getTempViewOrPermanentTableMetadata on temporary views") {
    withBasicCatalog { catalog =>
      val tempTable = Range(1, 10, 2, 10)
      intercept[NoSuchTableException] {
        catalog.getTempViewOrPermanentTableMetadata(TableIdentifier("view1"))
      }.getMessage
      intercept[NoSuchTableException] {
        catalog.getTempViewOrPermanentTableMetadata(TableIdentifier("view1", Some("default")))
      }.getMessage
      catalog.createTempView("view1", tempTable, overrideIfExists = false)
      assert(catalog.getTempViewOrPermanentTableMetadata(
        TableIdentifier("view1")).identifier.table == "view1")
      assert(catalog.getTempViewOrPermanentTableMetadata(
        TableIdentifier("view1")).schema(0).name == "id")
      // Qualifying with a database bypasses the temp view, so this still fails.
      intercept[NoSuchTableException] {
        catalog.getTempViewOrPermanentTableMetadata(TableIdentifier("view1", Some("default")))
      }.getMessage
    }
  }

  // Listing any database also includes the session-wide (unqualified) temp views.
  test("list tables without pattern") {
    withBasicCatalog { catalog =>
      val tempTable = Range(1, 10, 2, 10)
      catalog.createTempView("tbl1", tempTable, overrideIfExists = false)
      catalog.createTempView("tbl4", tempTable, overrideIfExists = false)
      assert(catalog.listTables("db1").toSet ==
        Set(TableIdentifier("tbl1"), TableIdentifier("tbl4")))
      assert(catalog.listTables("db2").toSet ==
        Set(TableIdentifier("tbl1"),
          TableIdentifier("tbl4"),
          TableIdentifier("tbl1", Some("db2")),
          TableIdentifier("tbl2", Some("db2"))))
      intercept[NoSuchDatabaseException] {
        catalog.listTables("unknown_db")
      }
    }
  }

  test("list tables with pattern") {
    withBasicCatalog { catalog =>
      val tempTable = Range(1, 10, 2, 10)
      catalog.createTempView("tbl1", tempTable, overrideIfExists = false)
      catalog.createTempView("tbl4", tempTable, overrideIfExists = false)
      // "*" matches everything, so it must agree with the unpatterned listing.
      assert(catalog.listTables("db1", "*").toSet == catalog.listTables("db1").toSet)
      assert(catalog.listTables("db2", "*").toSet == catalog.listTables("db2").toSet)
      assert(catalog.listTables("db2", "tbl*").toSet ==
        Set(TableIdentifier("tbl1"),
          TableIdentifier("tbl4"),
          TableIdentifier("tbl1", Some("db2")),
TableIdentifier("tbl2", Some("db2")))) assert(catalog.listTables("db2", "*1").toSet == Set(TableIdentifier("tbl1"), TableIdentifier("tbl1", Some("db2")))) intercept[NoSuchDatabaseException] { catalog.listTables("unknown_db", "*") } } } // -------------------------------------------------------------------------- // Partitions // -------------------------------------------------------------------------- test("basic create and list partitions") { withEmptyCatalog { catalog => catalog.createDatabase(newDb("mydb"), ignoreIfExists = false) catalog.createTable(newTable("tbl", "mydb"), ignoreIfExists = false) catalog.createPartitions( TableIdentifier("tbl", Some("mydb")), Seq(part1, part2), ignoreIfExists = false) assert(catalogPartitionsEqual( catalog.externalCatalog.listPartitions("mydb", "tbl"), part1, part2)) // Create partitions without explicitly specifying database catalog.setCurrentDatabase("mydb") catalog.createPartitions( TableIdentifier("tbl"), Seq(partWithMixedOrder), ignoreIfExists = false) assert(catalogPartitionsEqual( catalog.externalCatalog.listPartitions("mydb", "tbl"), part1, part2, partWithMixedOrder)) } } test("create partitions when database/table does not exist") { withBasicCatalog { catalog => intercept[NoSuchDatabaseException] { catalog.createPartitions( TableIdentifier("tbl1", Some("unknown_db")), Seq(), ignoreIfExists = false) } intercept[NoSuchTableException] { catalog.createPartitions( TableIdentifier("does_not_exist", Some("db2")), Seq(), ignoreIfExists = false) } } } test("create partitions that already exist") { withBasicCatalog { catalog => intercept[AnalysisException] { catalog.createPartitions( TableIdentifier("tbl2", Some("db2")), Seq(part1), ignoreIfExists = false) } catalog.createPartitions( TableIdentifier("tbl2", Some("db2")), Seq(part1), ignoreIfExists = true) } } test("create partitions with invalid part spec") { withBasicCatalog { catalog => var e = intercept[AnalysisException] { catalog.createPartitions( TableIdentifier("tbl2", 
Some("db2")), Seq(part1, partWithLessColumns), ignoreIfExists = false) } assert(e.getMessage.contains("Partition spec is invalid. The spec (a) must match " + "the partition spec (a, b) defined in table '`db2`.`tbl2`'")) e = intercept[AnalysisException] { catalog.createPartitions( TableIdentifier("tbl2", Some("db2")), Seq(part1, partWithMoreColumns), ignoreIfExists = true) } assert(e.getMessage.contains("Partition spec is invalid. The spec (a, b, c) must match " + "the partition spec (a, b) defined in table '`db2`.`tbl2`'")) e = intercept[AnalysisException] { catalog.createPartitions( TableIdentifier("tbl2", Some("db2")), Seq(partWithUnknownColumns, part1), ignoreIfExists = true) } assert(e.getMessage.contains("Partition spec is invalid. The spec (a, unknown) must match " + "the partition spec (a, b) defined in table '`db2`.`tbl2`'")) e = intercept[AnalysisException] { catalog.createPartitions( TableIdentifier("tbl2", Some("db2")), Seq(partWithEmptyValue, part1), ignoreIfExists = true) } assert(e.getMessage.contains("Partition spec is invalid. 
The spec ([a=3, b=]) contains an " + "empty partition column value")) } } test("drop partitions") { withBasicCatalog { catalog => assert(catalogPartitionsEqual( catalog.externalCatalog.listPartitions("db2", "tbl2"), part1, part2)) catalog.dropPartitions( TableIdentifier("tbl2", Some("db2")), Seq(part1.spec), ignoreIfNotExists = false, purge = false, retainData = false) assert(catalogPartitionsEqual( catalog.externalCatalog.listPartitions("db2", "tbl2"), part2)) // Drop partitions without explicitly specifying database catalog.setCurrentDatabase("db2") catalog.dropPartitions( TableIdentifier("tbl2"), Seq(part2.spec), ignoreIfNotExists = false, purge = false, retainData = false) assert(catalog.externalCatalog.listPartitions("db2", "tbl2").isEmpty) // Drop multiple partitions at once catalog.createPartitions( TableIdentifier("tbl2", Some("db2")), Seq(part1, part2), ignoreIfExists = false) assert(catalogPartitionsEqual( catalog.externalCatalog.listPartitions("db2", "tbl2"), part1, part2)) catalog.dropPartitions( TableIdentifier("tbl2", Some("db2")), Seq(part1.spec, part2.spec), ignoreIfNotExists = false, purge = false, retainData = false) assert(catalog.externalCatalog.listPartitions("db2", "tbl2").isEmpty) } } test("drop partitions when database/table does not exist") { withBasicCatalog { catalog => intercept[NoSuchDatabaseException] { catalog.dropPartitions( TableIdentifier("tbl1", Some("unknown_db")), Seq(), ignoreIfNotExists = false, purge = false, retainData = false) } intercept[NoSuchTableException] { catalog.dropPartitions( TableIdentifier("does_not_exist", Some("db2")), Seq(), ignoreIfNotExists = false, purge = false, retainData = false) } } } test("drop partitions that do not exist") { withBasicCatalog { catalog => intercept[AnalysisException] { catalog.dropPartitions( TableIdentifier("tbl2", Some("db2")), Seq(part3.spec), ignoreIfNotExists = false, purge = false, retainData = false) } catalog.dropPartitions( TableIdentifier("tbl2", Some("db2")), 
        Seq(part3.spec), ignoreIfNotExists = true, purge = false, retainData = false)
    }
  }

  // Drop uses the looser "must be contained within" rule: a partial spec is
  // allowed, but extra, unknown, or empty-valued columns are rejected.
  test("drop partitions with invalid partition spec") {
    withBasicCatalog { catalog =>
      var e = intercept[AnalysisException] {
        catalog.dropPartitions(
          TableIdentifier("tbl2", Some("db2")),
          Seq(partWithMoreColumns.spec),
          ignoreIfNotExists = false,
          purge = false,
          retainData = false)
      }
      assert(e.getMessage.contains(
        "Partition spec is invalid. The spec (a, b, c) must be contained within " +
          "the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
      e = intercept[AnalysisException] {
        catalog.dropPartitions(
          TableIdentifier("tbl2", Some("db2")),
          Seq(partWithUnknownColumns.spec),
          ignoreIfNotExists = false,
          purge = false,
          retainData = false)
      }
      assert(e.getMessage.contains(
        "Partition spec is invalid. The spec (a, unknown) must be contained within " +
          "the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
      e = intercept[AnalysisException] {
        catalog.dropPartitions(
          TableIdentifier("tbl2", Some("db2")),
          Seq(partWithEmptyValue.spec, part1.spec),
          ignoreIfNotExists = false,
          purge = false,
          retainData = false)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec ([a=3, b=]) contains an " +
        "empty partition column value"))
    }
  }

  test("get partition") {
    withBasicCatalog { catalog =>
      assert(catalog.getPartition(
        TableIdentifier("tbl2", Some("db2")), part1.spec).spec == part1.spec)
      assert(catalog.getPartition(
        TableIdentifier("tbl2", Some("db2")), part2.spec).spec == part2.spec)
      // Get partition without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      assert(catalog.getPartition(TableIdentifier("tbl2"), part1.spec).spec == part1.spec)
      assert(catalog.getPartition(TableIdentifier("tbl2"), part2.spec).spec == part2.spec)
      // Get non-existent partition
      intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl2"), part3.spec)
      }
    }
  }

  test("get partition when database/table does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.getPartition(TableIdentifier("tbl1", Some("unknown_db")), part1.spec)
      }
      intercept[NoSuchTableException] {
        catalog.getPartition(TableIdentifier("does_not_exist", Some("db2")), part1.spec)
      }
    }
  }

  // Get uses the strict "must match" rule: the spec has to name exactly the
  // table's partition columns.
  test("get partition with invalid partition spec") {
    withBasicCatalog { catalog =>
      var e = intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl1", Some("db2")), partWithLessColumns.spec)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
      e = intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl1", Some("db2")), partWithMoreColumns.spec)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, b, c) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
      e = intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl1", Some("db2")), partWithUnknownColumns.spec)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, unknown) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
      e = intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl1", Some("db2")), partWithEmptyValue.spec)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec ([a=3, b=]) contains an " +
        "empty partition column value"))
    }
  }

  test("rename partitions") {
    withBasicCatalog { catalog =>
      val newPart1 = part1.copy(spec = Map("a" -> "100", "b" -> "101"))
      val newPart2 = part2.copy(spec = Map("a" -> "200", "b" -> "201"))
      val newSpecs = Seq(newPart1.spec, newPart2.spec)
      catalog.renamePartitions(
        TableIdentifier("tbl2", Some("db2")), Seq(part1.spec, part2.spec), newSpecs)
      assert(catalog.getPartition(
        TableIdentifier("tbl2", Some("db2")), newPart1.spec).spec === newPart1.spec)
      assert(catalog.getPartition(
        TableIdentifier("tbl2", Some("db2")), newPart2.spec).spec === newPart2.spec)
      // The old specs are gone after the rename.
      intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part1.spec)
      }
      intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part2.spec)
      }
      // Rename partitions without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      // Renames the new specs back to the original ones.
      catalog.renamePartitions(TableIdentifier("tbl2"), newSpecs, Seq(part1.spec, part2.spec))
      assert(catalog.getPartition(TableIdentifier("tbl2"), part1.spec).spec === part1.spec)
      assert(catalog.getPartition(TableIdentifier("tbl2"), part2.spec).spec === part2.spec)
      intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl2"), newPart1.spec)
      }
      intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl2"), newPart2.spec)
      }
    }
  }

  test("rename partitions when database/table does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.renamePartitions(
          TableIdentifier("tbl1", Some("unknown_db")), Seq(part1.spec), Seq(part2.spec))
      }
      intercept[NoSuchTableException] {
        catalog.renamePartitions(
          TableIdentifier("does_not_exist",
Some("db2")), Seq(part1.spec), Seq(part2.spec)) } } } test("rename partition with invalid partition spec") { withBasicCatalog { catalog => var e = intercept[AnalysisException] { catalog.renamePartitions( TableIdentifier("tbl1", Some("db2")), Seq(part1.spec), Seq(partWithLessColumns.spec)) } assert(e.getMessage.contains("Partition spec is invalid. The spec (a) must match " + "the partition spec (a, b) defined in table '`db2`.`tbl1`'")) e = intercept[AnalysisException] { catalog.renamePartitions( TableIdentifier("tbl1", Some("db2")), Seq(part1.spec), Seq(partWithMoreColumns.spec)) } assert(e.getMessage.contains("Partition spec is invalid. The spec (a, b, c) must match " + "the partition spec (a, b) defined in table '`db2`.`tbl1`'")) e = intercept[AnalysisException] { catalog.renamePartitions( TableIdentifier("tbl1", Some("db2")), Seq(part1.spec), Seq(partWithUnknownColumns.spec)) } assert(e.getMessage.contains("Partition spec is invalid. The spec (a, unknown) must match " + "the partition spec (a, b) defined in table '`db2`.`tbl1`'")) e = intercept[AnalysisException] { catalog.renamePartitions( TableIdentifier("tbl1", Some("db2")), Seq(part1.spec), Seq(partWithEmptyValue.spec)) } assert(e.getMessage.contains("Partition spec is invalid. 
The spec ([a=3, b=]) contains an " + "empty partition column value")) } } test("alter partitions") { withBasicCatalog { catalog => val newLocation = newUriForDatabase() // Alter but keep spec the same val oldPart1 = catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part1.spec) val oldPart2 = catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part2.spec) catalog.alterPartitions(TableIdentifier("tbl2", Some("db2")), Seq( oldPart1.copy(storage = storageFormat.copy(locationUri = Some(newLocation))), oldPart2.copy(storage = storageFormat.copy(locationUri = Some(newLocation))))) val newPart1 = catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part1.spec) val newPart2 = catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part2.spec) assert(newPart1.storage.locationUri == Some(newLocation)) assert(newPart2.storage.locationUri == Some(newLocation)) assert(oldPart1.storage.locationUri != Some(newLocation)) assert(oldPart2.storage.locationUri != Some(newLocation)) // Alter partitions without explicitly specifying database catalog.setCurrentDatabase("db2") catalog.alterPartitions(TableIdentifier("tbl2"), Seq(oldPart1, oldPart2)) val newerPart1 = catalog.getPartition(TableIdentifier("tbl2"), part1.spec) val newerPart2 = catalog.getPartition(TableIdentifier("tbl2"), part2.spec) assert(oldPart1.storage.locationUri == newerPart1.storage.locationUri) assert(oldPart2.storage.locationUri == newerPart2.storage.locationUri) // Alter but change spec, should fail because new partition specs do not exist yet val badPart1 = part1.copy(spec = Map("a" -> "v1", "b" -> "v2")) val badPart2 = part2.copy(spec = Map("a" -> "v3", "b" -> "v4")) intercept[AnalysisException] { catalog.alterPartitions(TableIdentifier("tbl2", Some("db2")), Seq(badPart1, badPart2)) } } } test("alter partitions when database/table does not exist") { withBasicCatalog { catalog => intercept[NoSuchDatabaseException] { catalog.alterPartitions(TableIdentifier("tbl1", Some("unknown_db")), 
          Seq(part1))
      }
      intercept[NoSuchTableException] {
        catalog.alterPartitions(TableIdentifier("does_not_exist", Some("db2")), Seq(part1))
      }
    }
  }

  test("alter partition with invalid partition spec") {
    withBasicCatalog { catalog =>
      var e = intercept[AnalysisException] {
        catalog.alterPartitions(TableIdentifier("tbl1", Some("db2")), Seq(partWithLessColumns))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
      e = intercept[AnalysisException] {
        catalog.alterPartitions(TableIdentifier("tbl1", Some("db2")), Seq(partWithMoreColumns))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, b, c) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
      e = intercept[AnalysisException] {
        catalog.alterPartitions(TableIdentifier("tbl1", Some("db2")), Seq(partWithUnknownColumns))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, unknown) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
      e = intercept[AnalysisException] {
        catalog.alterPartitions(TableIdentifier("tbl1", Some("db2")), Seq(partWithEmptyValue))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec ([a=3, b=]) contains an " +
        "empty partition column value"))
    }
  }

  test("list partition names") {
    withBasicCatalog { catalog =>
      val expectedPartitionNames = Seq("a=1/b=2", "a=3/b=4")
      assert(catalog.listPartitionNames(TableIdentifier("tbl2", Some("db2"))) ==
        expectedPartitionNames)
      // List partition names without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      assert(catalog.listPartitionNames(TableIdentifier("tbl2")) == expectedPartitionNames)
    }
  }

  test("list partition names with partial partition spec") {
    withBasicCatalog { catalog =>
      assert(
        catalog.listPartitionNames(TableIdentifier("tbl2", Some("db2")), Some(Map("a" -> "1"))) ==
          Seq("a=1/b=2"))
    }
  }

  test("list partition names with invalid partial partition spec") {
    withBasicCatalog { catalog =>
      var e = intercept[AnalysisException] {
        catalog.listPartitionNames(TableIdentifier("tbl2", Some("db2")),
          Some(partWithMoreColumns.spec))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, b, c) must be " +
        "contained within the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
      e = intercept[AnalysisException] {
        catalog.listPartitionNames(TableIdentifier("tbl2", Some("db2")),
          Some(partWithUnknownColumns.spec))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, unknown) must be " +
        "contained within the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
      e = intercept[AnalysisException] {
        catalog.listPartitionNames(TableIdentifier("tbl2", Some("db2")),
          Some(partWithEmptyValue.spec))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec ([a=3, b=]) contains an " +
        "empty partition column value"))
    }
  }

  test("list partitions") {
    withBasicCatalog { catalog =>
      assert(catalogPartitionsEqual(
        catalog.listPartitions(TableIdentifier("tbl2", Some("db2"))), part1, part2))
      // List partitions without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      assert(catalogPartitionsEqual(catalog.listPartitions(TableIdentifier("tbl2")), part1, part2))
    }
  }

  test("list partitions with partial partition spec") {
    withBasicCatalog { catalog =>
      assert(catalogPartitionsEqual(
        catalog.listPartitions(TableIdentifier("tbl2", Some("db2")), Some(Map("a" -> "1"))),
        part1))
    }
  }

  test("list partitions with invalid partial partition spec") {
    withBasicCatalog { catalog =>
      var e = intercept[AnalysisException] {
        catalog.listPartitions(TableIdentifier("tbl2", Some("db2")),
          Some(partWithMoreColumns.spec))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, b, c) must be " +
        "contained within the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
      e = intercept[AnalysisException] {
        catalog.listPartitions(TableIdentifier("tbl2", Some("db2")),
          Some(partWithUnknownColumns.spec))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, unknown) must be " +
        "contained within the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
      e = intercept[AnalysisException] {
        catalog.listPartitions(TableIdentifier("tbl2", Some("db2")),
          Some(partWithEmptyValue.spec))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec ([a=3, b=]) contains an " +
        "empty partition column value"))
    }
  }

  test("list partitions when database/table does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.listPartitions(TableIdentifier("tbl1", Some("unknown_db")))
      }
      intercept[NoSuchTableException] {
        catalog.listPartitions(TableIdentifier("does_not_exist", Some("db2")))
      }
    }
  }

  // Compares two sets of partitions while normalizing away fields the catalog
  // implementation may fill in on its own.
  private def catalogPartitionsEqual(
      actualParts: Seq[CatalogTablePartition],
      expectedParts: CatalogTablePartition*): Boolean = {
    // ExternalCatalog may set a default location for partitions, here we ignore the partition
    // location when comparing them.
    // And for hive serde table, hive metastore will set some values(e.g.transient_lastDdlTime)
    // in table's parameters and storage's properties, here we also ignore them.
    val actualPartsNormalize = actualParts.map(p =>
      p.copy(parameters = Map.empty, storage = p.storage.copy(
        properties = Map.empty, locationUri = None, serde = None))).toSet
    val expectedPartsNormalize = expectedParts.map(p =>
      p.copy(parameters = Map.empty, storage = p.storage.copy(
        properties = Map.empty, locationUri = None, serde = None))).toSet
    actualPartsNormalize == expectedPartsNormalize
  }

  // --------------------------------------------------------------------------
  // Functions
  // --------------------------------------------------------------------------

  test("basic create and list functions") {
    withEmptyCatalog { catalog =>
      catalog.createDatabase(newDb("mydb"), ignoreIfExists = false)
      catalog.createFunction(newFunc("myfunc", Some("mydb")), ignoreIfExists = false)
      assert(catalog.externalCatalog.listFunctions("mydb", "*").toSet == Set("myfunc"))
      // Create function without explicitly specifying database
      catalog.setCurrentDatabase("mydb")
      catalog.createFunction(newFunc("myfunc2"), ignoreIfExists = false)
      assert(catalog.externalCatalog.listFunctions("mydb", "*").toSet == Set("myfunc", "myfunc2"))
    }
  }

  test("create function when database does not exist") {
    withBasicCatalog {
      catalog =>
        intercept[NoSuchDatabaseException] {
          catalog.createFunction(
            newFunc("func5", Some("does_not_exist")), ignoreIfExists = false)
        }
    }
  }

  test("create function that already exists") {
    withBasicCatalog { catalog =>
      intercept[FunctionAlreadyExistsException] {
        catalog.createFunction(newFunc("func1", Some("db2")), ignoreIfExists = false)
      }
      catalog.createFunction(newFunc("func1", Some("db2")), ignoreIfExists = true)
    }
  }

  // A temp function is registered with an ExpressionInfo plus a builder that
  // maps the call's argument expressions to a result expression.
  test("create temp function") {
    withBasicCatalog { catalog =>
      val tempFunc1 = (e: Seq[Expression]) => e.head
      val tempFunc2 = (e: Seq[Expression]) => e.last
      val info1 = new ExpressionInfo("tempFunc1", "temp1")
      val info2 = new ExpressionInfo("tempFunc2", "temp2")
      catalog.createTempFunction("temp1", info1, tempFunc1, ignoreIfExists = false)
      catalog.createTempFunction("temp2", info2, tempFunc2, ignoreIfExists = false)
      val arguments = Seq(Literal(1), Literal(2), Literal(3))
      // "temp1" returns its first argument, "temp2" its last.
      assert(catalog.lookupFunction(FunctionIdentifier("temp1"), arguments) === Literal(1))
      assert(catalog.lookupFunction(FunctionIdentifier("temp2"), arguments) === Literal(3))
      // Temporary function does not exist.
      intercept[NoSuchFunctionException] {
        catalog.lookupFunction(FunctionIdentifier("temp3"), arguments)
      }
      val tempFunc3 = (e: Seq[Expression]) => Literal(e.size)
      val info3 = new ExpressionInfo("tempFunc3", "temp1")
      // Temporary function already exists
      intercept[TempFunctionAlreadyExistsException] {
        catalog.createTempFunction("temp1", info3, tempFunc3, ignoreIfExists = false)
      }
      // Temporary function is overridden
      catalog.createTempFunction("temp1", info3, tempFunc3, ignoreIfExists = true)
      assert(
        catalog.lookupFunction(
          FunctionIdentifier("temp1"), arguments) === Literal(arguments.length))
    }
  }

  test("isTemporaryFunction") {
    withBasicCatalog { catalog =>
      // Returns false when the function does not exist
      assert(!catalog.isTemporaryFunction(FunctionIdentifier("temp1")))
      val tempFunc1 = (e: Seq[Expression]) => e.head
      val info1 = new ExpressionInfo("tempFunc1", "temp1")
      catalog.createTempFunction("temp1", info1, tempFunc1, ignoreIfExists = false)
      // Returns true when the function is temporary
      assert(catalog.isTemporaryFunction(FunctionIdentifier("temp1")))
      // Returns false when the function is permanent
      assert(catalog.externalCatalog.listFunctions("db2", "*").toSet == Set("func1"))
      assert(!catalog.isTemporaryFunction(FunctionIdentifier("func1", Some("db2"))))
      assert(!catalog.isTemporaryFunction(FunctionIdentifier("db2.func1")))
      catalog.setCurrentDatabase("db2")
      assert(!catalog.isTemporaryFunction(FunctionIdentifier("func1")))
      // Returns false when the function is built-in or hive
      assert(FunctionRegistry.builtin.functionExists("sum"))
      assert(!catalog.isTemporaryFunction(FunctionIdentifier("sum")))
      assert(!catalog.isTemporaryFunction(FunctionIdentifier("histogram_numeric")))
    }
  }

  test("drop function") {
    withBasicCatalog { catalog =>
      assert(catalog.externalCatalog.listFunctions("db2", "*").toSet == Set("func1"))
      catalog.dropFunction(
        FunctionIdentifier("func1", Some("db2")), ignoreIfNotExists = false)
      assert(catalog.externalCatalog.listFunctions("db2", "*").isEmpty)
      // Drop function without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      catalog.createFunction(newFunc("func2", Some("db2")), ignoreIfExists = false)
      assert(catalog.externalCatalog.listFunctions("db2", "*").toSet == Set("func2"))
      catalog.dropFunction(FunctionIdentifier("func2"), ignoreIfNotExists = false)
      assert(catalog.externalCatalog.listFunctions("db2", "*").isEmpty)
    }
  }

  test("drop function when database/function does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.dropFunction(
          FunctionIdentifier("something", Some("unknown_db")), ignoreIfNotExists = false)
      }
      intercept[NoSuchFunctionException] {
        catalog.dropFunction(FunctionIdentifier("does_not_exist"), ignoreIfNotExists = false)
      }
      // Tolerated when ignoreIfNotExists = true.
      catalog.dropFunction(FunctionIdentifier("does_not_exist"), ignoreIfNotExists = true)
    }
  }

  test("drop temp function") {
    withBasicCatalog { catalog =>
      val info = new ExpressionInfo("tempFunc", "func1")
      val tempFunc = (e: Seq[Expression]) => e.head
      catalog.createTempFunction("func1", info, tempFunc, ignoreIfExists = false)
      val arguments = Seq(Literal(1), Literal(2), Literal(3))
      assert(catalog.lookupFunction(FunctionIdentifier("func1"), arguments) === Literal(1))
      catalog.dropTempFunction("func1", ignoreIfNotExists = false)
      intercept[NoSuchFunctionException] {
        catalog.lookupFunction(FunctionIdentifier("func1"), arguments)
      }
      intercept[NoSuchTempFunctionException] {
        catalog.dropTempFunction("func1", ignoreIfNotExists = false)
      }
      catalog.dropTempFunction("func1", ignoreIfNotExists = true)
    }
  }

  test("get function") {
    withBasicCatalog { catalog =>
      val expected = CatalogFunction(FunctionIdentifier("func1", Some("db2")), funcClass,
        Seq.empty[FunctionResource])
      assert(catalog.getFunctionMetadata(FunctionIdentifier("func1", Some("db2"))) == expected)
      // Get function without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      assert(catalog.getFunctionMetadata(FunctionIdentifier("func1")) == expected)
    }
  }

  test("get function when database/function does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.getFunctionMetadata(FunctionIdentifier("func1", Some("unknown_db")))
      }
      intercept[NoSuchFunctionException] {
        catalog.getFunctionMetadata(FunctionIdentifier("does_not_exist", Some("db2")))
      }
    }
  }

  test("lookup temp function") {
    withBasicCatalog { catalog =>
      val info1 = new ExpressionInfo("tempFunc1", "func1")
      val tempFunc1 = (e: Seq[Expression]) => e.head
      catalog.createTempFunction("func1", info1, tempFunc1, ignoreIfExists = false)
      assert(catalog.lookupFunction(
        FunctionIdentifier("func1"), Seq(Literal(1), Literal(2), Literal(3))) == Literal(1))
      catalog.dropTempFunction("func1", ignoreIfNotExists = false)
      intercept[NoSuchFunctionException] {
        catalog.lookupFunction(FunctionIdentifier("func1"), Seq(Literal(1), Literal(2), Literal(3)))
      }
    }
  }

  // Temp functions appear (unqualified) in every database's listing; the
  // pattern applies to both temp and persistent names.
  test("list functions") {
    withBasicCatalog { catalog =>
      val info1 = new ExpressionInfo("tempFunc1", "func1")
      val info2 = new ExpressionInfo("tempFunc2", "yes_me")
      val tempFunc1 = (e: Seq[Expression]) => e.head
      val tempFunc2 = (e: Seq[Expression]) => e.last
      catalog.createFunction(newFunc("func2", Some("db2")), ignoreIfExists = false)
      catalog.createFunction(newFunc("not_me", Some("db2")), ignoreIfExists = false)
      catalog.createTempFunction("func1", info1, tempFunc1, ignoreIfExists = false)
      catalog.createTempFunction("yes_me", info2, tempFunc2, ignoreIfExists = false)
      assert(catalog.listFunctions("db1", "*").map(_._1).toSet ==
        Set(FunctionIdentifier("func1"),
          FunctionIdentifier("yes_me")))
      assert(catalog.listFunctions("db2", "*").map(_._1).toSet ==
        Set(FunctionIdentifier("func1"),
          FunctionIdentifier("yes_me"),
          FunctionIdentifier("func1", Some("db2")),
          FunctionIdentifier("func2", Some("db2")),
          FunctionIdentifier("not_me", Some("db2"))))
      assert(catalog.listFunctions("db2", "func*").map(_._1).toSet ==
        Set(FunctionIdentifier("func1"),
          FunctionIdentifier("func1", Some("db2")),
          FunctionIdentifier("func2", Some("db2"))))
    }
  }

  test("list 
functions when database does not exist") { withBasicCatalog { catalog => intercept[NoSuchDatabaseException] { catalog.listFunctions("unknown_db", "func*") } } } test("clone SessionCatalog - temp views") { withEmptyCatalog { original => val tempTable1 = Range(1, 10, 1, 10) original.createTempView("copytest1", tempTable1, overrideIfExists = false) // check if tables copied over val clone = original.newSessionCatalogWith( SimpleCatalystConf(caseSensitiveAnalysis = true), new Configuration(), new SimpleFunctionRegistry, CatalystSqlParser) assert(original ne clone) assert(clone.getTempView("copytest1") == Some(tempTable1)) // check if clone and original independent clone.dropTable(TableIdentifier("copytest1"), ignoreIfNotExists = false, purge = false) assert(original.getTempView("copytest1") == Some(tempTable1)) val tempTable2 = Range(1, 20, 2, 10) original.createTempView("copytest2", tempTable2, overrideIfExists = false) assert(clone.getTempView("copytest2").isEmpty) } } test("clone SessionCatalog - current db") { withEmptyCatalog { original => val db1 = "db1" val db2 = "db2" val db3 = "db3" original.externalCatalog.createDatabase(newDb(db1), ignoreIfExists = true) original.externalCatalog.createDatabase(newDb(db2), ignoreIfExists = true) original.externalCatalog.createDatabase(newDb(db3), ignoreIfExists = true) original.setCurrentDatabase(db1) // check if current db copied over val clone = original.newSessionCatalogWith( SimpleCatalystConf(caseSensitiveAnalysis = true), new Configuration(), new SimpleFunctionRegistry, CatalystSqlParser) assert(original ne clone) assert(clone.getCurrentDatabase == db1) // check if clone and original independent clone.setCurrentDatabase(db2) assert(original.getCurrentDatabase == db1) original.setCurrentDatabase(db3) assert(clone.getCurrentDatabase == db2) } } test("SPARK-19737: detect undefined functions without triggering relation resolution") { import org.apache.spark.sql.catalyst.dsl.plans._ Seq(true, false) foreach { caseSensitive 
=> val conf = SimpleCatalystConf(caseSensitive) val catalog = new SessionCatalog(newBasicCatalog(), new SimpleFunctionRegistry, conf) try { val analyzer = new Analyzer(catalog, conf) // The analyzer should report the undefined function rather than the undefined table first. val cause = intercept[AnalysisException] { analyzer.execute( UnresolvedRelation(TableIdentifier("undefined_table")).select( UnresolvedFunction("undefined_fn", Nil, isDistinct = false) ) ) } assert(cause.getMessage.contains("Undefined function: 'undefined_fn'")) } finally { catalog.reset() } } } }
jianran/spark
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala
Scala
apache-2.0
59,156
package com.haskforce.codeInsight.visibleModules

import com.haskforce.highlighting.annotation.external.GhcMod
import com.haskforce.utils.ExecUtil
import com.intellij.openapi.module.{Module, ModuleUtilCore}
import com.intellij.psi.PsiFile

/** Supplies the set of Haskell modules visible to `module`, as reported by ghc-mod. */
class GhcModVisibleModulesProvider(
  module: Module,
  workDir: String
) extends VisibleModulesProvider {

  /** Queries ghc-mod for visible modules; a null result is treated as "none". */
  override def getVisibleModules: Array[String] = GhcMod.list(module, workDir) match {
    case null    => Array.empty
    case modules => modules
  }
}

object GhcModVisibleModulesProvider {

  /** Builds a provider for `psiFile`, or None when ghc-mod is not configured,
    * no module can be found for the file, or no work directory can be guessed. */
  def create(psiFile: PsiFile): Option[GhcModVisibleModulesProvider] =
    // Guard against ghc-mod not being configured before doing any further lookup.
    Option(GhcMod.getPath(psiFile.getProject)).flatMap { _ =>
      Option(ModuleUtilCore.findModuleForPsiElement(psiFile)).flatMap { module =>
        Option(ExecUtil.guessWorkDir(module)).map { workDir =>
          new GhcModVisibleModulesProvider(module, workDir)
        }
      }
    }
}
carymrobbins/intellij-haskforce
src/com/haskforce/codeInsight/visibleModules/GhcModVisibleModulesProvider.scala
Scala
apache-2.0
874
/*
 * stateless-future-util
 * Copyright 2014 深圳岂凡网络有限公司 (Shenzhen QiFun Network Corp., LTD)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.qifun.statelessFuture
package util

import java.util.concurrent.atomic.AtomicReference
import scala.util.control.Exception.Catcher
import scala.util.control.TailCalls._
import scala.util.Failure
import java.util.concurrent.CancellationException
import scala.util.Success
import scala.util.Try
import scala.collection.immutable.Queue
import scala.util.control.NoStackTrace

object CancellablePromise {

  // Back-port: gives Scala 2.10's TailRec the flatMap that 2.11 added natively.
  private implicit class Scala210TailRec[A](underlying: TailRec[A]) {
    final def flatMap[B](f: A => TailRec[B]): TailRec[B] = {
      tailcall(f(underlying.result))
    }
  }

  // NOTE(review): declared but not referenced anywhere in this file.
  private type CancelFunction = () => Unit

  // Callbacks registered while the promise is still pending:
  // a success continuation paired with a failure catcher.
  private type HandlerList[AwaitResult] = Queue[(AwaitResult => TailRec[Unit], Catcher[TailRec[Unit]])]

  // Left(handlers): still pending; Right(result): completed.
  private type State[AwaitResult] = Either[HandlerList[AwaitResult], Try[AwaitResult]]

  // The single atomically-updated cell holding the promise's state.
  private type Underlying[AwaitResult] = AtomicReference[State[AwaitResult]]

  /** Creates a new, pending [[CancellablePromise]] with no registered handlers. */
  final def apply[AwaitResult] = new AnyValCancellablePromise[AwaitResult](new Underlying[AwaitResult](Left(Queue.empty)))

  // Value-class wrapper so a promise costs no more than its AtomicReference.
  final class AnyValCancellablePromise[AwaitResult] private[CancellablePromise] (
    val state: AtomicReference[Either[Queue[(AwaitResult => TailRec[Unit], Catcher[TailRec[Unit]])], Try[AwaitResult]]])
    extends AnyVal with CancellablePromise[AwaitResult]

  // Shared failure used by cancel(); allocated once.
  private val CancellationFailure = Failure(new CancellationException)

}

/**
 * A [[Future.Stateful]] that will be completed when another [[Future]] is completed,
 * and that can additionally be cancelled via [[cancel]].
 *
 * @note The internal `state` reference should never be accessed by other modules.
 */
trait CancellablePromise[AwaitResult] extends Any with Promise[AwaitResult] with CancellableFuture[AwaitResult] {
  // Design notes (translated from the original Chinese comments):
  // - A C#-style implicit CancellationToken parameter would be easier for users.
  // - Exposing a CancellableFuture is more natural for implementors, because it is
  //   closer to the underlying Java APIs, and it also avoids one event callback.
  // - AI-style code always ends up implementing a CancellationToken-like mechanism
  //   anyway — e.g. the Interruptor written earlier.

  /** Cancels this promise by completing it with a [[java.util.concurrent.CancellationException]]
    * failure; has no effect if the promise is already completed. */
  final def cancel() {
    tryComplete(CancellablePromise.CancellationFailure).result
  }
}
Atry/stateless-future-util
src/main/scala/com/qifun/statelessFuture/util/CancellablePromise.scala
Scala
apache-2.0
2,805
package stir.pancake

/** A unit step on the grid, expressed as a (column, row) delta. */
sealed trait Direction {
  val column: Int
  val row: Int
}

/** Step one row upwards. */
case object Up extends Direction {
  val column: Int = 0
  val row: Int = 1
}

/** Step one row downwards. */
case object Down extends Direction {
  val column: Int = 0
  val row: Int = -1
}

/** Step one column to the left. */
case object Left extends Direction {
  val column: Int = -1
  val row: Int = 0
}

/** Step one column to the right. */
case object Right extends Direction {
  val column: Int = 1
  val row: Int = 0
}

object Direction {
  /** All four directions, in the fixed order Up, Down, Left, Right. */
  def list: List[Direction] = List(Up, Down, Left, Right)
}
zaklogician/silver-pancake
src/stir/pancake/Directions.scala
Scala
bsd-3-clause
475
/**
 * Copyright (c) 2002-2012 "Neo Technology,"
 * Network Engine for Objects in Lund AB [http://neotechnology.com]
 *
 * This file is part of Neo4j.
 *
 * Neo4j is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package org.neo4j.cypher.internal.commands.expressions

import org.neo4j.cypher.internal.symbols.{SymbolTable, CypherType, ScalarType}
import collection.Map
import org.neo4j.cypher.internal.pipes.ExecutionContext

/** The NULL literal expression: evaluates to null for every row. */
case class Null() extends Expression {
  // Whatever the execution context, the NULL literal yields null.
  def apply(v1: ExecutionContext) = null

  // A leaf node: rewriting just applies the transformation to this node itself.
  def rewrite(f: (Expression) => Expression): Expression = f(this)

  def filter(f: (Expression) => Boolean): Seq[Expression] =
    if (f(this)) Seq(this) else Seq.empty

  def calculateType(symbols: SymbolTable): CypherType = ScalarType()

  // A literal references no identifiers from the symbol table.
  def symbolTableDependencies = Set()
}
dksaputra/community
cypher/src/main/scala/org/neo4j/cypher/internal/commands/expressions/Null.scala
Scala
gpl-3.0
1,364
package org.jetbrains.plugins.scala
package codeInspection
package collections

import com.intellij.testFramework.EditorTestUtil.{SELECTION_END_TAG => END, SELECTION_START_TAG => START}

/**
 * Tests for [[ExistsForallReplaceInspection]]: rewriting `!forall(!p)` as
 * `exists(p)` and `!exists(!p)` as `forall(p)`.
 *
 * Ignat Loskutov
 */
abstract class ExistsForallReplaceTest extends OperationsOnCollectionInspectionTest {

  override protected val classOfInspection: Class[_ <: OperationOnCollectionInspection] =
    classOf[ExistsForallReplaceInspection]
}

class ReplaceForallWithExistsTest extends ExistsForallReplaceTest {

  override protected val hint: String = ScalaInspectionBundle.message("replace.with.exists")

  /** Underscore-lambda form: `!forall(!_.p)` becomes `exists(_.p)`. */
  def test_1(): Unit = {
    val selected = s"""$START!Seq("").forall(!_.isEmpty)$END"""
    checkTextHasError(selected)
    val text = """!Seq("").forall(!_.isEmpty)"""
    val result = """Seq("").exists(_.isEmpty)"""
    testQuickFix(text, result, hint)
  }

  /** Explicit-lambda form: `!forall(s => !s.p)` becomes `exists(s => s.p)`. */
  def test_2(): Unit = {
    // BUGFIX: `selected` previously duplicated test_1's underscore form, so the
    // highlighting check never exercised the explicit-lambda expression that the
    // quick-fix below is applied to. Keep `selected` in sync with `text`.
    val selected = s"""$START!Seq("").forall(s => !s.isEmpty)$END"""
    checkTextHasError(selected)
    val text = """!Seq("").forall(s => !s.isEmpty)"""
    val result = """Seq("").exists(s => s.isEmpty)"""
    testQuickFix(text, result, hint)
  }
}

class ReplaceSmthWithNotContainsTest extends ExistsForallReplaceTest {

  override protected val hint: String = ScalaInspectionBundle.message("replace.with.forall")

  /** Underscore-lambda form: `!exists(!_.p)` becomes `forall(_.p)`. */
  def test_1(): Unit = {
    val selected = s"""$START!Seq("").exists(!_.isEmpty)$END"""
    checkTextHasError(selected)
    val text = """!Seq("").exists(!_.isEmpty)"""
    val result = """Seq("").forall(_.isEmpty)"""
    testQuickFix(text, result, hint)
  }

  /** Typed-lambda form: `!exists((s: T) => !s.p)` becomes `forall((s: T) => s.p)`. */
  def test_2(): Unit = {
    // BUGFIX: `selected` previously duplicated test_1's underscore form; the
    // highlighting check now targets the same typed-lambda expression as the
    // quick-fix input.
    val selected = s"""$START!Seq("").exists((s: String) => !s.isEmpty)$END"""
    checkTextHasError(selected)
    val text = """!Seq("").exists((s: String) => !s.isEmpty)"""
    val result = """Seq("").forall((s: String) => s.isEmpty)"""
    testQuickFix(text, result, hint)
  }
}
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/codeInspection/collections/ExistsForallReplaceTest.scala
Scala
apache-2.0
1,862
package org.cg.scala.dhc.domelments

import java.util.Optional
import org.cg.spelstuff.{FilterResult, ParseResult, Spel}
import org.springframework.expression.spel.SpelNode

import scala.collection.JavaConverters._

// Flattened record of one SpEL identifier occurrence: the referenced item and
// attribute names, the line it appears on, and the raw identifier text.
case class FlatItemAttributeNSpelItem(refItemName: String, attributeName: String, ln: String, spelIdentifier: String)

// A reference to one attribute of one item, as extracted from a resolve(...) call.
case class ItemAttributeReference(val itemName: String, val attributeName: String)

/**
 * The parsed form of a single SpEL expression: either a parse error, or the
 * plain identifiers it mentions, plus — for resolve(...) expressions — the
 * item/attribute references it contains.
 *
 * Created by ssmertnig on 5/1/17.
 */
class SpelItem(value: String, val error: Option[String], val identifiers: Seq[String], val itemReferences: Option[List[ItemAttributeReference]]) {
  // NOTE(review): equality deliberately ignores `value` and `itemReferences`;
  // two items compare equal when they have the same error and identifiers.
  // Also note: equals is overridden without hashCode — do not use SpelItem as a hash key.
  override def equals(o: scala.Any): Boolean = o.isInstanceOf[SpelItem] && {
    val other = o.asInstanceOf[SpelItem]
    error.equals(other.error) && identifiers.equals(other.identifiers)
  }

  override def toString: String = s"SPEL IDENT $value ${error.toString}"
}

object SpelItem {
  // Fast path: expressions that are a single plain name need no SpEL parsing.
  val SINGLE_NAME_REGEX = "^[A-Za-z0-9\\\\-_]*$".r()
  // Marker for the resolve(...) construct handled by parseResolve.
  val RESOLVE = "resolve("

  /** Parses `value`, choosing the cheapest applicable strategy:
    * single name, resolve(...) expression, or generic property/field filtering. */
  def apply(value: String) = {
    if (SINGLE_NAME_REGEX.findFirstIn(value).isDefined)
      new SpelItem(value, None, Seq(value), None)
    else if (value.toLowerCase.indexOf(RESOLVE) >= 0)
      parseResolve(value)
    else {
      val filtered = Spel.of(value).filterPropertyOrFieldReference()
      if (filtered.parseError.isPresent)
        getParseErrorItem(value, filtered.parseError.get())
      else
        new SpelItem(value, None, filtered.fieldsOrProperties.asScala.toSeq.map(f => f.getName), None)
    }
  }

  /** Parses an expression containing resolve(...), extracting item/attribute references. */
  def parseResolve(spelExpr: String) = {
    val parseResult = Spel.of(spelExpr).parse()
    if (parseResult.parseError.isPresent)
      getParseErrorItem(spelExpr, parseResult.parseError.get())
    else
      new SpelItem(spelExpr, None, Seq(), getReferencesFromSpel(spelExpr, parseResult.root))
  }

  // None when the parser produced no AST root; otherwise the collected references.
  def getReferencesFromSpel(spelExpr: String, root: Optional[SpelNode]) =
    if (!root.isPresent) None
    else Some(collectReferencesFromSpel(spelExpr, root.get(), List()))

  // Detects a resolve(...) nested on the right-hand side of a '+' inside the
  // current node's first child; returns that nested resolve node if present.
  // NOTE(review): matches the OpPlus AST class by fully-qualified name string.
  def getNestedResolveNode(spelExpr: String, node: SpelNode) =
    if (node.getChild(1).getClass.getName.equals("org.springframework.expression.spel.ast.OpPlus")
      && getValue(spelExpr, node.getChild(1).getChild(1)).startsWith(RESOLVE)) Some(node.getChild(1).getChild(1))
    else None

  // see spel.gif in the project root for the ast structure dealt with here
  // Walks nested resolve(...) calls, prepending one ItemAttributeReference per level.
  def collectReferencesFromSpel(spelExpr: String, node: SpelNode, list: List[ItemAttributeReference]): List[ItemAttributeReference] = {
    if (node.getChildCount != 2 || !getValue(spelExpr, node).startsWith(RESOLVE)) list
    else {
      // Second child carries the attribute name being resolved.
      val attributeRef = getValue(spelExpr, node.getChild(1))
      if (node.getChild(0).getChildCount != 2) list
      else {
        val itemRefNode = node.getChild(0).getChild(0);
        val itemRef = getValue(spelExpr, itemRefNode);
        val itemAttributeRef = new ItemAttributeReference(itemRef, attributeRef)
        val nestedResolveNode = getNestedResolveNode(spelExpr, node.getChild(0))
        if (nestedResolveNode.isDefined)
          collectReferencesFromSpel(spelExpr, nestedResolveNode.get, itemAttributeRef :: list)
        else itemAttributeRef :: list
      }
    }
  }

  // Slice of the original expression text covered by `node`, with single quotes stripped.
  def getValue(spelExpr: String, node: SpelNode) =
    spelExpr.substring(node.getStartPosition, node.getEndPosition).replace("'", "")

  // Wraps a parse failure as a SpelItem carrying the error message.
  private def getParseErrorItem(value: String, error: String) = {
    new SpelItem(value, Some(error), Seq(), None)
  }
}
curiosag/datahubchecker
datahubchecker-utility/src/main/scala/org/cg/scala/dhc/domelments/SpelItem.scala
Scala
unlicense
3,481
package com.sksamuel.scapegoat.inspections.exception

import com.sksamuel.scapegoat.InspectionTest

/** Tests for the CatchFatal inspection: a catch clause that names a fatal
  * throwable (e.g. VirtualMachineError, ThreadDeath) should be reported.
  *
  * @author Marconi Lanna */
class CatchFatalTest extends InspectionTest {

  override val inspections = Seq(new CatchFatal)

  "catch _ fatal exception" - {
    "should report warning" in {
      // Fatal type caught via a wildcard binder — still one warning expected.
      val code1 = """object Test { try { } catch { case e : Exception => case _ : VirtualMachineError => } } """.stripMargin
      compileCodeSnippet(code1)
      compiler.scapegoat.feedback.warnings.size shouldBe 1
    }
  }

  "catch e fatal exception" - {
    "should report warning" in {
      // Fatal type (ThreadDeath) caught via a named binder among non-fatal cases.
      val code2 = """object Test { try { } catch { case e : RuntimeException => case x : ThreadDeath => case f : Exception => } } """.stripMargin
      compileCodeSnippet(code2)
      compiler.scapegoat.feedback.warnings.size shouldBe 1
    }
  }

  "catch without fatal exception case" - {
    "should not report warning" in {
      // Only non-fatal exception types — no warning expected.
      val code2 = """object Test { try { } catch { case e : RuntimeException => case f : Exception => } } """.stripMargin
      compileCodeSnippet(code2)
      compiler.scapegoat.feedback.warnings.size shouldBe 0
    }
  }
}
sksamuel/scalac-scapegoat-plugin
src/test/scala/com/sksamuel/scapegoat/inspections/exception/CatchFatalTest.scala
Scala
apache-2.0
1,598
import sbt._
import sbt.Keys._
import releaseit.ReleaseItPlugin._

// NOTE(review): sbt 0.13-style `Build` trait definition (deprecated in sbt 1.x).
object build extends Build {

  // ensure we use proper encoding
  private val encoding: String = sys.props("file.encoding")
  assert(encoding == "UTF-8", s"File encoding must be UTF-8 but was $encoding")

  // Compiler flags shared by every module.
  val commonScalacOptions = Seq(
    "-target:jvm-1.7", //Target platform for object files.
    "-language:postfixOps", //Language: allows postfix operations
    "-language:higherKinds", //Language: enables higher kinds (http://stackoverflow.com/questions/6246719/what-is-a-higher-kinded-type-in-scala)
    "-language:implicitConversions", //Language: enables implicit conversions in code
    "-deprecation", //Emit warning and location for usages of deprecated APIs.
    "-unchecked", //Enable additional warnings where generated code depends on assumptions.
    "-encoding", "UTF-8", //Specify character encoding used by source files.
    "-feature", //Emit warning and location for usages of features that should be imported explicitly.
    "-Xfatal-warnings", //Fail the compilation if there are any warnings.
    "-Xlint", //Enable specific warning
    "-Yno-adapted-args", //Do not adapt an argument list (either by inserting () or creating a tuple) to match the receiver.
    "-Ywarn-dead-code", //Warn when dead code is identified.
    "-Ywarn-numeric-widen", //Warn when numerics are widened.
    "-Ywarn-value-discard", //Warn when non-Unit expression results are unused.
    "-Xfuture" //Turn on future language features.
  )

  // define project as aggregate of our 4 elements
  // Project is a method defined below
  lazy val root = project("project", base = ".")
    .aggregate(service, client, model, acceptance)
    .settings(publishArtifact := false)

  // declare service and its dependencies.
  lazy val service = project("service")
    .dependsOn(model)
    .settings(releaseItSettings ++ Seq(
      name := "project-service",
      scalacOptions ++= commonScalacOptions,
      libraryDependencies ++= Seq(
        logbackClassic,
        scodec,
        c3po,
        scalatest,
        h2
      ),
      // Generates extra resources at compile time via the project's ResourceGenerators helper.
      resourceGenerators in Compile <+= (resourceManaged, baseDirectory, target) map {
        (managedBase: File, base: File, target: File) =>
          ResourceGenerators.generate(managedBase, base, target)
      },
      releaseMainClass := Some("project.service.Boot"),
      releaseToEnvironments := Seq("prod", "dev", "local"),
      releasePackageBuildConfigurationName := "Service_BuildTestAndPackage",
      releaseApplicationDescription := "Project Service"
    ): _*)

  // Client library for talking to the service; shares the model module.
  lazy val client = project("client")
    .dependsOn(model)
    .settings(Seq(
      name := "project-client",
      scalacOptions ++= commonScalacOptions,
      libraryDependencies ++= Seq(
        scalatest,
        scalacache
      )
    ): _*)

  // Shared domain model; intentionally dependency-free.
  lazy val model = project("model").settings(Seq(
    name := "project-model",
    scalacOptions ++= commonScalacOptions,
    libraryDependencies ++= Seq(
    )
  ): _*)

  // End-to-end tests exercising client against service.
  lazy val acceptance = project("acceptance")
    .dependsOn(client, service)
    .settings(Seq(
      name := "project-acceptance",
      scalacOptions ++= commonScalacOptions,
      libraryDependencies ++= Seq(
        scalatest
      )): _*)

  // basic configuration common to all modules
  def project(id: String, base: String = null) = Project(id = id, base = file(Option(base).getOrElse(id))).settings(Seq(
    version := Option(System.getProperty("version")).getOrElse("dev-SNAPSHOT"),
    organization := "com.project",
    Keys.scalaVersion := "2.11.7",
    scalaBinaryVersion := "2.11",
    externalResolvers := Seq(
      Resolver.defaultLocal
    ),
    // NOTE(review): credentials are hard-coded here; consider moving them to a
    // credentials file or environment variables outside version control.
    credentials += Credentials("Sonatype Nexus Repository Manager", "nexus.local", "user", "pwd"),
    publishTo := Some("Nexus repo" at "http://nexus.local:8081/nexus/content/repositories/project-releases/"),
    parallelExecution in Test := true
  ) ++ net.virtualvoid.sbt.graph.Plugin.graphSettings: _*)

  // Third-party dependency coordinates.
  val scalacache = "com.github.cb372" %% "scalacache-guava" % "0.6.4"
  // NOTE(review): `argonaut` is declared but not referenced by any module above.
  val argonaut = "io.argonaut" %% "argonaut" % "6.1-M5"
  val logbackClassic = "ch.qos.logback" % "logback-classic" % "1.1.3"
  val c3po = "com.mchange" % "c3p0" % "0.9.5.1"
  val scodec = "org.scodec" %% "scodec-bits" % "1.0.7"
  val h2 = "com.h2database" % "h2" % "1.4.187"
  val scalatest = "org.scalatest" %% "scalatest" % "2.2.4" % "test"
}
pvillega/sbt-template
project/build.scala
Scala
mit
4,786
import core.CultureHubPlugin
import models.OrganizationConfiguration
import play.api.Play
import play.api.Play.current
import test.Specs2TestContext
import util.OrganizationConfigurationHandler
import play.api.test._
import play.api.test.Helpers._

/**
 * Verifies that organization configurations can be loaded from disk without errors.
 *
 * @author Manuel Bernhardt <[email protected]>
 */
class PlatformSpec extends Specs2TestContext {

  val organizationConfigurationHandler = OrganizationConfigurationHandler

  "The OrganizationConfigurationHandler" should {

    "load configurations from disk into memory" in {
      withTestConfig {
        organizationConfigurationHandler.configure()
        // Build configurations from the Play application config plus all hub plugins;
        // expect at least one configuration and zero build errors.
        val (configurations, errors) = OrganizationConfiguration.buildConfigurations(Play.application.configuration, CultureHubPlugin.hubPlugins)
        configurations.size should not equalTo 0
        errors.size should equalTo(0)
      }
    }
  }
}
delving/culture-hub
test/PlatformSpec.scala
Scala
apache-2.0
877
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
   This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
   http://factorie.cs.umass.edu, http://github.com/factorie
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

package cc.factorie.variable

import cc.factorie.util.Cubbie
import cc.factorie.{util, variable}

import scala.collection.mutable

// For single categorical values

/** A value in a CategoricalDomain.  Each value is assigned an intValue in the range 0...size-1.
    Each value has a category of type C.
    These are the values used to map from words to integer parameter indices, etc.
    @author Andrew McCallum */
trait CategoricalValue[C] extends DiscreteValue {
  def domain: CategoricalDomain[C]
  def category: C
  override def toString: String = category.toString
}

/** A domain for categorical variables.  It stores not only a size, but also the mapping from
    category values (of type T = this.CategoryType) to densely packed integers suitable for
    indices into parameter vectors.  For example, a common use case is mapping Strings (NLP or
    document classification words) into indices, and back.
    Furthermore if domain.gatherCounts = true, this domain will count the number of calls to
    'index'.  Then you can reduce the size of the Domain by calling 'trimBelowCount' or
    'trimBelowSize', which will recreate the new mapping from categories to densely-packed
    non-negative integers (making the old mapping no longer valid).
    Thus, in typical usage you would (1) read in the data, (2) trim the domain, (3) re-read
    the data with the new mapping, creating variables.
    @author Andrew McCallum */
class CategoricalDomain[C] extends DiscreteDomain(0) with IndexedSeq[CategoricalValue[C]] with CategoricalVectorDomain[C] with Domain with cc.factorie.util.ProtectedIntArrayBuffer {

  // Concrete value implementation bound to this domain instance.
  protected class CategoricalValue(val singleIndex:Int, val category:C) extends variable.CategoricalValue[C] {
    override def copy = this
    def domain = CategoricalDomain.this
    def dim1 = CategoricalDomain.this.size
  }
  type Value <: variable.CategoricalValue[C]

  // Convenience constructor: index all given categories, then freeze the domain.
  def this(values:Iterable[C]) = { this(); values.foreach(value(_)); freeze() }

  // Category -> Value map; guarded by `lock` while the domain is mutable.
  private val __indices: java.util.HashMap[C,Value] = new java.util.HashMap[C,Value]
  def _indices = __indices
  private val lock = new util.RWLock

  /** If positive, throw error if size tries to grow larger than it.
      Use for growable multi-dim Factor weightsSet;
      override this method with the largest you think your growable domain will get. */
  var maxSize = -1
  override def dimensionDomain: CategoricalDomain[C] = this
  @inline final override def length = lock.withReadLock { _elements.length }
  // When maxSize is hit: true => throw; false => warn, freeze, and return null.
  var growPastMaxSize: Boolean = true

  /** Return the CategoricalValue associated with the given category.
      If the category is not already in this CategoricalDomain and 'frozen' is false,
      and 'maxSize' will not be exceeded, then add the category to this CategoricalDomain.
      This method is thread-safe so that multiple threads may read and index data simultaneously. */
  def value(category: C): Value = {
    if (category == null) throw new Error("Null is not a valid category.")
    if (_frozen) {
      // Frozen domains are immutable, so the map may be read without locking.
      __indices.get(category)
    } else {
      lock.withReadLock {
        var thisIndex = __indices.get(category)
        if (thisIndex eq null) { // double-tap locking necessary to ensure only one thread adds to _indices
          lock.readUnlock()
          lock.writeLock()
          try {
            // Re-check under the write lock: another thread may have added it meanwhile.
            thisIndex = __indices.get(category)
            if (thisIndex eq null) {
              val m = _elements.length
              if (maxSize > 0 && m >= maxSize) {
                if (growPastMaxSize) throw new Error("Index size exceeded maxSize")
                else {
                  println("Warning - max domain size %d exceeded! Freezing." format maxSize)
                  freeze()
                  return null.asInstanceOf[Value]
                }
              }
              // TODO Consider calling "new String(category)" here to avoid substring memory leak: http://stackoverflow.com/questions/15612157/substring-method-in-string-class-causes-memory-leak
              val e: Value = newCategoricalValue(m, category).asInstanceOf[Value]
              _elements += e
              __indices.put(category, e)
              thisIndex = e
            }
          } finally {
            lock.writeUnlock()
            lock.readLock()
          }
        }
        thisIndex
      }
    }
  }

  /** Return the CategoricalValue at index i. */
  override def apply(i:Int): Value = _elements(i)
  def category(i:Int): C = lock.withReadLock {_elements(i).category.asInstanceOf[C]}
  def categories: Seq[C] = lock.withReadLock { _elements.map(_.category.asInstanceOf[C]) }

  /** Return the integer associated with the category, do not increment the count of category,
      even if gatherCounts is true.  Returns -1 if the category could not be added. */
  def indexOnly(category:C): Int = {
    val v = value(category)
    if (v eq null) -1 else v.intValue
  }

  /** Return the integer associated with the category, and also, if gatherCounts is true,
      also increment the count of category.
      If the category is not already in this CategoricalDomain and 'frozen' is false,
      and 'maxSize' will not be exceeded, then add the category to this CategoricalDomain.
      This method is thread-safe so that multiple threads may read and index data simultaneously. */
  def index(category:C): Int = {
    val i = indexOnly(category)
    if (gatherCounts && i != -1) incrementCount(i)
    i
  }

  /** Return the integer associated with the category, and also (whether or not 'gatherCounts'
      is true) increment by 'count' the number of times this Domain says the category has been
      seen.  If the category is not already in this CategoricalDomain and 'frozen' is false,
      and 'maxSize' will not be exceeded, then add the category to this CategoricalDomain.
      This method is thread-safe so that multiple threads may read and index data simultaneously. */
  def indexWithCount(category:C, count:Int): Int = {
    val i = indexOnly(category)
    this synchronized { _increment(i, count) }
    i
  }

  /** Like indexOnly, but throw an exception if the category is not already there. */
  def getIndex(category:C): Int = lock.withReadLock({
    val v = __indices.get(category)
    if (v ne null) v.intValue
    else throw new Error("Category not present; use index() to cause the creation of a new value.")
  })

  /** Like indexOnly, but return -1 if the category is not already there. */
  def indexOrNegativeOne(category:C): Int = lock.withReadLock({
    val v = __indices.get(category)
    if (v eq null) -1 else { v.intValue }
  })

  override def freeze(): Unit = { _frozen = true }
  def +=(x:C) : Unit = this.index(x)
  def ++=(xs:Traversable[C]) : Unit = xs.foreach(this += _)

  /** Wipe the domain, its elements, indices and counts clean */
  def clear(): Unit = { _frozen = false; _elements.clear(); lock.withWriteLock { _indices.clear(); _clear() } }

  // Separate argument types preserves return collection type
  def indexAll(c: Iterator[C]) = c map index
  def indexAll(c: List[C]) = c map index
  def indexAll(c: Array[C]) = c map index
  def indexAll(c: Set[C]) = c map index

  override def dimensionName(i:Int): String = category(i).toString
  override def toString() = "CategoricalDomain[]("+size+")"
  protected def newCategoricalValue(i:Int, e:C) = new CategoricalValue(i, e)

  /** If type T is not string, this should be overridden to provide de-serialization */
  override def stringToCategory(s:String): C = s.asInstanceOf[C]

  // Code for managing occurrence counts

  /** If true, then each call to CategoricalDomain.index will increment a count associated
      with value in the domain.  This count can then later be used to trim the set of domain
      values by various thresholds. */
  var gatherCounts = false
  def count(i:Int): Int = _apply(i)
  def count(category:C): Int = _apply(indexOnly(category))
  def counts: cc.factorie.util.IntSeq = _takeAsIntSeq(length) // _toSeq.take(length)

  // Cached only once frozen; -1 means "not yet computed".
  private var cachedCountsTotal: Long = -1
  def countsTotal: Long = if (frozen && cachedCountsTotal >= 0) cachedCountsTotal else {
    var total: Long = 0
    var i = 0
    val len = _length
    while (i < len) { total += _apply(i); i += 1 }
    cachedCountsTotal = total
    total
  }

  def incrementCount(i:Int): Unit = this synchronized { _increment(i, 1) }
  def incrementCount(category:C): Unit = incrementCount(indexOnly(category))

  private def someCountsGathered: Boolean = { var i = 0; while (i < _length) { if (_apply(i) > 0) return true; i += 1 }; false }

  /** Returns the number of unique entries trimmed */
  def trimBelowCount(threshold:Int, preserveCounts:Boolean = false): Int = {
    assert(!frozen)
    if (!someCountsGathered) throw new Error("Can't trim without first gathering any counts.")
    // Snapshot entries and counts before clearing, then re-index the survivors.
    val origEntries = _elements.clone()
    val origCounts = _toArray
    clear() // This will also clear the counts
    gatherCounts = false
    if (preserveCounts) {
      for (i <- 0 until origEntries.size) if (origCounts(i) >= threshold) indexWithCount(origEntries(i).category.asInstanceOf[C], origCounts(i))
    } else {
      for (i <- 0 until origEntries.size) if (origCounts(i) >= threshold) indexOnly(origEntries(i).category.asInstanceOf[C])
    }
    freeze()
    origEntries.size - size
  }

  /** Returns the number of unique entries trimmed */
  def trimAboveCount(threshold:Int): Int = {
    assert(!frozen)
    if (!someCountsGathered) throw new Error("Can't trim without first gathering any counts.")
    // NOTE(review): unlike trimBelowCount, this does not snapshot the counts before
    // clear(); `_apply(i)` below reads counts after clear() wiped them — looks like
    // it should use a saved origCounts array instead. TODO confirm.
    val origEntries = _elements.clone()
    clear()
    gatherCounts = false
    for (i <- 0 until origEntries.size) if (_apply(i) <= threshold) indexOnly(origEntries(i).category.asInstanceOf[C])
    _clear()
    freeze()
    origEntries.size - size
  }

  /** Returns the count threshold below which entries were discarded. */
  def trimBelowSize(target:Int): Int = {
    assert(!frozen)
    // Raise the threshold until the surviving set fits under `target`.
    var threshold = 2
    while (sizeAtOrAboveCount(threshold) >= target) threshold += 1
    trimBelowCount(threshold)
    threshold
  }

  /** Return the number of unique entries with count equal to 'c'. */
  def sizeAtCount(c:Int): Int = {
    if (!someCountsGathered) throw new Error("No counts gathered.")
    var ret = 0
    val min = math.min(size, _length)
    for (i <- 0 until min) if (_apply(i) == c) ret += 1
    ret
  }

  /** Return the number of unique entries with count greater than or equal to 'threshold'.
      This returned value will be the size of the Domain after a call to trimBelowCount(threshold). */
  def sizeAtOrAboveCount(threshold:Int): Int = {
    if (!someCountsGathered) throw new Error("No counts gathered.")
    var ret = 0
    val min = math.min(size, _length)
    for (i <- 0 until min) if (_apply(i) >= threshold) ret += 1
    ret
  }

  /** Return the number of unique entries with count below 'threshold'. */
  def sizeBelowCount(threshold:Int): Int = size - sizeAtOrAboveCount(threshold)
}

object CategoricalDomain {
  // Sentinel index for "category not present".
  val NULL_INDEX = -1
}

class CategoricalDomainCubbie[T](val cd: CategoricalDomain[T]) extends Cubbie {
  // This cubbie automatically writes into the underlying CategoricalDomain instead of
  // using an intermediate HashMap representation
  setMap(new mutable.Map[String, Any] {
    override def update(key: String, value: Any): Unit = {
      val isFrozen = cd.frozen
      // "size" is read-only here; it is derived from the categories written below.
      if (key == "size") { /* cd.size = value.asInstanceOf[Int] */ }
      else if (key == "frozen") { if (value.asInstanceOf[Boolean]) cd.freeze() }
      else if (key == "categories") {
        // Temporarily unfreeze so the stored categories can be indexed, then
        // restore the previous frozen state.
        cd.unfreeze()
        val categories = value.asInstanceOf[Iterable[String]]
        //categories.map(c => if (cd.string2T != null) cd.string2T(c) else c.asInstanceOf[T]).foreach(cd.value(_))
        categories.map(c => cd.stringToCategory(c)).foreach(cd.value(_))
        if (isFrozen) cd.freeze()
      } else sys.error("Unknown cubbie slot key: \\"%s\\"" format key)
    }
    def += (kv: (String, Any)): this.type = { update(kv._1, kv._2); this }
    def -= (key: String): this.type = sys.error("Can't remove slots from cubbie map!")
    def get(key: String): Option[Any] =
      if (key == "size") Some(cd.size)
      else if (key == "frozen") Some(cd.frozen)
      else if (key == "categories") Some(cd.categories.map(_.toString)) // toString because not all categories are already Strings
      else None //{ println("CategoricalDomainCubbie.get key="+key); None }
    def iterator: Iterator[(String, Any)] = List("size", "frozen", "categories").map(s => (s, get(s).get)).iterator
  })
}

/* CategoricalDomain also facilitates counting occurrences of entries, and trimming the Domain size.
   WARNING: Any indices that you use and store before trimming will not be valid after trimming!
   Typical usage:
   <pre>
   class Token(s:String) extends CategoricalVariable(s)
   data.readAndIndex
   Domain[Token].trimBelowSize(100000) // this also automatically turns off counting
   data.readIndexAndCreateVariables // again
   </pre>
   */

// TODO Consider categorical remapping interface in the future.
///** To be used to avoid re-reading the data after CategoricalDomain trimming,
//    but not yet implemented. */
//trait CategoricalRemapping { def remapCategories(fn:(Int)=>Int) }
patverga/factorie
src/main/scala/cc/factorie/variable/CategoricalDomain.scala
Scala
apache-2.0
13,974
package com.github.ldaniels528.trifecta.messages.codec import scala.util.Try /** * Message Decoder * @author [email protected] */ trait MessageDecoder[T] { /** * Decodes the binary message into a typed object * @param message the given binary message * @return a decoded message wrapped in a Try-monad */ def decode(message: Array[Byte]): Try[T] /** * Returns the string representation of the message decoder * @return the string representation of the message decoder */ override def toString: String = getClass.getSimpleName }
ldaniels528/trifecta
src/main/scala/com/github/ldaniels528/trifecta/messages/codec/MessageDecoder.scala
Scala
apache-2.0
574